repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
ashleyholman/bitcoin | qa/rpc-tests/txn_clone.py | 80 | 8079 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with an equivalent malleability clone
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.authproxy import AuthServiceProxy, JSONRPCException
from decimal import Decimal
from test_framework.util import *
import os
import shutil
class TxnMallTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 1,250 BTC:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("")  # bug workaround: coins generated are assigned to the first getnewaddress call!
# Assign coins to foo and bar accounts:
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs)
# 3 hex manipulations on the clone are required
# manipulation 1. sequence is at version+#inputs+input+sigstub
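# offset arithmetic: 4-byte version + 1-byte input count + 36-byte outpoint + 1-byte empty-scriptSig length = 42 bytes, doubled because clone_raw is a hex string (2 characters per byte)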
posseq = 2*(4+1+36+1)
seqbe = '%08x' % rawtx1["vin"][0]["sequence"]
clone_raw = clone_raw[:posseq] + seqbe[6:8] + seqbe[4:6] + seqbe[2:4] + seqbe[0:2] + clone_raw[posseq + 8:]
# manipulation 2. createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 40 BTC serialized is 00286bee00000000
pos0 = 2*(4+1+36+1+4+1)
hex40 = "00286bee00000000"
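# 40 BTC = 4,000,000,000 satoshis = 0xee6b2800, serialized as the 8-byte little-endian value 00286bee00000000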
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
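# a serialized output is an 8-byte value (16 hex chars), a 1-byte script length (2 hex chars) and the script bytes themselves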
if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or
rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# manipulation 3. locktime is after outputs
poslt = pos0 + 2 * output_len
ltbe = '%08x' % rawtx1["locktime"]
clone_raw = clone_raw[:poslt] + ltbe[6:8] + ltbe[4:6] + ltbe[2:4] + ltbe[0:2] + clone_raw[poslt + 8:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BTC for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -1)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured,
# less possible orphaned matured subsidy
expected += 100
if (self.options.mine_block):
expected -= 50
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1
assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
# "bar" should have been debited by (possibly unconfirmed) tx2
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
# "" should have starting balance, less funding txes, plus subsidies
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 1219
+ fund_foo_tx["fee"]
- 29
+ fund_bar_tx["fee"]
+ 100)
# Node1's "from0" account balance
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
if __name__ == '__main__':
TxnMallTest().main()
| mit | 8,288,038,926,523,926,000 | 46.804734 | 115 | 0.596608 | false |
crs4/ome_seadragon | ome_data/utils.py | 2 | 2287 | # Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from .. import settings
from copy import copy
def _get_group_id_by_name(group_name, connection):
group = connection.getObject('ExperimenterGroup', attributes={'name': group_name})
if group:
return group.id
else:
return None
def _get_current_group_id(connection):
return connection.getGroupFromContext().id
def switch_to_default_search_group(connection):
if settings.DEFAULT_SEARCH_GROUP:
group_id = _get_group_id_by_name(settings.DEFAULT_SEARCH_GROUP, connection)
if group_id and (group_id != _get_current_group_id(connection)):
connection.setGroupForSession(group_id)
def _adapt_ellipse_roi(roi_json):
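# rename the newer ellipse attribute keys (x, y, radiusX, radiusY) to the legacy cx/cy/rx/ry names; ROIs that do not carry the newer keys are returned unchanged (the KeyError is ignored)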
new_json = copy(roi_json)
try:
new_json['cx'] = new_json.pop('x')
new_json['cy'] = new_json.pop('y')
new_json['rx'] = new_json.pop('radiusX')
new_json['ry'] = new_json.pop('radiusY')
except KeyError:
pass
return new_json
def adapt_rois_json(rois):
adapted_rois = list()
for r in rois:
if r['type'] == 'Ellipse':
adapted_rois.append(_adapt_ellipse_roi(r))
else:
adapted_rois.append(r)
return adapted_rois
| mit | -6,791,084,387,814,346,000 | 35.887097 | 86 | 0.695234 | false |
jmargeta/scikit-learn | sklearn/svm/classes.py | 2 | 26517 | from .base import BaseLibLinear, BaseSVC, BaseLibSVM
from ..base import RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin
from ..feature_selection.selector_mixin import SelectorMixin
class LinearSVC(BaseLibLinear, LinearClassifierMixin, SelectorMixin,
SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'l1' or 'l2' (default='l2')
Specifies the loss function. 'l1' is the hinge loss (standard SVM)
while 'l2' is the squared hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
because it is consistent, it is seldom used in practice, rarely leads to
better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : int, default: 0
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state: int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
`coef_` : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
`intercept_` : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. Furthermore
SGDClassifier is scalable to large number of samples as it uses
a Stochastic Gradient Descent optimizer.
Finally SGDClassifier can fit both dense and sparse data without
memory copy if the input is C-contiguous or CSR.
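Examples
--------
The usage sketch below is illustrative only and is not part of the original
docstring; the toy data mirrors the SVC example further down and the
expected label is an assumption.
>>> import numpy as np
>>> from sklearn.svm import LinearSVC
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = LinearSVC().fit(X, y)
>>> clf.predict([[2, 2]])  # expected to yield class 2 for this toy data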
"""
# all the implementation is provided by the mixins
pass
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to datasets with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each,
see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
.. The narrative documentation is available at http://scikit-learn.org/
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of kernel function.
It is significant only in 'poly' and 'sigmoid'.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf' and 'poly'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling predict_proba.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [n_SV, n_features]
Support vectors.
`n_support_` : array-like, dtype=int32, shape = [n_class]
number of support vector for each class.
`dual_coef_` : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in the \
SVM section of the User Guide for details.
`coef_` : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison elements.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1):
super(SVC, self).__init__(
'c_svc', kernel, degree, gamma, coef0, tol, C, 0., 0., shrinking,
probability, cache_size, class_weight, verbose, max_iter)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling predict_proba.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [n_SV, n_features]
Support vectors.
`n_support_` : array-like, dtype=int32, shape = [n_class]
number of support vector for each class.
`dual_coef_` : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in \
the SVM section of the User Guide for details.
`coef_` : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.5, probability=False, shrinking=True, tol=0.001,
verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, verbose=False, max_iter=-1):
super(NuSVC, self).__init__(
'nu_svc', kernel, degree, gamma, coef0, tol, 0., nu, 0., shrinking,
probability, cache_size, None, verbose, max_iter)
class SVR(BaseLibSVM, RegressorMixin):
"""epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling predict_proba.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficients of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma=0.0,
kernel='rbf', max_iter=-1, probability=False, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
C=1.0, epsilon=0.1, shrinking=True, probability=False,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel, degree, gamma, coef0, tol, C, 0., epsilon,
shrinking, probability, cache_size, None, verbose,
max_iter)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of SVR.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken. Only available if impl='nu_svc'.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling predict_proba.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficients of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.1, probability=False, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma=0.0, coef0=0.0, shrinking=True,
probability=False, tol=1e-3, cache_size=200,
verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel, degree, gamma, coef0, tol, C, nu, 0., shrinking,
probability, cache_size, None, verbose, max_iter)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outliers Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional
Degree of kernel function. Significant only in poly, rbf, sigmoid.
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional
Independent term in kernel function. It is only significant in
poly/sigmoid.
tol : float, optional
Tolerance for stopping criterion.
shrinking: boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficient of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
nu=0.5, shrinking=True, cache_size=200, verbose=False,
max_iter=-1):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter)
def fit(self, X, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
| bsd-3-clause | -3,238,665,965,295,985,700 | 36.086713 | 79 | 0.641966 | false |
yahoo/TensorFlowOnSpark | doc/source/conf.py | 1 | 5262 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sphinx_rtd_theme
import sys
_pysrc = os.path.abspath(os.path.join(os.path.abspath(__file__), '..', '..', '..'))
sys.path.insert(0, _pysrc)
autodoc_mock_imports = ["pyspark", "tensorflow"]
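# mocking these heavyweight dependencies lets sphinx.ext.autodoc import the tensorflowonspark modules on a build machine that has neither Spark nor TensorFlow installed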
# -- Project information -----------------------------------------------------
project = 'TensorFlowOnSpark'
copyright = '2020, Yahoo Inc / Verizon Media'
author = 'Yahoo Inc'
# The short X.Y version
version = '2.2.4'
# The full version, including alpha/beta/rc tags
release = '2.2.4'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx_rtd_theme'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
add_module_names = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TensorFlowOnSparkdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TensorFlowOnSpark.tex', 'TensorFlowOnSpark Documentation',
'Lee Yang', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tensorflowonspark', 'TensorFlowOnSpark Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TensorFlowOnSpark', 'TensorFlowOnSpark Documentation',
author, 'TensorFlowOnSpark', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
| apache-2.0 | -3,839,136,365,204,922,000 | 29.952941 | 83 | 0.649183 | false |
gwdg/nova-docker | novadocker/tests/virt/docker/test_firewall.py | 14 | 5171 | # Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.network import model as network_model
from nova import test
from nova.virt import firewall
from novadocker.virt.docker import driver
class DockerFirewallDriverTestCase(test.TestCase):
REQUIRES_LOCKING = True
gateway_bridge_4 = network_model.IP(address='10.11.12.1', type='gateway')
dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
ips_bridge_4 = [network_model.IP(address='101.168.1.9', type='fixed',
version=4)]
subnet_bridge_4 = network_model.Subnet(cidr='10.11.1.0/24',
dns=[dns_bridge_4],
gateway=gateway_bridge_4,
ips=ips_bridge_4,
routes=None)
network_bridge = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge='br100',
label=None,
subnets=[subnet_bridge_4],
bridge_interface='eth0')
vif_bridge = network_model.VIF(id='920be2f4-2b98-411e-890a-69bcabb2a5a0',
address='00:11:22:33:44:55',
network=network_bridge,
type=network_model.VIF_TYPE_BRIDGE)
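# The class attributes above build one bridge-type VIF on a fake 10.11.1.0/24 network; they are reused by the plug_vifs test below.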
def setUp(self):
super(DockerFirewallDriverTestCase, self).setUp()
self.driver = driver.DockerDriver(None)
@mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
create=True)
@mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
create=True)
@mock.patch.object(firewall.NoopFirewallDriver, 'apply_instance_filter',
create=True)
def test_start_firewall(self, mock_aif, mock_sbf, mock_pif):
fake_inst = 'fake-inst'
fake_net_info = mock.ANY
self.driver._start_firewall(fake_inst, fake_net_info)
mock_aif.assert_called_once_with(fake_inst, fake_net_info)
mock_sbf.assert_called_once_with(fake_inst, fake_net_info)
mock_pif.assert_called_once_with(fake_inst, fake_net_info)
@mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
create=True)
def test_stop_firewall(self, mock_ui):
fake_inst = 'fake-inst'
fake_net_info = mock.ANY
self.driver._stop_firewall(fake_inst, fake_net_info)
mock_ui.assert_called_once_with(fake_inst, fake_net_info)
@mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
create=True)
@mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
create=True)
@mock.patch.object(firewall.NoopFirewallDriver, 'apply_instance_filter',
create=True)
def test_plug_vifs_bridge(self, mock_aif, mock_sbf, mock_pif):
fake_net_info = [self.vif_bridge]
with mock.patch('nova.utils.execute'):
d = driver.DockerDriver(object)
fake_inst = {'name': 'fake_instance'}
d.plug_vifs(fake_inst, fake_net_info)
mock_aif.assert_called_once_with(fake_inst, fake_net_info)
mock_sbf.assert_called_once_with(fake_inst, fake_net_info)
mock_pif.assert_called_once_with(fake_inst, fake_net_info)
@mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
create=True)
def test_unplug_vifs_ovs(self, mock_ui):
iface_id = '920be2f4-2b98-411e-890a-69bcabb2a5a0'
fake_net_info = [
{'network': {'bridge': 'br-int',
'subnets': [{'gateway': {'address': '10.11.12.1'},
'cidr': '10.11.12.0/24',
'ips': [{'address': '10.11.12.3',
'type': 'fixed', 'version': 4}]
}]},
'devname': 'tap920be2f4-2b',
'address': '00:11:22:33:44:55',
'id': iface_id,
'type': network_model.VIF_TYPE_OVS}]
with mock.patch('nova.utils.execute'):
d = driver.DockerDriver(object)
fake_inst = {'name': 'fake_instance', 'uuid': 'instance_uuid'}
d.unplug_vifs(fake_inst, fake_net_info)
mock_ui.assert_called_once_with(fake_inst, fake_net_info)
| apache-2.0 | -709,178,775,706,649,600 | 45.585586 | 78 | 0.564301 | false |
Lilykos/invenio | invenio/ext/template/__init__.py | 8 | 7715 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Additional extensions and filters for jinja2 module."""
import re
from flask import _request_ctx_stack, g, request, url_for
from flask_login import current_user
from jinja2 import ChoiceLoader
from six import iteritems, string_types
from werkzeug.routing import BuildError
from .bccache import BytecodeCacheWithConfig
from .context_processor import setup_app as context_processor_setup_app
from .loader import OrderAwareDispatchingJinjaLoader
ENV_PREFIX = '_collected_'
def render_template_to_string(input, _from_string=False, **context):
"""Render a template from the template folder with the given context.
Code based on
`<https://github.com/mitsuhiko/flask/blob/master/flask/templating.py>`_
:param input: the string template, or name of the template to be
rendered, or an iterable with template names; the first existing
one will be rendered
:param context: the variables that should be available in the
context of the template.
:return: a string
"""
ctx = _request_ctx_stack.top
ctx.app.update_template_context(context)
if _from_string:
template = ctx.app.jinja_env.from_string(input)
else:
template = ctx.app.jinja_env.get_or_select_template(input)
return template.render(context)
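# Illustrative example (not part of the original module; the template string
# and keyword value are made up):
#     render_template_to_string(u'Hello {{ name }}!', _from_string=True,
#                               name=u'world')
#     # -> u'Hello world!'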
def inject_utils():
"""Inject variables and functions to jinja execution context.
In particular it will add:
- ``url_for``: an Invenio-specific wrapper of Flask's url_for that lets
you obtain URLs for non-Flask-native handlers (i.e. not yet ported
Invenio URLs)
- ``_``: this can be used to automatically translate a given string.
- ``is_language_rtl``: True if the chosen language should be read right to
left.
"""
from invenio.base.i18n import is_language_rtl
from invenio.modules.records.api import get_record
from invenio.utils.url import create_url, get_canonical_and_alternates_urls
def invenio_url_for(endpoint, **values):
try:
return url_for(endpoint, **values)
except BuildError:
if re.match("https?://", endpoint, re.IGNORECASE):
return endpoint
if endpoint.startswith('.'):
endpoint = request.blueprint + endpoint
url = create_url('/' + endpoint.replace('.', '/'), values, False)
return url.decode('utf-8')
user = current_user._get_current_object()
canonical_url, alternate_urls = get_canonical_and_alternates_urls(
request.path)
alternate_urls = dict((ln.replace('_', '-'), alternate_url)
for ln, alternate_url in iteritems(alternate_urls))
return dict(
current_user=user,
is_language_rtl=is_language_rtl,
canonical_url=canonical_url,
alternate_urls=alternate_urls,
get_record=get_record,
url_for=invenio_url_for,
)
def setup_app(app):
"""
Extend application template filters with custom filters and fixes.
List of applied filters:
- filesizeformat
- path_join
- quoted_txt2html
- invenio_format_date
- invenio_pretty_date
- invenio_url_args
"""
import os
from datetime import datetime
from invenio.utils.date import convert_datetext_to_dategui, \
convert_datestruct_to_dategui, pretty_date
from . import config
app.config.from_object(config)
context_processor_setup_app(app)
app.context_processor(inject_utils)
if app.config.get('JINJA2_BCCACHE', False):
app.jinja_options = dict(
app.jinja_options,
auto_reload=app.config.get('JINJA2_BCCACHE_AUTO_RELOAD', False),
cache_size=app.config.get('JINJA2_BCCACHE_SIZE', -1),
bytecode_cache=BytecodeCacheWithConfig(app))
# Let's customize the template loader to look into packages
# and application templates folders.
jinja_loader = ChoiceLoader([
OrderAwareDispatchingJinjaLoader(app),
app.jinja_loader,
])
app.jinja_loader = jinja_loader
for ext in app.config.get('JINJA2_EXTENSIONS', []):
try:
app.jinja_env.add_extension(ext)
except Exception:
app.logger.exception(
'Problem with loading extension: "{0}"'.format(ext))
def test_not_empty(v):
return v is not None and v != ''
@app.template_filter('u')
def tounicode(value):
if isinstance(value, str):
return value.decode('utf8')
return value
@app.template_filter('prefix')
def _prefix(value, prefix=''):
return prefix + value if test_not_empty(value) else ''
@app.template_filter('suffix')
def _suffix(value, suffix=''):
return value + suffix if test_not_empty(value) else ''
@app.template_filter('wrap')
def _wrap(value, prefix='', suffix=''):
return prefix + value + suffix if test_not_empty(value) else ''
@app.template_filter('sentences')
def _sentences(value, limit, separator='. '):
"""Return first `limit` number of sentences ending by `separator`."""
return separator.join(value.split(separator)[:limit])
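# e.g. in a template, {{ "First. Second. Third" | sentences(2) }} renders
# "First. Second" (illustrative input values)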
@app.template_filter('path_join')
def _os_path_join(d):
"""Shortcut for `os.path.join`."""
return os.path.join(*d)
@app.template_filter('quoted_txt2html')
def _quoted_txt2html(*args, **kwargs):
from invenio.utils.mail import email_quoted_txt2html
return email_quoted_txt2html(*args, **kwargs)
@app.template_filter('invenio_format_date')
def _format_date(date):
"""
Format a date into a human friendly format.
It uses :py:func:`invenio.utils.date.convert_datetext_to_dategui`
"""
if isinstance(date, datetime):
return convert_datestruct_to_dategui(
date.timetuple(),
getattr(g, 'ln', app.config['CFG_SITE_LANG'])).decode('utf-8')
return convert_datetext_to_dategui(
date, getattr(g, 'ln', app.config['CFG_SITE_LANG'])
).decode('utf-8')
@app.template_filter('invenio_pretty_date')
def _pretty_date(date):
"""
Format a timestamp into a human friendly format.
It uses :py:func:`invenio.utils.date.pretty_date`
"""
if isinstance(date, datetime) or isinstance(date, string_types):
return pretty_date(
date, ln=getattr(g, 'ln', app.config['CFG_SITE_LANG']))
return date
@app.template_filter('invenio_url_args')
def _url_args(d, append=u'?', filter=[]):
from jinja2.utils import escape
rv = append + u'&'.join(
u'%s=%s' % (escape(key), escape(value))
for key, value in d.iteritems(True)
if value is not None and key not in filter
# and not isinstance(value, Undefined)
)
return rv
return app
| gpl-2.0 | -4,055,307,586,010,908,700 | 33.137168 | 79 | 0.643552 | false |
huangciyin/youtube-dl | youtube_dl/update.py | 19 | 7354 | import io
import json
import traceback
import hashlib
import os
import subprocess
import sys
from zipimport import zipimporter
from .utils import (
compat_str,
compat_urllib_request,
)
from .version import __version__
def rsa_verify(message, signature, key):
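# PKCS#1 v1.5 signature check: key is a (modulus, exponent) pair; the hex signature is exponentiated mod n, the leading 00 01 padding block is stripped, the ASN.1 DigestInfo prefix for SHA-256 is verified, and the remaining bytes must equal sha256(message).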
from struct import pack
from hashlib import sha256
from sys import version_info
def b(x):
if version_info[0] == 2: return x
else: return x.encode('latin1')
assert(type(message) == type(b('')))
block_size = 0
n = key[0]
while n:
block_size += 1
n >>= 8
signature = pow(int(signature, 16), key[1], key[0])
raw_bytes = []
while signature:
raw_bytes.insert(0, pack("B", signature & 0xFF))
signature >>= 8
signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
if signature[0:2] != b('\x00\x01'): return False
signature = signature[2:]
if not b('\x00') in signature: return False
signature = signature[signature.index(b('\x00'))+1:]
if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
signature = signature[19:]
if signature != sha256(message).digest(): return False
return True
def update_self(to_screen, verbose):
"""Update the program file with the latest version from the repository"""
UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"):
to_screen(u'It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
return
# Check if there is a new version
try:
newversion = compat_urllib_request.urlopen(VERSION_URL).read().decode('utf-8').strip()
except:
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: can\'t find the current version. Please try again later.')
return
if newversion == __version__:
to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
return
# Download and check versions info
try:
versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
versions_info = json.loads(versions_info)
except:
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: can\'t obtain versions info. Please try again later.')
return
if not 'signature' in versions_info:
to_screen(u'ERROR: the versions file is not signed or corrupted. Aborting.')
return
signature = versions_info['signature']
del versions_info['signature']
if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
to_screen(u'ERROR: the versions file signature is invalid. Aborting.')
return
version_id = versions_info['latest']
def version_tuple(version_str):
return tuple(map(int, version_str.split('.')))
if version_tuple(__version__) >= version_tuple(version_id):
to_screen(u'youtube-dl is up to date (%s)' % __version__)
return
to_screen(u'Updating to version ' + version_id + ' ...')
version = versions_info['versions'][version_id]
print_notes(to_screen, versions_info['versions'])
filename = sys.argv[0]
# Py2EXE: Filename could be different
if hasattr(sys, "frozen") and not os.path.isfile(filename):
if os.path.isfile(filename + u'.exe'):
filename += u'.exe'
if not os.access(filename, os.W_OK):
to_screen(u'ERROR: no write permissions on %s' % filename)
return
# Py2EXE
if hasattr(sys, "frozen"):
exe = os.path.abspath(filename)
directory = os.path.dirname(exe)
if not os.access(directory, os.W_OK):
to_screen(u'ERROR: no write permissions on %s' % directory)
return
try:
urlh = compat_urllib_request.urlopen(version['exe'][0])
newcontent = urlh.read()
urlh.close()
except (IOError, OSError):
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to download latest version')
return
newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['exe'][1]:
to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
return
try:
with open(exe + '.new', 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError):
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to write the new version')
return
try:
bat = os.path.join(directory, 'youtube-dl-updater.bat')
with io.open(bat, 'w') as batfile:
batfile.write(u"""
@echo off
echo Waiting for file handle to be closed ...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s" > NUL
echo Updated youtube-dl to version %s.
start /b "" cmd /c del "%%~f0"&exit /b"
\n""" % (exe, exe, version_id))
subprocess.Popen([bat]) # Continues to run in the background
return # Do not show premature success messages
except (IOError, OSError):
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to overwrite current version')
return
# Zip unix package
elif isinstance(globals().get('__loader__'), zipimporter):
try:
urlh = compat_urllib_request.urlopen(version['bin'][0])
newcontent = urlh.read()
urlh.close()
except (IOError, OSError):
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to download latest version')
return
newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['bin'][1]:
to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
return
try:
with open(filename, 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError):
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to overwrite current version')
return
to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
def get_notes(versions, fromVersion):
notes = []
for v,vdata in sorted(versions.items()):
if v > fromVersion:
notes.extend(vdata.get('notes', []))
return notes
def print_notes(to_screen, versions, fromVersion=__version__):
notes = get_notes(versions, fromVersion)
if notes:
to_screen(u'PLEASE NOTE:')
for note in notes:
to_screen(note)
| unlicense | 7,776,189,574,265,000,000 | 36.712821 | 289 | 0.623606 | false |
AnthonyBriggs/Python-101 | hello_python_source_py3/chapter 07/getattr_setattr.py | 1 | 2040 |
"""
An example of using __getattr__, __setattr__ and __getattribute__
"""
class TestGetAttr(object):
def __getattr__(self, name):
print("Attribute '%s' not found!" % name)
return 42
test_class = TestGetAttr()
print(test_class.something)
test_class.something = 43
print(test_class.something)
class TestSetAttr(object):
def __init__(self):
self.__dict__['things'] = {}
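# write through __dict__ directly: normal attribute assignment would invoke the overridden __setattr__ below before 'things' exists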
def __setattr__(self, name, value):
print("Setting '%s' to '%s'" % (name, value))
self.things[name] = value
def __getattr__(self, name):
try:
return self.things[name]
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" %
(self.__class__.__name__, name))
test_class2 = TestSetAttr()
test_class2.something = 42
print(test_class2.something)
print(test_class2.things)
#print test_class2.something_else
def get_real_attr(instance, name):
return object.__getattribute__(instance, name)
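# object.__getattribute__ bypasses any overridden __getattribute__/__getattr__ on the instance, giving raw access to its attributes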
class TestGetAttribute(object):
def __init__(self, things=None):
my_dict = get_real_attr(self, '__dict__')
if not things:
my_dict['things'] = {}
else:
my_dict['things'] = things
def __setattr__(self, name, value):
print("Setting '%s' to '%s'" % (name, value))
my_dict = get_real_attr(self, '__dict__')
my_dict['things'][name] = value
def __getattribute__(self, name):
try:
my_dict = get_real_attr(self, '__dict__')
return my_dict['things'][name]
except KeyError:
my_class = get_real_attr(self, '__class__')
raise AttributeError(
"'%s' object has no attribute '%s'" %
(my_class.__name__, name))
test_class3 = TestGetAttribute({'foo': 'bar'})
print(object.__getattribute__(test_class3, '__dict__'))
test_class3.something = 43
print(object.__getattribute__(test_class3, '__dict__'))
print(test_class3.foo)
| mit | 5,849,384,389,875,572,000 | 26.2 | 55 | 0.551471 | false |
scholer/py2cytoscape | py2cytoscape/data/style.py | 1 | 5675 | from . import BASE_URL, HEADERS
import requests
import json
import pandas as pd
class Style(object):
def __init__(self, name):
# Validate required argument
if name is None:
raise ValueError("Style name is required.")
else:
self.__name = name
self.__url = BASE_URL + 'styles/' + str(name) + '/'
def get_name(self):
"""
Get immutable name of this Visual Style.
:return: Style name as string
"""
return self.__name
def __get_new_mapping(self, mapping_type, column=None, col_type='String',
vp=None):
if column is None or vp is None:
raise ValueError('both column name and visual property are required.')
        new_mapping = {
            'mappingType': mapping_type,
            'mappingColumn': column,
            'mappingColumnType': col_type,
            'visualProperty': vp
        }
        return new_mapping
def create_discrete_mapping(self, column=None, col_type='String',
vp=None, mappings=None):
self.__call_create_mapping(
self.__get_discrete(column=column, col_type=col_type, vp=vp,
mappings=mappings))
def create_continuous_mapping(self, column=None, col_type='String',
vp=None, points=None):
self.__call_create_mapping(
self.__get_continuous(column=column, col_type=col_type, vp=vp,
points=points))
def create_passthrough_mapping(self, column=None, col_type='String',
vp=None):
self.__call_create_mapping(
self.__get_passthrough(column=column, col_type=col_type, vp=vp))
def __call_create_mapping(self, mapping):
url = self.__url + 'mappings'
requests.post(url, data=json.dumps([mapping]), headers=HEADERS)
def __get_passthrough(self, column=None, col_type='String', vp=None):
return self.__get_new_mapping('passthrough', column=column,
col_type=col_type, vp=vp)
def __get_discrete(self, column=None, col_type='String', vp=None,
mappings=None):
new_mapping = self.__get_new_mapping('discrete', column=column,
col_type=col_type, vp=vp)
if mappings is None:
raise ValueError('key-value pair object (mappings) is required.')
body = [{'key': key, 'value': mappings[key]} for key in mappings.keys()]
new_mapping['map'] = body
return new_mapping
def __get_continuous(self, column=None, col_type='String', vp=None,
points=None):
if points is None:
            raise ValueError('key-value pair object (points) is required.')
new_mapping = self.__get_new_mapping('continuous', column=column,
col_type=col_type, vp=vp)
new_mapping['points'] = points
return new_mapping
def get_mapping(self, vp=None):
if vp is None:
raise ValueError('Visual Property ID is required.')
url = self.__url + 'mappings/' + vp
return requests.get(url).json()
def get_mappings(self):
url = self.__url + 'mappings'
return requests.get(url).json()
def get_default(self, vp=None):
if vp is None:
raise ValueError('Visual Property ID is required.')
url = self.__url + 'defaults/' + vp
        key_value_pair = requests.get(url).json()
return pd.Series({key_value_pair['visualProperty']: key_value_pair[
'value']})
def get_defaults(self):
url = self.__url + 'defaults'
result = requests.get(url).json()['defaults']
vals = {entry['visualProperty']: entry['value'] for entry in result}
return pd.Series(vals)
def update_defaults(self, key_value_pair):
body = []
for key in key_value_pair:
entry = {
'visualProperty': key,
'value': key_value_pair[key]
}
body.append(entry)
url = self.__url + 'defaults'
requests.put(url, data=json.dumps(body), headers=HEADERS)
# Delete Methods
def delete_mapping(self, vp=None):
if vp is None:
return
url = self.__url + 'mappings/' + vp
requests.delete(url)
def delete_mappings(self):
url = self.__url + 'mappings'
requests.delete(url)
class StyleUtil(object):
@staticmethod
def create_2_color_gradient(min=0, max=10, colors=('red', 'green')):
points = [
{
'value': str(min),
'lesser': colors[0],
'equal': colors[0],
'greater': colors[0],
},
{
'value': str(max),
'lesser': colors[1],
'equal': colors[1],
'greater': colors[1]
}
]
return points
@staticmethod
def create_slope(min=0, max=10, values=(1, 10)):
points = [
{
'value': str(min),
'lesser': values[0],
'equal': values[0],
'greater': values[0],
},
{
'value': str(max),
'lesser': values[1],
'equal': values[1],
'greater': values[1]
}
]
return points
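# Usage sketch (illustrative only): ties Style and StyleUtil together. It assumes a
# running Cytoscape REST endpoint, an existing visual style named "default" and a
# numeric node column named "Degree"; those names are placeholders, not API facts.
def _example_apply_gradient():
    style = Style('default')
    points = StyleUtil.create_2_color_gradient(min=0, max=10, colors=('blue', 'red'))
    style.create_continuous_mapping(column='Degree', col_type='Double',
                                    vp='NODE_FILL_COLOR', points=points)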
| mit | 5,545,519,130,868,006,000 | 30.703911 | 82 | 0.513304 | false |
wwf5067/statsmodels | statsmodels/examples/ex_kernel_regression3.py | 34 | 2380 | # -*- coding: utf-8 -*-
"""script to try out Censored kernel regression
Created on Wed Jan 02 13:43:44 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
import statsmodels.nonparametric.api as nparam
if __name__ == '__main__':
np.random.seed(500)
nobs = [250, 1000][0]
sig_fac = 1
x = np.random.uniform(-2, 2, size=nobs)
x.sort()
x2 = x**2 + 0.02 * np.random.normal(size=nobs)
y_true = np.sin(x*5)/x + 2*x - 3 * x2
y = y_true + sig_fac * (np.sqrt(np.abs(3+x))) * np.random.normal(size=nobs)
cens_side = ['left', 'right', 'random'][2]
if cens_side == 'left':
c_val = 0.5
y_cens = np.clip(y, c_val, 100)
elif cens_side == 'right':
c_val = 3.5
y_cens = np.clip(y, -100, c_val)
elif cens_side == 'random':
c_val = 3.5 + 3 * np.random.randn(nobs)
y_cens = np.minimum(y, c_val)
model = nparam.KernelCensoredReg(endog=[y_cens],
#exog=[np.column_stack((x, x**2))], reg_type='lc',
exog=[x, x2], reg_type='ll',
var_type='cc', bw='aic', #'cv_ls', #[0.23, 434697.22], #'cv_ls',
censor_val=c_val[:,None],
#defaults=nparam.EstimatorSettings(efficient=True)
)
sm_bw = model.bw
sm_mean, sm_mfx = model.fit()
# model1 = nparam.KernelReg(endog=[y],
# exog=[x], reg_type='lc',
# var_type='c', bw='cv_ls')
# mean1, mfx1 = model1.fit()
model2 = nparam.KernelReg(endog=[y_cens],
exog=[x, x2], reg_type='ll',
var_type='cc', bw='aic',# 'cv_ls'
)
mean2, mfx2 = model2.fit()
print(model.bw)
#print model1.bw
print(model2.bw)
ix = np.argsort(y_cens)
ix_rev = np.zeros(nobs, int)
ix_rev[ix] = np.arange(nobs)
ix_rev = model.sortix_rev
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(x, y, 'o', alpha=0.5)
ax.plot(x, y_cens, 'o', alpha=0.5)
ax.plot(x, y_true, lw=2, label='DGP mean')
ax.plot(x, sm_mean[ix_rev], lw=2, label='model 0 mean')
ax.plot(x, mean2, lw=2, label='model 2 mean')
ax.legend()
plt.show()
| bsd-3-clause | 2,633,622,360,178,091,000 | 29.909091 | 93 | 0.49958 | false |
CallaJun/hackprince | indico/tests/test_remote.py | 1 | 10335 | import unittest
import os
from requests import ConnectionError
import numpy as np
import skimage.io
from nose.plugins.skip import Skip, SkipTest
from indicoio import config
from indicoio import political, sentiment, fer, facial_features, language, image_features, text_tags
from indicoio import batch_political, batch_sentiment, batch_fer, batch_facial_features
from indicoio import batch_language, batch_image_features, batch_text_tags
DIR = os.path.dirname(os.path.realpath(__file__))
class BatchAPIRun(unittest.TestCase):
def setUp(self):
self.api_key = config.api_key
config.url_protocol = "http:"
if not all(self.api_key):
raise SkipTest
def tearDown(self):
config.url_protocol = "https:"
def test_batch_texttags(self):
test_data = ["On Monday, president Barack Obama will be..."]
response = batch_text_tags(test_data, api_key=self.api_key)
self.assertTrue(isinstance(response, list))
def test_batch_posneg(self):
test_data = ['Worst song ever', 'Best song ever']
response = batch_sentiment(test_data, api_key=self.api_key)
self.assertTrue(isinstance(response, list))
self.assertTrue(response[0] < 0.5)
def test_batch_political(self):
test_data = ["Guns don't kill people, people kill people."]
response = batch_political(test_data, api_key=self.api_key)
self.assertTrue(isinstance(response, list))
def test_batch_fer(self):
test_data = [np.random.rand(48, 48).tolist()]
response = batch_fer(test_data, api_key=self.api_key)
self.assertTrue(isinstance(response, list))
self.assertTrue(isinstance(response[0], dict))
def test_batch_facial_features(self):
test_data = [np.random.rand(48, 48).tolist()]
response = batch_facial_features(test_data, api_key=self.api_key)
self.assertTrue(isinstance(response, list))
self.assertTrue(isinstance(response[0], list))
self.assertEqual(len(response[0]), 48)
# TODO: uncomment this test once the remote server is updated to
# deal with image_urls
# def test_batch_image_urls(self):
# test_data = ['http://textfac.es/static/ico/favicon.png',
# 'http://textfac.es/static/ico/favicon.png']
# response = batch_facial_features(test_data, auth=self.auth)
# self.assertTrue(isinstance(response, list))
# self.assertTrue(isinstance(response[0], list))
# self.assertEqual(len(response[0]), 48)
# TODO: add tests to test when one url is incorrect once we
# have decided how we are dealing with them
def test_batch_image_features_greyscale(self):
test_data = [np.random.rand(64, 64).tolist()]
response = batch_image_features(test_data, api_key=self.api_key)
self.assertTrue(isinstance(response, list))
self.assertTrue(isinstance(response[0], list))
self.assertEqual(len(response[0]), 2048)
def test_batch_image_features_rgb(self):
test_data = [np.random.rand(64, 64, 3).tolist()]
response = batch_image_features(test_data, api_key=self.api_key)
self.assertTrue(isinstance(response, list))
self.assertTrue(isinstance(response[0], list))
self.assertEqual(len(response[0]), 2048)
def test_batch_language(self):
test_data = ['clearly an english sentence']
response = batch_language(test_data, api_key=self.api_key)
self.assertTrue(isinstance(response, list))
self.assertTrue(response[0]['English'] > 0.25)
def test_batch_set_cloud(self):
test_data = ['clearly an english sentence']
self.assertRaises(ConnectionError,
batch_language,
test_data,
api_key=self.api_key,
cloud='invalid/cloud')
class FullAPIRun(unittest.TestCase):
def load_image(self, relpath, as_grey=False):
image_path = os.path.normpath(os.path.join(DIR, relpath))
image = skimage.io.imread(image_path, as_grey=True).tolist()
return image
def check_range(self, list, minimum=0.9, maximum=0.1, span=0.5):
vector = np.asarray(list)
self.assertTrue(vector.max() > maximum)
self.assertTrue(vector.min() < minimum)
self.assertTrue(np.ptp(vector) > span)
def test_text_tags(self):
text = "On Monday, president Barack Obama will be..."
results = text_tags(text)
max_keys = sorted(results.keys(), key=lambda x:results.get(x), reverse=True)
assert 'political_discussion' in max_keys[:5]
results = text_tags(text, top_n=5)
        assert len(results) == 5
results = text_tags(text, threshold=0.1)
for v in results.values():
assert v >= 0.1
def test_political(self):
political_set = set(['Libertarian', 'Liberal', 'Conservative', 'Green'])
test_string = "Guns don't kill people, people kill people."
response = political(test_string)
self.assertTrue(isinstance(response, dict))
self.assertEqual(political_set, set(response.keys()))
test_string = "pro-choice"
response = political(test_string)
self.assertTrue(isinstance(response, dict))
assert response['Liberal'] > 0.25
def test_posneg(self):
test_string = "Worst song ever."
response = sentiment(test_string)
self.assertTrue(isinstance(response, float))
self.assertTrue(response < 0.5)
test_string = "Best song ever."
response = sentiment(test_string)
self.assertTrue(isinstance(response, float))
self.assertTrue(response > 0.5)
def test_good_fer(self):
fer_set = set(['Angry', 'Sad', 'Neutral', 'Surprise', 'Fear', 'Happy'])
test_face = np.random.rand(48,48).tolist()
response = fer(test_face)
self.assertTrue(isinstance(response, dict))
self.assertEqual(fer_set, set(response.keys()))
def test_happy_fer(self):
test_face = self.load_image("data/happy.png", as_grey=True)
response = fer(test_face)
self.assertTrue(isinstance(response, dict))
self.assertTrue(response['Happy'] > 0.5)
def test_fear_fer(self):
test_face = self.load_image("data/fear.png", as_grey=True)
response = fer(test_face)
self.assertTrue(isinstance(response, dict))
self.assertTrue(response['Fear'] > 0.25)
def test_bad_fer(self):
fer_set = set(['Angry', 'Sad', 'Neutral', 'Surprise', 'Fear', 'Happy'])
test_face = np.random.rand(56,56).tolist()
response = fer(test_face)
self.assertTrue(isinstance(response, dict))
self.assertEqual(fer_set, set(response.keys()))
def test_good_facial_features(self):
test_face = np.random.rand(48,48).tolist()
response = facial_features(test_face)
self.assertTrue(isinstance(response, list))
self.assertEqual(len(response), 48)
self.check_range(response)
# TODO: uncomment this test once the remote server is updated to
# deal with image_urls
# def test_image_url(self):
# test_face = 'http://textfac.es/static/ico/favicon.png'
# response = facial_features(test_face)
# self.assertTrue(isinstance(response, list))
# self.assertEqual(len(response), 48)
# self.check_range(response)
def test_good_image_features_greyscale(self):
test_image = np.random.rand(64, 64).tolist()
response = image_features(test_image)
self.assertTrue(isinstance(response, list))
self.assertEqual(len(response), 2048)
self.check_range(response)
def test_good_image_features_rgb(self):
test_image = np.random.rand(64, 64, 3).tolist()
response = image_features(test_image)
self.assertTrue(isinstance(response, list))
self.assertEqual(len(response), 2048)
self.check_range(response)
def test_language(self):
language_set = set([
'English',
'Spanish',
'Tagalog',
'Esperanto',
'French',
'Chinese',
'French',
'Bulgarian',
'Latin',
'Slovak',
'Hebrew',
'Russian',
'German',
'Japanese',
'Korean',
'Portuguese',
'Italian',
'Polish',
'Turkish',
'Dutch',
'Arabic',
'Persian (Farsi)',
'Czech',
'Swedish',
'Indonesian',
'Vietnamese',
'Romanian',
'Greek',
'Danish',
'Hungarian',
'Thai',
'Finnish',
'Norwegian',
'Lithuanian'
])
language_dict = language('clearly an english sentence')
self.assertEqual(language_set, set(language_dict.keys()))
assert language_dict['English'] > 0.25
def test_set_cloud(self):
test_data = 'clearly an english sentence'
self.assertRaises(ConnectionError,
language,
test_data,
cloud='invalid/cloud')
temp_cloud = config.cloud
config.cloud = 'invalid/cloud'
self.assertEqual(config.cloud, 'invalid/cloud')
self.assertRaises(ConnectionError,
language,
test_data)
config.cloud = temp_cloud
self.assertRaises(ConnectionError,
language,
test_data,
cloud='indico-test')
def test_set_api_key(self):
test_data = 'clearly an english sentence'
self.assertRaises(ValueError,
language,
test_data,
api_key ='invalid_api_key')
temp_api_key = config.api_key
config.api_key = 'invalid_api_key'
self.assertEqual(config.api_key, 'invalid_api_key')
self.assertRaises(ValueError,
language,
test_data)
config.api_key = temp_api_key
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | -6,256,368,081,751,042,000 | 34.153061 | 100 | 0.591776 | false |
zhiwliu/openshift-ansible | roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py | 9 | 1140 | '''
Openshift Sanitize inventory class that provides useful filters used in Logging.
'''
import re
# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml
def map_from_pairs(source, delim="="):
    ''' Returns a dict parsed from a comma-separated string of delim-separated key/value pairs '''
if source == '':
return dict()
return dict(item.split(delim) for item in source.split(","))
def vars_with_pattern(source, pattern=""):
''' Returns a list of variables whose name matches the given pattern '''
if source == '':
return list()
var_list = list()
var_pattern = re.compile(pattern)
for item in source:
if var_pattern.match(item):
var_list.append(item)
return var_list
# pylint: disable=too-few-public-methods
class FilterModule(object):
''' OpenShift Logging Filters '''
# pylint: disable=no-self-use, too-few-public-methods
def filters(self):
''' Returns the names of the filters provided by this class '''
return {
'map_from_pairs': map_from_pairs,
'vars_with_pattern': vars_with_pattern
}
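# Illustrative usage (not part of the original role): what the two filters return for
# made-up inputs.
def _example_filter_results():
    pairs = map_from_pairs('region=us-east-1,zone=a')
    # -> {'region': 'us-east-1', 'zone': 'a'}
    names = vars_with_pattern(['openshift_ip', 'ansible_user'], pattern='openshift_.*')
    # -> ['openshift_ip']
    return pairs, names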
| apache-2.0 | 6,817,653,458,639,450,000 | 24.909091 | 93 | 0.637719 | false |
kylecarter/rest-django | tutorial/urls.py | 1 | 1389 | """tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import patterns, include, url
from django.contrib import admin
from rest_framework import routers
from .restaurantapi.views import UserViewSet, GroupViewSet, MenuViewSet, MenuItemViewSet, AvailableMenuList, AvailableMenuDetail
router = routers.DefaultRouter()
router.register(r'user', UserViewSet)
router.register(r'group', GroupViewSet)
router.register(r'menu', MenuViewSet)
router.register(r'menuitem', MenuItemViewSet)
urlpatterns = patterns('',
url(r'^', include(router.urls)),
url(r'^admin/', include(admin.site.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^available-menus/$', AvailableMenuList.as_view()),
url(r'^available-menus/(?P<pk>[0-9]+)/$', AvailableMenuDetail.as_view()),
)
| apache-2.0 | 4,002,521,200,153,434,600 | 41.090909 | 128 | 0.728582 | false |
amwelch/a10sdk-python | a10sdk/core/enable/enable_core.py | 2 | 1263 | from a10sdk.common.A10BaseClass import A10BaseClass
class EnableCore(A10BaseClass):
"""Class Description::
Enable system coredump switch.
Class enable-core supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param core_level: {"description": "'a10': Enable A10 core dump, by default; 'system': Enable system coredump; ", "format": "enum", "default": "a10", "optional": true, "enum": ["a10", "system"], "not": "full", "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/enable-core`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "enable-core"
self.a10_url="/axapi/v3/enable-core"
self.DeviceProxy = ""
self.uuid = ""
self.core_level = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
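# Illustrative sketch (not from the a10sdk docs): keyword arguments passed to the
# constructor simply become attributes via the loop above; wiring a real DeviceProxy
# and REST session is omitted here.
def _example_enable_core():
    obj = EnableCore(core_level='system')
    return obj.a10_url, obj.core_level  # ('/axapi/v3/enable-core', 'system')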
| apache-2.0 | 4,154,146,131,813,860,400 | 33.135135 | 231 | 0.621536 | false |
Orochimarufan/youtube-dl | youtube_dl/extractor/imggaming.py | 12 | 5111 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
int_or_none,
str_or_none,
try_get,
)
class ImgGamingBaseIE(InfoExtractor):
_API_BASE = 'https://dce-frontoffice.imggaming.com/api/v2/'
_API_KEY = '857a1e5d-e35e-4fdf-805b-a87b6f8364bf'
_HEADERS = None
_MANIFEST_HEADERS = {'Accept-Encoding': 'identity'}
_REALM = None
_VALID_URL_TEMPL = r'https?://(?P<domain>%s)/(?P<type>live|playlist|video)/(?P<id>\d+)(?:\?.*?\bplaylistId=(?P<playlist_id>\d+))?'
def _real_initialize(self):
self._HEADERS = {
'Realm': 'dce.' + self._REALM,
'x-api-key': self._API_KEY,
}
email, password = self._get_login_info()
if email is None:
self.raise_login_required()
p_headers = self._HEADERS.copy()
p_headers['Content-Type'] = 'application/json'
self._HEADERS['Authorization'] = 'Bearer ' + self._download_json(
self._API_BASE + 'login',
None, 'Logging in', data=json.dumps({
'id': email,
'secret': password,
}).encode(), headers=p_headers)['authorisationToken']
def _call_api(self, path, media_id):
return self._download_json(
self._API_BASE + path + media_id, media_id, headers=self._HEADERS)
def _extract_dve_api_url(self, media_id, media_type):
stream_path = 'stream'
if media_type == 'video':
stream_path += '/vod/'
else:
stream_path += '?eventId='
try:
return self._call_api(
stream_path, media_id)['playerUrlCallback']
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
raise ExtractorError(
self._parse_json(e.cause.read().decode(), media_id)['messages'][0],
expected=True)
raise
def _real_extract(self, url):
domain, media_type, media_id, playlist_id = re.match(self._VALID_URL, url).groups()
if playlist_id:
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % media_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % playlist_id)
media_type, media_id = 'playlist', playlist_id
if media_type == 'playlist':
playlist = self._call_api('vod/playlist/', media_id)
entries = []
for video in try_get(playlist, lambda x: x['videos']['vods']) or []:
video_id = str_or_none(video.get('id'))
if not video_id:
continue
entries.append(self.url_result(
'https://%s/video/%s' % (domain, video_id),
self.ie_key(), video_id))
return self.playlist_result(
entries, media_id, playlist.get('title'),
playlist.get('description'))
dve_api_url = self._extract_dve_api_url(media_id, media_type)
video_data = self._download_json(dve_api_url, media_id)
is_live = media_type == 'live'
if is_live:
title = self._live_title(self._call_api('event/', media_id)['title'])
else:
title = video_data['name']
formats = []
for proto in ('hls', 'dash'):
media_url = video_data.get(proto + 'Url') or try_get(video_data, lambda x: x[proto]['url'])
if not media_url:
continue
if proto == 'hls':
m3u8_formats = self._extract_m3u8_formats(
media_url, media_id, 'mp4', 'm3u8' if is_live else 'm3u8_native',
m3u8_id='hls', fatal=False, headers=self._MANIFEST_HEADERS)
for f in m3u8_formats:
f.setdefault('http_headers', {}).update(self._MANIFEST_HEADERS)
formats.append(f)
else:
formats.extend(self._extract_mpd_formats(
media_url, media_id, mpd_id='dash', fatal=False,
headers=self._MANIFEST_HEADERS))
self._sort_formats(formats)
subtitles = {}
for subtitle in video_data.get('subtitles', []):
subtitle_url = subtitle.get('url')
if not subtitle_url:
continue
subtitles.setdefault(subtitle.get('lang', 'en_US'), []).append({
'url': subtitle_url,
})
return {
'id': media_id,
'title': title,
'formats': formats,
'thumbnail': video_data.get('thumbnailUrl'),
'description': video_data.get('description'),
'duration': int_or_none(video_data.get('duration')),
'tags': video_data.get('tags'),
'is_live': is_live,
'subtitles': subtitles,
}
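# Illustrative sketch (hypothetical, not a real youtube-dl extractor): concrete sites
# subclass ImgGamingBaseIE and supply at least their realm and URL pattern; the
# domain below is a placeholder.
class _ExampleIE(ImgGamingBaseIE):
    _REALM = 'example'
    _VALID_URL = ImgGamingBaseIE._VALID_URL_TEMPL % r'example\.com'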
| unlicense | -6,512,006,905,178,425,000 | 37.428571 | 134 | 0.531207 | false |
garaden/flask | tests/test_helpers.py | 3 | 26555 | # -*- coding: utf-8 -*-
"""
tests.helpers
~~~~~~~~~~~~~~~~~~~~~~~
Various helpers.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import os
import datetime
import flask
from logging import StreamHandler
from werkzeug.exceptions import BadRequest
from werkzeug.http import parse_cache_control_header, parse_options_header
from werkzeug.http import http_date
from flask._compat import StringIO, text_type
def has_encoding(name):
try:
import codecs
codecs.lookup(name)
return True
except LookupError:
return False
class TestJSON(object):
def test_post_empty_json_adds_exception_to_response_content_in_debug(self):
app = flask.Flask(__name__)
app.config['DEBUG'] = True
@app.route('/json', methods=['POST'])
def post_json():
flask.request.get_json()
return None
c = app.test_client()
rv = c.post('/json', data=None, content_type='application/json')
assert rv.status_code == 400
assert b'Failed to decode JSON object' in rv.data
def test_post_empty_json_wont_add_exception_to_response_if_no_debug(self):
app = flask.Flask(__name__)
app.config['DEBUG'] = False
@app.route('/json', methods=['POST'])
def post_json():
flask.request.get_json()
return None
c = app.test_client()
rv = c.post('/json', data=None, content_type='application/json')
assert rv.status_code == 400
assert b'Failed to decode JSON object' not in rv.data
def test_json_bad_requests(self):
app = flask.Flask(__name__)
@app.route('/json', methods=['POST'])
def return_json():
return flask.jsonify(foo=text_type(flask.request.get_json()))
c = app.test_client()
rv = c.post('/json', data='malformed', content_type='application/json')
assert rv.status_code == 400
def test_json_custom_mimetypes(self):
app = flask.Flask(__name__)
@app.route('/json', methods=['POST'])
def return_json():
return flask.request.get_json()
c = app.test_client()
rv = c.post('/json', data='"foo"', content_type='application/x+json')
assert rv.data == b'foo'
def test_json_body_encoding(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
return flask.request.get_json()
c = app.test_client()
resp = c.get('/', data=u'"Hällo Wörld"'.encode('iso-8859-15'),
content_type='application/json; charset=iso-8859-15')
assert resp.data == u'Hällo Wörld'.encode('utf-8')
def test_json_as_unicode(self):
app = flask.Flask(__name__)
app.config['JSON_AS_ASCII'] = True
with app.app_context():
rv = flask.json.dumps(u'\N{SNOWMAN}')
assert rv == '"\\u2603"'
app.config['JSON_AS_ASCII'] = False
with app.app_context():
rv = flask.json.dumps(u'\N{SNOWMAN}')
assert rv == u'"\u2603"'
def test_jsonify_basic_types(self):
"""Test jsonify with basic types."""
# Should be able to use pytest parametrize on this, but I couldn't
# figure out the correct syntax
# https://pytest.org/latest/parametrize.html#pytest-mark-parametrize-parametrizing-test-functions
test_data = (0, 1, 23, 3.14, 's', "longer string", True, False,)
app = flask.Flask(__name__)
c = app.test_client()
for i, d in enumerate(test_data):
url = '/jsonify_basic_types{0}'.format(i)
app.add_url_rule(url, str(i), lambda x=d: flask.jsonify(x))
rv = c.get(url)
assert rv.mimetype == 'application/json'
assert flask.json.loads(rv.data) == d
def test_jsonify_dicts(self):
"""Test jsonify with dicts and kwargs unpacking."""
d = dict(
a=0, b=23, c=3.14, d='t', e='Hi', f=True, g=False,
h=['test list', 10, False],
i={'test':'dict'}
)
app = flask.Flask(__name__)
@app.route('/kw')
def return_kwargs():
return flask.jsonify(**d)
@app.route('/dict')
def return_dict():
return flask.jsonify(d)
c = app.test_client()
for url in '/kw', '/dict':
rv = c.get(url)
assert rv.mimetype == 'application/json'
assert flask.json.loads(rv.data) == d
def test_jsonify_arrays(self):
"""Test jsonify of lists and args unpacking."""
l = [
0, 42, 3.14, 't', 'hello', True, False,
['test list', 2, False],
{'test':'dict'}
]
app = flask.Flask(__name__)
@app.route('/args_unpack')
def return_args_unpack():
return flask.jsonify(*l)
@app.route('/array')
def return_array():
return flask.jsonify(l)
c = app.test_client()
for url in '/args_unpack', '/array':
rv = c.get(url)
assert rv.mimetype == 'application/json'
assert flask.json.loads(rv.data) == l
def test_jsonify_date_types(self):
"""Test jsonify with datetime.date and datetime.datetime types."""
test_dates = (
datetime.datetime(1973, 3, 11, 6, 30, 45),
datetime.date(1975, 1, 5)
)
app = flask.Flask(__name__)
c = app.test_client()
for i, d in enumerate(test_dates):
url = '/datetest{0}'.format(i)
app.add_url_rule(url, str(i), lambda val=d: flask.jsonify(x=val))
rv = c.get(url)
assert rv.mimetype == 'application/json'
assert flask.json.loads(rv.data)['x'] == http_date(d.timetuple())
def test_json_attr(self):
app = flask.Flask(__name__)
@app.route('/add', methods=['POST'])
def add():
json = flask.request.get_json()
return text_type(json['a'] + json['b'])
c = app.test_client()
rv = c.post('/add', data=flask.json.dumps({'a': 1, 'b': 2}),
content_type='application/json')
assert rv.data == b'3'
def test_template_escaping(self):
app = flask.Flask(__name__)
render = flask.render_template_string
with app.test_request_context():
rv = flask.json.htmlsafe_dumps('</script>')
assert rv == u'"\\u003c/script\\u003e"'
assert type(rv) == text_type
rv = render('{{ "</script>"|tojson }}')
assert rv == '"\\u003c/script\\u003e"'
rv = render('{{ "<\0/script>"|tojson }}')
assert rv == '"\\u003c\\u0000/script\\u003e"'
rv = render('{{ "<!--<script>"|tojson }}')
assert rv == '"\\u003c!--\\u003cscript\\u003e"'
rv = render('{{ "&"|tojson }}')
assert rv == '"\\u0026"'
rv = render('{{ "\'"|tojson }}')
assert rv == '"\\u0027"'
rv = render("<a ng-data='{{ data|tojson }}'></a>",
data={'x': ["foo", "bar", "baz'"]})
assert rv == '<a ng-data=\'{"x": ["foo", "bar", "baz\\u0027"]}\'></a>'
def test_json_customization(self):
class X(object):
def __init__(self, val):
self.val = val
class MyEncoder(flask.json.JSONEncoder):
def default(self, o):
if isinstance(o, X):
return '<%d>' % o.val
return flask.json.JSONEncoder.default(self, o)
class MyDecoder(flask.json.JSONDecoder):
def __init__(self, *args, **kwargs):
kwargs.setdefault('object_hook', self.object_hook)
flask.json.JSONDecoder.__init__(self, *args, **kwargs)
def object_hook(self, obj):
if len(obj) == 1 and '_foo' in obj:
return X(obj['_foo'])
return obj
app = flask.Flask(__name__)
app.testing = True
app.json_encoder = MyEncoder
app.json_decoder = MyDecoder
@app.route('/', methods=['POST'])
def index():
return flask.json.dumps(flask.request.get_json()['x'])
c = app.test_client()
rv = c.post('/', data=flask.json.dumps({
'x': {'_foo': 42}
}), content_type='application/json')
assert rv.data == b'"<42>"'
def test_modified_url_encoding(self):
class ModifiedRequest(flask.Request):
url_charset = 'euc-kr'
app = flask.Flask(__name__)
app.testing = True
app.request_class = ModifiedRequest
app.url_map.charset = 'euc-kr'
@app.route('/')
def index():
return flask.request.args['foo']
rv = app.test_client().get(u'/?foo=정상처리'.encode('euc-kr'))
assert rv.status_code == 200
assert rv.data == u'정상처리'.encode('utf-8')
if not has_encoding('euc-kr'):
test_modified_url_encoding = None
def test_json_key_sorting(self):
app = flask.Flask(__name__)
app.testing = True
assert app.config['JSON_SORT_KEYS'] == True
d = dict.fromkeys(range(20), 'foo')
@app.route('/')
def index():
return flask.jsonify(values=d)
c = app.test_client()
rv = c.get('/')
lines = [x.strip() for x in rv.data.strip().decode('utf-8').splitlines()]
sorted_by_str = [
'{',
'"values": {',
'"0": "foo",',
'"1": "foo",',
'"10": "foo",',
'"11": "foo",',
'"12": "foo",',
'"13": "foo",',
'"14": "foo",',
'"15": "foo",',
'"16": "foo",',
'"17": "foo",',
'"18": "foo",',
'"19": "foo",',
'"2": "foo",',
'"3": "foo",',
'"4": "foo",',
'"5": "foo",',
'"6": "foo",',
'"7": "foo",',
'"8": "foo",',
'"9": "foo"',
'}',
'}'
]
sorted_by_int = [
'{',
'"values": {',
'"0": "foo",',
'"1": "foo",',
'"2": "foo",',
'"3": "foo",',
'"4": "foo",',
'"5": "foo",',
'"6": "foo",',
'"7": "foo",',
'"8": "foo",',
'"9": "foo",',
'"10": "foo",',
'"11": "foo",',
'"12": "foo",',
'"13": "foo",',
'"14": "foo",',
'"15": "foo",',
'"16": "foo",',
'"17": "foo",',
'"18": "foo",',
'"19": "foo"',
'}',
'}'
]
try:
assert lines == sorted_by_int
except AssertionError:
assert lines == sorted_by_str
class TestSendfile(object):
def test_send_file_regular(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.send_file('static/index.html')
assert rv.direct_passthrough
assert rv.mimetype == 'text/html'
with app.open_resource('static/index.html') as f:
rv.direct_passthrough = False
assert rv.data == f.read()
rv.close()
def test_send_file_xsendfile(self):
app = flask.Flask(__name__)
app.use_x_sendfile = True
with app.test_request_context():
rv = flask.send_file('static/index.html')
assert rv.direct_passthrough
assert 'x-sendfile' in rv.headers
assert rv.headers['x-sendfile'] == \
os.path.join(app.root_path, 'static/index.html')
assert rv.mimetype == 'text/html'
rv.close()
def test_send_file_object(self, catch_deprecation_warnings):
app = flask.Flask(__name__)
with catch_deprecation_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'), mode='rb')
rv = flask.send_file(f)
rv.direct_passthrough = False
with app.open_resource('static/index.html') as f:
assert rv.data == f.read()
assert rv.mimetype == 'text/html'
rv.close()
# mimetypes + etag
assert len(captured) == 2
app.use_x_sendfile = True
with catch_deprecation_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f)
assert rv.mimetype == 'text/html'
assert 'x-sendfile' in rv.headers
assert rv.headers['x-sendfile'] == \
os.path.join(app.root_path, 'static/index.html')
rv.close()
# mimetypes + etag
assert len(captured) == 2
app.use_x_sendfile = False
with app.test_request_context():
with catch_deprecation_warnings() as captured:
f = StringIO('Test')
rv = flask.send_file(f)
rv.direct_passthrough = False
assert rv.data == b'Test'
assert rv.mimetype == 'application/octet-stream'
rv.close()
# etags
assert len(captured) == 1
with catch_deprecation_warnings() as captured:
class PyStringIO(object):
def __init__(self, *args, **kwargs):
self._io = StringIO(*args, **kwargs)
def __getattr__(self, name):
return getattr(self._io, name)
f = PyStringIO('Test')
f.name = 'test.txt'
rv = flask.send_file(f)
rv.direct_passthrough = False
assert rv.data == b'Test'
assert rv.mimetype == 'text/plain'
rv.close()
# attachment_filename and etags
assert len(captured) == 3
with catch_deprecation_warnings() as captured:
f = StringIO('Test')
rv = flask.send_file(f, mimetype='text/plain')
rv.direct_passthrough = False
assert rv.data == b'Test'
assert rv.mimetype == 'text/plain'
rv.close()
# etags
assert len(captured) == 1
app.use_x_sendfile = True
with catch_deprecation_warnings() as captured:
with app.test_request_context():
f = StringIO('Test')
rv = flask.send_file(f)
assert 'x-sendfile' not in rv.headers
rv.close()
# etags
assert len(captured) == 1
def test_attachment(self, catch_deprecation_warnings):
app = flask.Flask(__name__)
with catch_deprecation_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f, as_attachment=True)
value, options = parse_options_header(rv.headers['Content-Disposition'])
assert value == 'attachment'
rv.close()
# mimetypes + etag
assert len(captured) == 2
with app.test_request_context():
assert options['filename'] == 'index.html'
rv = flask.send_file('static/index.html', as_attachment=True)
value, options = parse_options_header(rv.headers['Content-Disposition'])
assert value == 'attachment'
assert options['filename'] == 'index.html'
rv.close()
with app.test_request_context():
rv = flask.send_file(StringIO('Test'), as_attachment=True,
attachment_filename='index.txt',
add_etags=False)
assert rv.mimetype == 'text/plain'
value, options = parse_options_header(rv.headers['Content-Disposition'])
assert value == 'attachment'
assert options['filename'] == 'index.txt'
rv.close()
def test_static_file(self):
app = flask.Flask(__name__)
# default cache timeout is 12 hours
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 12 * 60 * 60
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 12 * 60 * 60
rv.close()
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 3600
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 3600
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 3600
rv.close()
class StaticFileApp(flask.Flask):
def get_send_file_max_age(self, filename):
return 10
app = StaticFileApp(__name__)
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 10
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 10
rv.close()
def test_send_from_directory(self):
app = flask.Flask(__name__)
app.testing = True
app.root_path = os.path.join(os.path.dirname(__file__),
'test_apps', 'subdomaintestmodule')
with app.test_request_context():
rv = flask.send_from_directory('static', 'hello.txt')
rv.direct_passthrough = False
assert rv.data.strip() == b'Hello Subdomain'
rv.close()
def test_send_from_directory_bad_request(self):
app = flask.Flask(__name__)
app.testing = True
app.root_path = os.path.join(os.path.dirname(__file__),
'test_apps', 'subdomaintestmodule')
with app.test_request_context():
with pytest.raises(BadRequest):
flask.send_from_directory('static', 'bad\x00')
class TestLogging(object):
def test_logger_cache(self):
app = flask.Flask(__name__)
logger1 = app.logger
assert app.logger is logger1
assert logger1.name == __name__
app.logger_name = __name__ + '/test_logger_cache'
assert app.logger is not logger1
def test_debug_log(self, capsys):
app = flask.Flask(__name__)
app.debug = True
@app.route('/')
def index():
app.logger.warning('the standard library is dead')
app.logger.debug('this is a debug statement')
return ''
@app.route('/exc')
def exc():
1 // 0
with app.test_client() as c:
c.get('/')
out, err = capsys.readouterr()
assert 'WARNING in test_helpers [' in err
assert os.path.basename(__file__.rsplit('.', 1)[0] + '.py') in err
assert 'the standard library is dead' in err
assert 'this is a debug statement' in err
with pytest.raises(ZeroDivisionError):
c.get('/exc')
def test_debug_log_override(self):
app = flask.Flask(__name__)
app.debug = True
app.logger_name = 'flask_tests/test_debug_log_override'
app.logger.level = 10
assert app.logger.level == 10
def test_exception_logging(self):
out = StringIO()
app = flask.Flask(__name__)
app.config['LOGGER_HANDLER_POLICY'] = 'never'
app.logger_name = 'flask_tests/test_exception_logging'
app.logger.addHandler(StreamHandler(out))
@app.route('/')
def index():
1 // 0
rv = app.test_client().get('/')
assert rv.status_code == 500
assert b'Internal Server Error' in rv.data
err = out.getvalue()
assert 'Exception on / [GET]' in err
assert 'Traceback (most recent call last):' in err
assert '1 // 0' in err
assert 'ZeroDivisionError:' in err
def test_processor_exceptions(self):
app = flask.Flask(__name__)
app.config['LOGGER_HANDLER_POLICY'] = 'never'
@app.before_request
def before_request():
if trigger == 'before':
1 // 0
@app.after_request
def after_request(response):
if trigger == 'after':
1 // 0
return response
@app.route('/')
def index():
return 'Foo'
@app.errorhandler(500)
def internal_server_error(e):
return 'Hello Server Error', 500
for trigger in 'before', 'after':
rv = app.test_client().get('/')
assert rv.status_code == 500
assert rv.data == b'Hello Server Error'
def test_url_for_with_anchor(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
assert flask.url_for('index', _anchor='x y') == '/#x%20y'
def test_url_for_with_scheme(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
assert flask.url_for('index', _external=True, _scheme='https') == 'https://localhost/'
def test_url_for_with_scheme_not_external(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
pytest.raises(ValueError,
flask.url_for,
'index',
_scheme='https')
def test_url_with_method(self):
from flask.views import MethodView
app = flask.Flask(__name__)
class MyView(MethodView):
def get(self, id=None):
if id is None:
return 'List'
return 'Get %d' % id
def post(self):
return 'Create'
myview = MyView.as_view('myview')
app.add_url_rule('/myview/', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/<int:id>', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/create', methods=['POST'],
view_func=myview)
with app.test_request_context():
assert flask.url_for('myview', _method='GET') == '/myview/'
assert flask.url_for('myview', id=42, _method='GET') == '/myview/42'
assert flask.url_for('myview', _method='POST') == '/myview/create'
class TestNoImports(object):
"""Test Flasks are created without import.
Avoiding ``__import__`` helps create Flask instances where there are errors
at import time. Those runtime errors will be apparent to the user soon
enough, but tools which build Flask instances meta-programmatically benefit
from a Flask which does not ``__import__``. Instead of importing to
retrieve file paths or metadata on a module or package, use the pkgutil and
imp modules in the Python standard library.
"""
def test_name_with_import_error(self, modules_tmpdir):
modules_tmpdir.join('importerror.py').write('raise NotImplementedError()')
try:
flask.Flask('importerror')
except NotImplementedError:
assert False, 'Flask(import_name) is importing import_name.'
class TestStreaming(object):
def test_streaming_with_context(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(generate()))
c = app.test_client()
rv = c.get('/?name=World')
assert rv.data == b'Hello World!'
def test_streaming_with_context_as_decorator(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.stream_with_context
def generate(hello):
yield hello
yield flask.request.args['name']
yield '!'
return flask.Response(generate('Hello '))
c = app.test_client()
rv = c.get('/?name=World')
assert rv.data == b'Hello World!'
def test_streaming_with_context_and_custom_close(self):
app = flask.Flask(__name__)
app.testing = True
called = []
class Wrapper(object):
def __init__(self, gen):
self._gen = gen
def __iter__(self):
return self
def close(self):
called.append(42)
def __next__(self):
return next(self._gen)
next = __next__
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(
Wrapper(generate())))
c = app.test_client()
rv = c.get('/?name=World')
assert rv.data == b'Hello World!'
assert called == [42]
| bsd-3-clause | -2,680,392,947,583,573,500 | 35.399177 | 105 | 0.508461 | false |
aromanovich/kozmic-ci | tests/__init__.py | 2 | 2864 | import os
import collections
import sqlalchemy
from alembic.config import Config
from alembic.command import upgrade as alembic_upgrade
from flask.ext.webtest import TestApp, get_scopefunc
from kozmic import create_app, db
from . import factories
class SQLAlchemyMixin(object):
@property
def db(self):
return self.app.extensions['sqlalchemy'].db
def create_database(self, use_migrations=True):
self.db.session = self.db.create_scoped_session({
'scopefunc': get_scopefunc(),
})
self.db.session.execute('SET storage_engine=InnoDB;')
if use_migrations:
try:
self.db.session.execute('TRUNCATE alembic_version;')
except sqlalchemy.exc.ProgrammingError:
self.db.session.rollback()
config = Config('migrations/alembic.ini', 'alembic')
alembic_upgrade(config, 'head')
else:
self.db.create_all()
def drop_database(self):
self.db.drop_all()
class SQLAlchemyFixtureMixin(object):
def get_fixtures(self):
return getattr(self, 'FIXTURES', [])
def load_fixtures(self):
for fixture in self.get_fixtures():
if callable(fixture):
models_to_merge = fixture()
if isinstance(models_to_merge, db.Model):
models_to_merge = [models_to_merge]
elif isinstance(fixture, collections.Iterable):
models_to_merge = fixture
elif isinstance(fixture, self.db.Model):
models_to_merge = [fixture]
else:
raise Exception(
'Don\'t know how to handle fixture of {} type: {}.'.format(
type(fixture), fixture))
for model in models_to_merge:
self.db.session.merge(model)
self.db.session.commit()
self.db.session.remove()
class WebTestMixin(object):
def create_app(self):
config = os.environ.get('KOZMIC_CONFIG', 'kozmic.config.TestingConfig')
return create_app(config)
def setup_app_and_ctx(self):
self.app = self.create_app()
self.ctx = self.app.app_context()
self.ctx.push()
self.w = TestApp(self.app)
def teardown_app_and_ctx(self):
self.ctx.pop()
def login(self, user_id):
with self.w.session_transaction() as sess:
sess['user_id'] = user_id
class TestCase(WebTestMixin, SQLAlchemyMixin, SQLAlchemyFixtureMixin):
def setup_method(self, method):
self.setup_app_and_ctx()
self.drop_database()
self.create_database()
factories.setup(self.db.session)
self.load_fixtures()
def teardown_method(self, method):
self.db.session.rollback()
factories.reset()
self.teardown_app_and_ctx()
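# Illustrative sketch (hypothetical test, not part of the suite): concrete test modules
# subclass TestCase and get a fresh app context, database and WebTest client per test.
class _ExampleTestCase(TestCase):
    def test_client_and_app_are_wired_up(self):
        assert self.app is not None
        assert self.w is not None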
| bsd-3-clause | -1,925,775,605,831,388,200 | 30.130435 | 79 | 0.599162 | false |
matthiasdiener/spack | var/spack/repos/builtin/packages/xcb-util-keysyms/package.py | 5 | 2044 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class XcbUtilKeysyms(AutotoolsPackage):
"""The XCB util modules provides a number of libraries which sit on top
of libxcb, the core X protocol library, and some of the extension
libraries. These experimental libraries provide convenience functions
and interfaces which make the raw X protocol more usable. Some of the
libraries also provide client-side code which is not strictly part of
the X protocol but which have traditionally been provided by Xlib."""
homepage = "https://xcb.freedesktop.org/"
url = "https://xcb.freedesktop.org/dist/xcb-util-keysyms-0.4.0.tar.gz"
version('0.4.0', '2decde7b02b4b3bde99a02c17b64d5dc')
depends_on('[email protected]:')
depends_on('[email protected]:', type='build')
depends_on('pkgconfig', type='build')
| lgpl-2.1 | 2,172,519,143,931,209,500 | 45.454545 | 79 | 0.691292 | false |
frouty/rt5100rs232 | parse_11.py | 1 | 1126 | # -*- coding: utf-8 -*-
import re
import sys
#constant
# i faut que j'arrive à récuperer dans un tuple les lignes entre deux @"
try:
#open the fil
file=open('/home/lof/rt5100rs232/tmp.log','r')
for line in file.readlines():
print 'raw line: {}'.format(line)
if line.startswith("2016"):
print 'it start with 2016'
print 'line.strip():%s' %(line.strip())
print 'line.split():%s' %(line.split())
atpos=line.find('@')
print 'atpos:%s' % (atpos)
print "Will try to find the ascii character start of text : \02"
if line.find('\02') != -1:
print 'find it'
print 'line:{}'.format(line[line.find('\02')+1:])
else:
print "didn't find it"
print 'find : {}'.format(line.find('\x02'))
print 'strip: {}'.format(line.rstrip('\x02'))
#print 'rstrip line:{}'.format(line.rstrip(line.find('\x02')+1))
print 'find : {}'.format(line.find('O'))
print '-'*6
else: print 'nop'
except IOError, (error,strerror):
print "I/O Error(%s): %s" % (error,strerror)
| gpl-3.0 | -1,609,619,639,211,035,000 | 34.125 | 73 | 0.543594 | false |
vicenteneto/flask-ci | setup.py | 1 | 1475 | """
Flask-CI
--------
Provide Continuous Integration support operations for Flask apps.
Links
`````
* `documentation <https://pythonhosted.org/Flask-CI>`_
* `development version <http://github.com/vicenteneto/flask-ci/zipball/master#egg=Flask-CI-dev>`_
"""
from setuptools import setup
version = '1.2.9.1'
packages = ['flask_ci', 'flask_ci.tasks', 'flask_ci.util']
install_requires = ['Coverage', 'Flask-Script', 'Nose', 'Pep8', 'Pylint', 'Pylint-Flask']
flask_ci_pkg_data = ['tasks/pylint.rc']
setup(
name='Flask-CI',
version=version,
description='Continuous Integration support for Flask',
url='https://github.com/vicenteneto/flask-ci',
author='Vicente Neto',
author_email='[email protected]',
license='MIT',
packages=packages,
install_requires=install_requires,
package_data={'flask_ci': flask_ci_pkg_data},
zip_safe=False,
keywords=['ci', 'jenkins', 'hudson', 'flask', 'pylint', 'pep8', 'coverage', 'nose'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Flask',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing'
]
)
| mit | 2,279,425,250,348,792,800 | 31.777778 | 97 | 0.64 | false |
pydanny/dj-stripe | djstripe/middleware.py | 1 | 3290 | """
dj-stripe middleware
Refer to SubscriptionPaymentMiddleware docstring for more info.
"""
import fnmatch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect
from django.urls import resolve
from django.utils.deprecation import MiddlewareMixin
from .utils import subscriber_has_active_subscription
class SubscriptionPaymentMiddleware(MiddlewareMixin):
"""
    Used to redirect users from subscription-locked request destinations.
Rules:
- "(app_name)" means everything from this app is exempt
- "[namespace]" means everything with this name is exempt
- "namespace:name" means this namespaced URL is exempt
- "name" means this URL is exempt
- The entire djstripe namespace is exempt
- If settings.DEBUG is True, then django-debug-toolbar is exempt
- A 'fn:' prefix means the rest of the URL is fnmatch'd.
Example:
```py
DJSTRIPE_SUBSCRIPTION_REQUIRED_EXCEPTION_URLS = (
"[blogs]", # Anything in the blogs namespace
"products:detail", # A ProductDetail view you want shown to non-payers
"home", # Site homepage
"fn:/accounts*", # anything in the accounts/ URL path
)
```
"""
def process_request(self, request):
"""Check the subscriber's subscription status.
        Returns early if the request matches one of the exempt rules outlined in this middleware's docstring.
"""
if self.is_matching_rule(request):
return
return self.check_subscription(request)
def is_matching_rule(self, request):
"""Check according to the rules defined in the class docstring."""
# First, if in DEBUG mode and with django-debug-toolbar, we skip
# this entire process.
from .settings import SUBSCRIPTION_REQUIRED_EXCEPTION_URLS
if settings.DEBUG and request.path.startswith("/__debug__"):
return True
exempt_urls = list(SUBSCRIPTION_REQUIRED_EXCEPTION_URLS) + ["[djstripe]"]
# Second we check against matches
match = resolve(
request.path, getattr(request, "urlconf", settings.ROOT_URLCONF)
)
if "({0})".format(match.app_name) in exempt_urls:
return True
if "[{0}]".format(match.namespace) in exempt_urls:
return True
if "{0}:{1}".format(match.namespace, match.url_name) in exempt_urls:
return True
if match.url_name in exempt_urls:
return True
# Third, we check wildcards:
for exempt in [x for x in exempt_urls if x.startswith("fn:")]:
exempt = exempt.replace("fn:", "")
if fnmatch.fnmatch(request.path, exempt):
return True
return False
def check_subscription(self, request):
"""Redirect to the subscribe page if the user lacks an active subscription."""
from .settings import SUBSCRIPTION_REDIRECT, subscriber_request_callback
subscriber = subscriber_request_callback(request)
if not subscriber_has_active_subscription(subscriber):
if not SUBSCRIPTION_REDIRECT:
raise ImproperlyConfigured("DJSTRIPE_SUBSCRIPTION_REDIRECT is not set.")
return redirect(SUBSCRIPTION_REDIRECT)
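# Illustrative wiring (project settings, not part of this module): the middleware is
# enabled by adding its dotted path to MIDDLEWARE; the neighbouring entries are just
# common Django middleware shown for context.
_EXAMPLE_MIDDLEWARE_SETTING = [
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "djstripe.middleware.SubscriptionPaymentMiddleware",
]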
| bsd-3-clause | -1,938,763,304,425,209,300 | 32.917526 | 88 | 0.659271 | false |
dvalters/HAIL-CAESAR | test/basic_kga_tests.py | 1 | 2099 | import pytest
import json
import rasterio
import numpy.testing as ntest
from numpy import loadtxt
"""
Python testing framework for the HAIL-CAESAR model
Regression/functional tests for standard test cases
of HAIL-CAESAR, comparing to Known Good Answers (KGA's)
Based on Stuart Grieve's LSDTopoTools test framework.
"""
def raster(filename):
"""
Returns a numpy array from a filename for later diffing
"""
out_data = rasterio.open(filename)
return out_data.read(1)
def timeseries(filename, col_number):
"""
Returns a 1D array from the filename specifying the timeseries
file (output.dat) i.e. hydrographs, sedigraphs.
"""
out_data = loadtxt(filename, usecols=col_number)
return out_data
# @pytest.fixture
def rasters_params():
"""
    Reads in result/KGA raster paths from the JSON manifest
    and returns them as a list of pytest params. (For rasters)
"""
with open('known_good_answers/rasters.json') as f:
fixtures = json.loads(f.read())
params = []
for fix in fixtures:
params.append(pytest.param(raster(fixtures[fix]['result']),
raster(fixtures[fix]['expected']),
id=fix))
return params
def timeseries_params():
"""
    Builds the list of pytest params for the timeseries files
"""
with open('known_good_answers/timeseries.json') as f:
fixtures = json.loads(f.read())
params = []
for ts in fixtures:
params.append(pytest.param(timeseries(fixtures[ts]['result'], 1),
timeseries(fixtures[ts]['expected'], 1),
id=ts))
return params
class TestingHAILCAESAR():
@staticmethod
@pytest.mark.parametrize('result,expected', rasters_params())
def test_water_depths(result, expected):
ntest.assert_allclose(result, expected, rtol=1e-03)
@staticmethod
@pytest.mark.parametrize('result,expected', timeseries_params())
def test_hydrograph_lisflood(result, expected):
ntest.assert_allclose(result, expected, rtol=1e-03)
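# Illustrative shape (made-up paths) of the JSON manifests read by rasters_params()
# and timeseries_params(): each test id maps to a 'result' and an 'expected' file path.
_EXAMPLE_FIXTURES = {
    "water_depth": {
        "result": "output/WaterDepths.asc",
        "expected": "known_good_answers/WaterDepths.asc",
    },
}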
| gpl-3.0 | -9,197,241,406,524,184,000 | 26.986667 | 75 | 0.642687 | false |
yokose-ks/edx-platform | common/lib/xmodule/xmodule/seq_module.py | 10 | 5710 | import json
import logging
from lxml import etree
from xblock.fields import Integer, Scope
from xblock.fragment import Fragment
from pkg_resources import resource_string
from .exceptions import NotFoundError
from .fields import Date
from .mako_module import MakoModuleDescriptor
from .progress import Progress
from .x_module import XModule
from .xml_module import XmlDescriptor
log = logging.getLogger(__name__)
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
class_priority = ['video', 'problem']
class SequenceFields(object):
has_children = True
# NOTE: Position is 1-indexed. This is silly, but there are now student
# positions saved on prod, so it's not easy to fix.
position = Integer(help="Last tab viewed in this sequence", scope=Scope.user_state)
due = Date(help="Date that this problem is due by", scope=Scope.settings)
extended_due = Date(
help="Date that this problem is due by for a particular student. This "
"can be set by an instructor, and will override the global due "
"date if it is set to a date that is later than the global due "
"date.",
default=None,
scope=Scope.user_state,
)
class SequenceModule(SequenceFields, XModule):
''' Layout module which lays out content in a temporal sequence
'''
js = {'coffee': [resource_string(__name__,
'js/src/sequence/display.coffee')],
'js': [resource_string(__name__, 'js/src/sequence/display/jquery.sequence.js')]}
css = {'scss': [resource_string(__name__, 'css/sequence/display.scss')]}
js_module_name = "Sequence"
def __init__(self, *args, **kwargs):
super(SequenceModule, self).__init__(*args, **kwargs)
# if position is specified in system, then use that instead
if getattr(self.system, 'position', None) is not None:
self.position = int(self.system.position)
def get_progress(self):
''' Return the total progress, adding total done and total available.
(assumes that each submodule uses the same "units" for progress.)
'''
# TODO: Cache progress or children array?
children = self.get_children()
progresses = [child.get_progress() for child in children]
progress = reduce(Progress.add_counts, progresses, None)
return progress
def handle_ajax(self, dispatch, data): # TODO: bounds checking
''' get = request.POST instance '''
if dispatch == 'goto_position':
self.position = int(data['position'])
return json.dumps({'success': True})
raise NotFoundError('Unexpected dispatch type')
def student_view(self, context):
# If we're rendering this sequence, but no position is set yet,
# default the position to the first element
if self.position is None:
self.position = 1
## Returns a set of all types of all sub-children
contents = []
fragment = Fragment()
for child in self.get_display_items():
progress = child.get_progress()
rendered_child = child.render('student_view', context)
fragment.add_frag_resources(rendered_child)
titles = child.get_content_titles()
childinfo = {
'content': rendered_child.content,
'title': "\n".join(titles),
'page_title': titles[0] if titles else '',
'progress_status': Progress.to_js_status_str(progress),
'progress_detail': Progress.to_js_detail_str(progress),
'type': child.get_icon_class(),
'id': child.id,
}
if childinfo['title'] == '':
childinfo['title'] = child.display_name_with_default
contents.append(childinfo)
params = {'items': contents,
'element_id': self.location.html_id(),
'item_id': self.id,
'position': self.position,
'tag': self.location.category,
'ajax_url': self.system.ajax_url,
}
fragment.add_content(self.system.render_template('seq_module.html', params))
return fragment
def get_icon_class(self):
child_classes = set(child.get_icon_class()
for child in self.get_children())
new_class = 'other'
for c in class_priority:
if c in child_classes:
new_class = c
return new_class
class SequenceDescriptor(SequenceFields, MakoModuleDescriptor, XmlDescriptor):
mako_template = 'widgets/sequence-edit.html'
module_class = SequenceModule
js = {'coffee': [resource_string(__name__, 'js/src/sequence/edit.coffee')]}
js_module_name = "SequenceDescriptor"
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
for child in xml_object:
try:
child_block = system.process_xml(etree.tostring(child, encoding='unicode'))
children.append(child_block.scope_ids.usage_id)
except Exception as e:
log.exception("Unable to load child when parsing Sequence. Continuing...")
if system.error_tracker is not None:
system.error_tracker(u"ERROR: {0}".format(e))
continue
return {}, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('sequential')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
| agpl-3.0 | -297,502,745,468,057,500 | 36.565789 | 91 | 0.60683 | false |
yugangw-msft/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/AzureResource/autorestresourceflatteningtestservice/auto_rest_resource_flattening_test_service.py | 2 | 17142 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from msrest.pipeline import ClientRawResponse
import uuid
from . import models
class AutoRestResourceFlatteningTestServiceConfiguration(AzureConfiguration):
"""Configuration for AutoRestResourceFlatteningTestService
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param accept_language: Gets or sets the preferred language for the
response.
:type accept_language: str
:param long_running_operation_retry_timeout: Gets or sets the retry
timeout in seconds for Long Running Operations. Default value is 30.
:type long_running_operation_retry_timeout: int
:param generate_client_request_id: When set to true a unique
x-ms-client-request-id value is generated and included in each request.
Default is true.
:type generate_client_request_id: bool
:param str base_url: Service URL
:param str filepath: Existing config
"""
def __init__(
self, credentials, accept_language='en-US', long_running_operation_retry_timeout=30, generate_client_request_id=True, base_url=None, filepath=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if accept_language is not None and not isinstance(accept_language, str):
raise TypeError("Optional parameter 'accept_language' must be str.")
if not base_url:
base_url = 'http://localhost'
super(AutoRestResourceFlatteningTestServiceConfiguration, self).__init__(base_url, filepath)
self.add_user_agent('autorestresourceflatteningtestservice/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.accept_language = accept_language
self.long_running_operation_retry_timeout = long_running_operation_retry_timeout
self.generate_client_request_id = generate_client_request_id
class AutoRestResourceFlatteningTestService(object):
"""Resource Flattening for AutoRest
:ivar config: Configuration for client.
:vartype config: AutoRestResourceFlatteningTestServiceConfiguration
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param accept_language: Gets or sets the preferred language for the
response.
:type accept_language: str
:param long_running_operation_retry_timeout: Gets or sets the retry
timeout in seconds for Long Running Operations. Default value is 30.
:type long_running_operation_retry_timeout: int
:param generate_client_request_id: When set to true a unique
x-ms-client-request-id value is generated and included in each request.
Default is true.
:type generate_client_request_id: bool
:param str base_url: Service URL
:param str filepath: Existing config
"""
def __init__(
self, credentials, accept_language='en-US', long_running_operation_retry_timeout=30, generate_client_request_id=True, base_url=None, filepath=None):
self.config = AutoRestResourceFlatteningTestServiceConfiguration(credentials, accept_language, long_running_operation_retry_timeout, generate_client_request_id, base_url, filepath)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
def put_array(
self, resource_array=None, custom_headers=None, raw=False, **operation_config):
"""Put External Resource as an Array.
:param resource_array: External Resource as an Array to put
:type resource_array: list of :class:`Resource
<fixtures.acceptancetestsazureresource.models.Resource>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsazureresource.models.ErrorException>`
"""
# Construct URL
url = '/azure/resource-flatten/array'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if resource_array is not None:
body_content = self._serialize.body(resource_array, '[Resource]')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_array(
self, custom_headers=None, raw=False, **operation_config):
"""Get External Resource as an Array.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: list of :class:`FlattenedProduct
<fixtures.acceptancetestsazureresource.models.FlattenedProduct>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsazureresource.models.ErrorException>`
"""
# Construct URL
url = '/azure/resource-flatten/array'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[FlattenedProduct]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_dictionary(
self, resource_dictionary=None, custom_headers=None, raw=False, **operation_config):
"""Put External Resource as a Dictionary.
:param resource_dictionary: External Resource as a Dictionary to put
:type resource_dictionary: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsazureresource.models.ErrorException>`
"""
# Construct URL
url = '/azure/resource-flatten/dictionary'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if resource_dictionary is not None:
body_content = self._serialize.body(resource_dictionary, '{FlattenedProduct}')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_dictionary(
self, custom_headers=None, raw=False, **operation_config):
"""Get External Resource as a Dictionary.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsazureresource.models.ErrorException>`
"""
# Construct URL
url = '/azure/resource-flatten/dictionary'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{FlattenedProduct}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_resource_collection(
self, resource_complex_object=None, custom_headers=None, raw=False, **operation_config):
"""Put External Resource as a ResourceCollection.
:param resource_complex_object: External Resource as a
ResourceCollection to put
:type resource_complex_object: :class:`ResourceCollection
<fixtures.acceptancetestsazureresource.models.ResourceCollection>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsazureresource.models.ErrorException>`
"""
# Construct URL
url = '/azure/resource-flatten/resourcecollection'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if resource_complex_object is not None:
body_content = self._serialize.body(resource_complex_object, 'ResourceCollection')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_resource_collection(
self, custom_headers=None, raw=False, **operation_config):
"""Get External Resource as a ResourceCollection.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ResourceCollection
<fixtures.acceptancetestsazureresource.models.ResourceCollection>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsazureresource.models.ErrorException>`
"""
# Construct URL
url = '/azure/resource-flatten/resourcecollection'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceCollection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
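# Minimal usage sketch (illustrative only, not part of the generated client; assumes
# valid msrestazure credentials and a reachable test endpoint):
#
#     from msrestazure.azure_active_directory import UserPassCredentials
#     credentials = UserPassCredentials('user@example.com', 'password')
#     client = AutoRestResourceFlatteningTestService(credentials)
#     client.put_array(resource_array=[{'location': 'West US'}])
#     flattened_products = client.get_array()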
| mit | -4,734,232,642,262,146,000 | 41.74813 | 188 | 0.667017 | false |
manjunaths/tensorflow | tensorflow/contrib/factorization/python/ops/gmm.py | 18 | 5883 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering using tf.Learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
class GMM(estimator.Estimator):
"""An estimator for GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
config=None):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
config: See Estimator
"""
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
super(GMM, self).__init__(
model_fn=self._model_builder(), model_dir=model_dir, config=config)
def predict_assignments(self, input_fn=None, batch_size=None, outputs=None):
"""See BaseEstimator.predict."""
results = self.predict(input_fn=input_fn,
batch_size=batch_size,
outputs=outputs)
for result in results:
yield result[GMM.ASSIGNMENTS]
def score(self, input_fn=None, batch_size=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
batch_size: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
results = self.evaluate(input_fn=input_fn, batch_size=batch_size,
steps=steps)
return np.sum(results[GMM.SCORES])
def weights(self):
"""Returns the cluster weights."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())],
1)
return features
def _model_builder(self):
"""Creates a model function."""
def _model_fn(features, labels, mode):
"""Model function."""
assert labels is None, labels
(all_scores, model_predictions, losses, training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features), self._training_initial_clusters,
self._num_clusters, self._random_seed, self._covariance_type,
self._params)
incr_step = state_ops.assign_add(variables.get_global_step(), 1)
loss = math_ops.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
predictions = {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
eval_metric_ops = {
GMM.SCORES: _streaming_sum(loss),
}
return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss, train_op=training_op)
return _model_fn
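# Minimal usage sketch (illustrative only; `my_input_fn` is a hypothetical input_fn
# returning a float feature matrix, as expected by tf.contrib.learn estimators):
#
#     gmm = GMM(num_clusters=3, model_dir='/tmp/gmm_example')
#     gmm.fit(input_fn=my_input_fn, steps=100)
#     assignments = list(gmm.predict_assignments(input_fn=my_input_fn))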
| apache-2.0 | 5,403,842,560,407,463,000 | 37.703947 | 85 | 0.666497 | false |
mahmoodkhan/ranewal | htdocs/epro/models.py | 1 | 29987 | from __future__ import unicode_literals
import datetime, time, logging
from decimal import Decimal
from django.core.urlresolvers import reverse_lazy
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.exceptions import ValidationError
from django.db.models import Q, Sum, Max, Min, Count
from django.db import models
from django.forms.models import model_to_dict
from django.utils import timezone
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from djangocosign.models import UserProfile, Region, Country, Office
from .fields import USDCurrencyField
class ModelDiffMixin(object):
"""
A model mixin that tracks model fields' values and provide some useful api
to know what fields have been changed.
Usage:
>>> p = Place()
>>> p.has_changed
False
>>> p.changed_fields
[]
>>> p.rank = 42
>>> p.has_changed
True
>>> p.changed_fields
['rank']
>>> p.diff
{'rank': (0, 42)}
>>> p.categories = [1, 3, 5]
>>> p.diff
{'categories': (None, [1, 3, 5]), 'rank': (0, 42)}
>>> p.get_field_diff('categories')
(None, [1, 3, 5])
>>> p.get_field_diff('rank')
(0, 42)
>>>
"""
def __init__(self, *args, **kwargs):
super(ModelDiffMixin, self).__init__(*args, **kwargs)
self.__initial = self._dict
@property
def diff(self):
d1 = self.__initial
d2 = self._dict
diffs = [(k, (v, d2[k])) for k, v in d1.items() if v != d2[k]]
return dict(diffs)
@property
def has_changed(self):
return bool(self.diff)
@property
def changed_fields(self):
return self.diff.keys()
def get_field_diff(self, field_name):
"""
Returns a diff for field if it's changed and None otherwise.
"""
return self.diff.get(field_name, None)
def save(self, *args, **kwargs):
"""
        Saves the model and sets the initial state.
"""
super(ModelDiffMixin, self).save(*args, **kwargs)
self.__initial = self._dict
@property
def _dict(self):
return model_to_dict(self, fields=[field.name for field in
self._meta.fields])
class Meta:
abstract = True
def validate_even(value):
if value % 2 != 0:
raise ValidationError('%s is not an even number' % value)
def validate_positive(value):
if value <= 0:
raise ValidationError('%s is not greater than zero' % value)
def validate_gl_account(value):
try:
if len(str(int(value))) > 4:
raise ValidationError("%s must be a four digits long number" % value)
except Exception as e:
raise ValidationError("%s must be a four digits long number" % value)
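# Illustrative behaviour of the validators above (hypothetical inputs):
# validate_gl_account(1234) passes, while validate_gl_account(12345) and
# validate_gl_account("abcd") raise ValidationError; validate_positive(0) also raises.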
class CommonBaseAbstractModel(models.Model):
created_by = models.ForeignKey(UserProfile, blank=True, null=True, related_name="%(app_label)s_%(class)s_created")
updated_by = models.ForeignKey(UserProfile, blank=True, null=True, related_name="%(app_label)s_%(class)s_updated")
created = models.DateTimeField(editable=False, blank=True, null=True)
updated = models.DateTimeField(editable=False, blank=True, null=True)
class Meta:
abstract = True
def save(self, *args, **kwargs):
now_utc = datetime.datetime.utcnow().replace(tzinfo=utc)
if self.id:
self.updated = now_utc
else:
self.created = now_utc
super(CommonBaseAbstractModel, self).save(*args, **kwargs)
class Currency(CommonBaseAbstractModel):
country = models.ForeignKey(Country, blank=False, null=False, on_delete=models.CASCADE, related_name="currencies")
code = models.CharField(unique=True, max_length=3, null=False, blank=False)
name = models.CharField(max_length=50, null=True, blank=True)
def __unicode__(self):
return self.code
def __str__(self):
return self.code
class Meta:
verbose_name = 'Currency'
verbose_name_plural = "Currencies"
ordering = ['country', 'code']
class FundCode(CommonBaseAbstractModel):
country = models.ForeignKey(Country, related_name='fund_codes', blank=False, null=False, on_delete=models.CASCADE)
code = models.CharField(unique=True, max_length=5, null=False, blank=False, db_index=True)
def __unicode__(self):
return u'%s' % self.code
def __ustr__(self):
return u'%s' % self.code
class Meta:
verbose_name = 'Fund Code'
ordering = ['code']
class DeptCode(CommonBaseAbstractModel):
country = models.ForeignKey(Country, related_name='dept_codes', blank=False, null=False, on_delete=models.CASCADE)
code = models.CharField(unique=True, max_length=5, null=False, blank=False, db_index=True)
def __unicode__(self):
return u'%s' % self.code
def __str__(self):
return '%s' % self.code
class Meta:
verbose_name = 'Department Code'
ordering = ['code']
class LinCode(CommonBaseAbstractModel):
country = models.ForeignKey(Country, related_name='lin_codes', blank=False, null=False, on_delete=models.CASCADE)
lin_code = models.CharField(unique=True, max_length=9, null=True, blank=True)
def __unicode__(self):
return u'%s' % self.lin_code
def __str__(self):
return '%s' % self.lin_code
class Meta:
verbose_name = 'LIN Code'
ordering = ['lin_code']
class ActivityCode(CommonBaseAbstractModel):
country = models.ForeignKey(Country, related_name='activity_codes', blank=False, null=False, on_delete=models.CASCADE)
activity_code = models.CharField(unique=True, max_length=9, null=True, blank=True)
def __unicode__(self):
return u'%s' % self.activity_code
def __str__(self):
return '%s' % self.activity_code
class Meta:
verbose_name = 'Activity Code'
ordering = ['activity_code']
class PurchaseRequestStatus(CommonBaseAbstractModel):
""" A PR goes through these statuses.
STATUS_DRAFT = 'drafted'
STATUS_PENDING_PROCUREMENT_VERIFICATION = 'Procurement Verified'
STATUS_PENDING_APPROVAL = 'Approved'
    STATUS_PENDING_APPROVAL2 = 'Approved II'
STATUS_PENDING_FINANCIAL_REVIEW = 'Finance Reviewed'
STATUS_ONGOING = 'Open'
STATUS_COMPLETED = 'completed'
STATUS_ONHOLD = 'onhold'
STATUS_CANCELED = 'canceled'
STATUS_REJECTED = 'rejected'
"""
status = models.CharField(max_length=50, null=False, blank=False)
def __unicode__(self):
return u'%s' % self.status
def __str__(self):
return '%s' % self.status
class Meta:
verbose_name = 'Purchase Request Status'
verbose_name_plural = "Status"
ordering = ['status']
class PurchaseRequestManager(models.Manager):
@property
def goods(self):
return self.get_query_set().filter(pr_type=PurchaseRequest.TYPE_GOODS)
@property
def services(self):
return self.get_query_set().filter(pr_type=PurchaseRequest.TYPE_SERVICES)
class PurchaseRequest(CommonBaseAbstractModel):
TYPE_GOODS = '0'
TYPE_SERVICES = '1'
PR_TYPE_CHOICES = (
(TYPE_GOODS, 'Goods'),
(TYPE_SERVICES, 'Services'),
)
EXPENSE_TYPE_PROGRAM = '0'
EXPENSE_TYPE_OPERATIONAL = '1'
EXPENSE_TYPE_CHOICES = (
(EXPENSE_TYPE_PROGRAM, 'Program'),
(EXPENSE_TYPE_OPERATIONAL, 'Operational'),
)
country = models.ForeignKey(Country, related_name='prs', null=False, blank=False, on_delete=models.CASCADE, help_text="<span style='color:red'>*</span> The country in which this PR is originated")
office = models.ForeignKey(Office, related_name='prs', null=True, blank=True, on_delete=models.DO_NOTHING, help_text="<span style='color:red'>*</span> The Office in which this PR is originated")
sno = models.PositiveIntegerField(verbose_name='SNo', null=False, blank=False)
    currency = models.ForeignKey(Currency, related_name='prs', null=True, blank=True, on_delete=models.CASCADE, help_text="<span style='color:red'>*</span> This is the currency in which the transaction occurs.")
dollar_exchange_rate = models.DecimalField(max_digits=10, decimal_places=2, validators=[MinValueValidator(0.0)], null=False, blank=False, help_text="<span style='color:red'>*</span> This exchange rate may be different on the PR submission day.")
    delivery_address = models.CharField(max_length=100, blank=False, null=False, help_text="<span style='color:red'>*</span> The delivery address should be as specific as possible.")
project_reference = models.CharField(max_length=140, null=False, blank=False, help_text="<span style='color:red'>*</span> Project Reference is a brief summary of the purpose of this PR")
    required_date = models.DateField(auto_now=False, auto_now_add=False, null=False, blank=False, help_text="<span style='color:red'>*</span> The required date by which this PR should be fulfilled.")
originator = models.ForeignKey(UserProfile, related_name='prs', on_delete=models.DO_NOTHING)
origination_date = models.DateField(auto_now=False, auto_now_add=True)
procurement_review_by = models.ForeignKey(UserProfile, related_name='pr_procurement_verifier', blank=True, null=True, on_delete=models.DO_NOTHING)
procurement_review_date = models.DateField(auto_now=False, auto_now_add=False, blank=True, null=True)
approver1 = models.ForeignKey(UserProfile, blank=True, null=True, related_name='pr_approvers1',
on_delete=models.SET_NULL,
help_text="<span style='color:red'>*</span> This is the person who manages the Fund")
approval1_date = models.DateField(auto_now=False, blank=True, null=True, auto_now_add=False)
approver2 = models.ForeignKey(UserProfile, blank=True, null=True, related_name='pr_approver2',
on_delete=models.SET_NULL,
help_text="Refer to your <abbr title='Approval Authority Matrix'>AAM</abbr> to determine if you need to specify a second approval.")
approval2_date = models.DateField(auto_now=False, auto_now_add=False, blank=True, null=True)
finance_reviewer = models.ForeignKey(UserProfile, blank=True, null=True, related_name='pr_reviewer',
on_delete=models.SET_NULL)
finance_review_date = models.DateField(auto_now=False, auto_now_add=False, blank=True, null=True)
submission_date = models.DateField(auto_now=False, auto_now_add=False, blank=True, null=True)
status = models.ForeignKey(PurchaseRequestStatus, blank=False, null=False, on_delete=models.DO_NOTHING)
status_notes = models.TextField(null=True, blank=True)
pr_type = models.CharField(max_length=50, choices=PR_TYPE_CHOICES, default=TYPE_GOODS, null=True, blank=True)
expense_type = models.CharField(max_length=50, choices=EXPENSE_TYPE_CHOICES, null=True, blank=True)
processing_office = models.ForeignKey(Office, related_name='pr_processing_office', blank=True, null=True, on_delete=models.SET_NULL)
assignedBy = models.ForeignKey(UserProfile, blank=True, null=True, related_name='assigner', on_delete=models.SET_NULL)
assignedTo = models.ForeignKey(UserProfile, blank=True, null=True, related_name='assignee', on_delete=models.SET_NULL)
assigned_date = models.DateField(auto_now=False, auto_now_add=False, blank=True, null=True)
notes = models.TextField(null=True, blank=True)
preferred_supplier = models.BooleanField(default=False)
cancellation_requested_date = models.DateField(auto_now=False, auto_now_add=False, blank=True, null=True)
cancellation_requested_by = models.ForeignKey(UserProfile, null=True, blank=True, related_name='cancellation_requested_by', on_delete=models.SET_NULL)
cancelled_by = models.ForeignKey(UserProfile, null=True, blank=True, related_name='pr_cancelled_by', on_delete=models.SET_NULL)
cancellation_date = models.DateTimeField(auto_now=False, auto_now_add=False, blank=True, null=True)
objects = PurchaseRequestManager() # Changing the default manager
def __unicode__(self):
return '%s-%s: %s' % (self.office.name, self.sno, self.project_reference)
def __str__(self):
return '%s-%s: %s' % (self.office.name, self.sno, self.project_reference)
def get_absolute_url(self):
# Redirect to this URl after an object is created using CreateView
return reverse_lazy('pr_view', kwargs={'pk': self.pk}) #args=[str(self.id)])
    def clean(self):
        # Don't allow draft purchase_requests to have a submission_date
        if self.pk and self.status_id and self.status.status == 'Drafted' and self.submission_date is not None:
            raise ValidationError(_('Draft Purchase Requests may not have a submission date.'))
def save(self, *args, **kwargs):
if not self.id:
status = PurchaseRequestStatus.objects.get(status='Drafted')
self.status = status
            self.pr_type = PurchaseRequest.TYPE_GOODS
# increase the PR serial number by one for by office
pr_count_by_office = PurchaseRequest.objects.filter(office=self.office.pk).aggregate(Max('sno'))['sno__max']
if pr_count_by_office is None:
pr_count_by_office = 0
pr_count_by_office = pr_count_by_office + 1
self.sno = pr_count_by_office
super(PurchaseRequest, self).save(*args, **kwargs)
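    # Illustrative note on the per-office serial logic above (hypothetical values):
    # if an office's highest existing sno is 41, the next PR saved for that office
    # gets sno 42; the first PR ever saved for an office gets sno 1.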
class Meta:
verbose_name = 'Purchase Request'
verbose_name_plural = "Purchase Requests"
ordering = ['country', 'office']
get_latest_by = "submission_date"
class PurchaseRequestLog(CommonBaseAbstractModel):
pr = models.ForeignKey(PurchaseRequest, related_name='log_entries', on_delete=models.CASCADE)
reference = models.CharField(max_length=255, null=True, blank=True)
old_value = models.CharField(max_length=255, null=True, blank=True)
new_value = models.CharField(max_length=255, null=True, blank=True)
description = models.CharField(max_length=255, null=True, blank=True)
manual_entry = models.BooleanField(default=False)
changed_by = models.ForeignKey(UserProfile, null=True, blank=False, related_name='log_entries', on_delete=models.DO_NOTHING)
class Vendor(CommonBaseAbstractModel):
country = models.ForeignKey(Country, related_name='vendors', null=True, blank=False, on_delete=models.SET_NULL)
name = models.CharField(max_length=100, null=False, blank=False)
description = models.CharField(max_length=255, null=True, blank=True)
contact_person = models.CharField(max_length=100, null=True, blank=True)
address = models.CharField(max_length=100, null=True, blank=True)
phone = models.CharField(max_length=25, null=True, blank=True)
email = models.EmailField(null=True, blank=True)
black_listed = models.BooleanField(default=False)
reason_black_listed = models.CharField(max_length=255, null=True, blank=True)
black_listed_date = models.DateField(null=True, blank=True)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse_lazy('vendor', kwargs={'pk': self.pk}) #args=[str(self.id)])
class Meta:
verbose_name = 'Vendor'
class Unit(CommonBaseAbstractModel):
mnemonic = models.CharField(max_length=4, null=False, blank=False)
description = models.CharField(max_length=100, null=True, blank=True)
def __unicode__(self):
return "%s - %s" % (self.mnemonic, self.description)
def __str__(self):
return "%s - %s" % (self.mnemonic, self.description)
class Meta:
verbose_name = "Unit"
ordering = ["description"]
class Item(CommonBaseAbstractModel):
item_sno = models.PositiveIntegerField(verbose_name='SNo')
purchase_request = models.ForeignKey(PurchaseRequest,
related_name='items',
on_delete=models.CASCADE)
quantity_requested = models.PositiveIntegerField(validators=[MinValueValidator(0.0)], verbose_name='Quantity')
#unit = models.CharField(max_length=20, null=False, blank=False)
unit = models.ForeignKey(Unit, related_name='units', null=False, blank=False, on_delete=models.DO_NOTHING)
description_pr = models.TextField(null=False, blank=False, verbose_name='Description',
help_text='Provide detailed description')
description_po = models.TextField(null=False, blank=True)
price_estimated_local = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0.0)],
verbose_name='Price',
help_text='Price of one unit in PR currency',)
price_estimated_usd = USDCurrencyField(verbose_name='Price USD', help_text='Price of one unit in US Dollars')
price_estimated_local_subtotal = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0.0)],
verbose_name='Price Subtotal',
default=Decimal('0.00'),)
price_estimated_usd_subtotal = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0.0)],
verbose_name='Price estimated in US Dollars Subtotal',)
default_finance_codes = models.BooleanField(default=False)
def __unicode__(self):
return u'%s' % (self.description_pr)
def __str__(self):
return '%s' % (self.description_pr)
def save(self, *args, **kwargs):
if not self.description_po and self.description_pr:
self.description_po = self.description_pr
self.price_estimated_local_subtotal = round(self.price_estimated_local * self.quantity_requested,2)
self.price_estimated_usd = round(self.price_estimated_local / self.purchase_request.dollar_exchange_rate, 2)
self.price_estimated_usd_subtotal = round(self.price_estimated_usd * self.quantity_requested, 2)
if not self.id:
# increase the item serial number by one for the current Purchase Request
items_count_by_pr = Item.objects.filter(purchase_request=self.purchase_request.pk).aggregate(Max('item_sno'))['item_sno__max']
if items_count_by_pr is None:
items_count_by_pr = 0
items_count_by_pr = items_count_by_pr + 1
self.item_sno = items_count_by_pr
super(Item, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse_lazy('pr_view', kwargs={'pk': self.purchase_request.pk}) #args=[str(self.id)])
@property
def allocation_percent_total(self):
total = self.finance_codes.all().aggregate(Sum('allocation_percent'))['allocation_percent__sum']
return total
class Meta:
verbose_name = 'Item'
ordering = ['purchase_request']
#order_with_respect_to = 'purchase_request'
def upload_path_handler(instance, filename):
return "purchase_request/{office}/pr_{pr_pk}/item_{item_id}/{file}".format(office=instance.item.purchase_request.office.name, pr_pk=instance.item.purchase_request.pk, item_id=instance.item.id, file=filename)
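# Illustrative only (hypothetical values): for an office named "Kabul", a purchase
# request with pk 12, an item with id 7 and an uploaded file "quote.pdf", the handler
# above returns "purchase_request/Kabul/pr_12/item_7/quote.pdf".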
class ItemAttachment(CommonBaseAbstractModel):
item = models.ForeignKey(Item, blank=False, null=False)
file = models.FileField(upload_to=upload_path_handler)
file_type = models.CharField(max_length=100, null=True, blank=True)
file_size = models.PositiveIntegerField(null=True, blank=True)
class Meta:
verbose_name = "Attachment"
ordering = ["item",]
def get_absolute_url(self):
return reverse_lazy('item_attachment', kwargs={'pk': self.item.purchase_request.pk})
class FinanceCodes(CommonBaseAbstractModel):
item = models.ForeignKey(Item, related_name='finance_codes', null=False, blank=False)
gl_account = models.PositiveIntegerField(validators=[validate_positive, validate_gl_account,], null=False, blank=False)
fund_code = models.ForeignKey(FundCode, null=False, blank=False)
dept_code = models.ForeignKey(DeptCode, null=False, blank=False)
office_code = models.ForeignKey(Office, null=False, blank=False)
lin_code = models.ForeignKey(LinCode, blank=True, null=True)
activity_code = models.ForeignKey(ActivityCode, blank=True, null=True)
employee_id = models.PositiveIntegerField(validators=[validate_positive,], null=True, blank=True)
allocation_percent = models.DecimalField(max_digits=5, decimal_places=2,
validators=[MaxValueValidator(100.00), MinValueValidator(1.00) ],
blank=False, null=False,
default=Decimal("100.00"))
def __unicode__(self):
return "%s-%s" % (self.gl_account, str(self.fund_code))
def __str__(self):
return "%s-%s" % (self.gl_account, str(self.fund_code))
def get_absolute_url(self):
return reverse_lazy('pr_view', kwargs={'pk': self.item.purchase_request.pk})
class QuotationAnalysis(CommonBaseAbstractModel):
analysis_date = models.DateField(null=True, blank=True, auto_now=False, auto_now_add=False)
delivery_date = models.DateField(null=True, blank=True, auto_now=False, auto_now_add=False)
selected_vendor = models.ForeignKey(Vendor, null=True, blank=True, related_name='qutoation_analyses')
justification = models.TextField(null=True, blank=True)
notes = models.TextField(null=True, blank=True)
class RequestForQuotation(CommonBaseAbstractModel):
purchase_request = models.ForeignKey(PurchaseRequest, related_name='rfqs', on_delete=models.CASCADE)
vendor = models.ForeignKey(Vendor, related_name='rfqs', on_delete=models.SET_NULL, null=True, blank=True)
date_submitted_to_vendor = models.DateField(null=True, blank=True, auto_now=False, auto_now_add=False)
date_received_from_vendor = models.DateField(null=True, blank=True, auto_now=False, auto_now_add=False)
insurance = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0.0)],
default=Decimal('0.00'),)
shipping_and_handling = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0.0)],
default=Decimal('0.00'),)
vat = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0.0)],
default=Decimal('0.00'),)
meets_specs = models.BooleanField(default=False)
meets_compliance = models.BooleanField(default=False)
complete_order_delivery_date = models.DateField(null=True, blank=True, auto_now=False, auto_now_add=False)
complete_order_payment_terms = models.CharField(max_length=255, null=True, blank=True)
notes = models.TextField(blank=True, null=True)
quotation_analysis = models.ForeignKey(QuotationAnalysis, related_name='rfqs', null=True, blank=True)
def save(self, *args, **kwargs):
        self.complete_order_delivery_date = self.rfq_items.aggregate(Max('delivery_date'))['delivery_date__max']
super(RequestForQuotation, self).save(*args, **kwargs)
class RequestForQuotationItem(CommonBaseAbstractModel):
request_for_quotation = models.ForeignKey(RequestForQuotation, related_name='rfq_items')
item = models.ForeignKey(Item, related_name='request_for_quotation_items')
quoted_price_local_currency = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0.0)],
default=Decimal('0.00'),)
quoted_price_local_currency_subtotal = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0.0)],
default=Decimal('0.00'),)
payment_terms = models.CharField(max_length=255, null=True, blank=True)
delivery_date = models.DateField(null=True, blank=True, auto_now=False, auto_now_add=False)
warranty = models.CharField(max_length=255, null=True, blank=True)
validity_of_offer = models.CharField(max_length=255, null=True, blank=True)
origin_of_goods = models.CharField(max_length=255, null=True, blank=True)
remarks = models.CharField(max_length=255, null=True, blank=True)
class PurchaseOrder(CommonBaseAbstractModel):
purchase_request = models.ForeignKey(PurchaseRequest,
related_name='purchase_orders',
on_delete=models.CASCADE)
country = models.ForeignKey(Country, related_name='purchase_orders', on_delete=models.CASCADE)
office = models.ForeignKey(Office, related_name='purchase_orders', on_delete=models.DO_NOTHING)
currency = models.ForeignKey(Currency, related_name='purchase_orders',
on_delete=models.SET_NULL, null=True, blank=True)
po_issued_date = models.DateField(auto_now=False, auto_now_add=False, null=True, blank=True)
vendor = models.ForeignKey(Vendor, related_name='purchase_orders', on_delete=models.DO_NOTHING, null=True, blank=True)
expected_delivery_date = models.DateField(auto_now=False, auto_now_add=False, null=True, blank=True)
#https://docs.djangoproject.com/en/1.8/topics/db/models/#extra-fields-on-many-to-many-relationships
items = models.ManyToManyField(Item, through='PurchaseOrderItem')
notes = models.TextField(null=False, blank=True)
total_local = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0.0)],
verbose_name='Total price in PR currency ',
default=Decimal('0.00'),)
total_usd = USDCurrencyField(verbose_name='Total USD', help_text='Total Price in US Dollars')
quotation_analysis = models.ForeignKey(QuotationAnalysis, related_name='purchase_orders', null=True, blank=True)
def save(self, *args, **kwargs):
        self.total_local = self.purchase_order_items.aggregate(Sum('price_subtotal_local'))['price_subtotal_local__sum']
        self.total_usd = self.purchase_order_items.aggregate(Sum('price_subtotal_usd'))['price_subtotal_usd__sum']
if self.quotation_analysis:
self.vendor = self.quotation_analysis.selected_vendor
self.expected_delivery_date = self.quotation_analysis.delivery_date
super(PurchaseOrder, self).save(*args, **kwargs)
class Meta:
verbose_name = 'Purchase Order'
ordering = ['purchase_request', ]
class PurchaseOrderItem(CommonBaseAbstractModel):
"""
A through table for the m2m relationship b/w PurchaseOrder and Item with additional fields.
"""
purchase_order = models.ForeignKey(PurchaseOrder, related_name='purchase_order_items')
item = models.ForeignKey(Item, related_name='purchase_order_items')
quantity_ordered = models.PositiveIntegerField(validators=[MinValueValidator(0.0)],null=False, blank=False)
price_local = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0.0)],
verbose_name='Price in PR currency',
help_text='Price of one unit in PR currency',
default=Decimal('0.00'),
blank=False, null=False)
price_usd = USDCurrencyField(verbose_name='Price USD', help_text='Price of one unit in US Dollars', blank=False, null=False)
price_subtotal_local = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0.0)],
verbose_name='Subtotal PR currency',
default=Decimal('0.00'),)
    price_subtotal_usd = USDCurrencyField(verbose_name='Subtotal US Dollars')
def save(self, *args, **kwargs):
self.price_subtotal_local = self.price_local * self.quantity_ordered
self.price_subtotal_usd = self.price_usd * self.quantity_ordered
        super(PurchaseOrderItem, self).save(*args, **kwargs)
class GoodsReceivedNote(CommonBaseAbstractModel):
purchase_request = models.ForeignKey(PurchaseRequest,
related_name='goods_received_notes',
on_delete=models.CASCADE)
po_number = models.PositiveIntegerField(validators=[MinValueValidator(0.0)],)
country = models.ForeignKey(Country, related_name='goods_received_notes', on_delete=models.CASCADE)
office = models.ForeignKey(Office, related_name='goods_received_notes', on_delete=models.DO_NOTHING)
received_date = models.DateField(auto_now=False, auto_now_add=False, null=True, blank=True)
items = models.ManyToManyField(Item, through='GoodsReceivedNoteItem')
class Meta:
verbose_name = 'Goods Received Note'
ordering = ['purchase_request',]
class GoodsReceivedNoteItem(CommonBaseAbstractModel):
"""
    A through table for the m2m relationship b/w GoodsReceivedNote and Item with an extra field
"""
goods_received_note = models.ForeignKey(GoodsReceivedNote,
related_name='goods_received_note_items',
on_delete=models.CASCADE)
item = models.ForeignKey(Item, related_name='goods_received_note_items', on_delete=models.CASCADE)
quantity_received = models.PositiveIntegerField(validators=[MinValueValidator(0.0)],)
class PurchaseRecord(CommonBaseAbstractModel):
# payment_voucher_num
# payment_request_date
# tender_yes_no
#
pass
| gpl-3.0 | 2,488,555,275,411,284,000 | 47.210611 | 249 | 0.657618 | false |
brchiu/tensorflow | tensorflow/python/ops/dequantize_op_test.py | 15 | 2818 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Dequantize Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class DequantizeOpTest(test.TestCase):
def __init__(self, method_name="runTest"):
super(DequantizeOpTest, self).__init__(method_name)
def _testDequantizeOp(self, inputs, min_range, max_range, dtype):
with self.cached_session():
input_op = constant_op.constant(inputs, shape=[len(inputs)], dtype=dtype)
dequantized = array_ops.dequantize(input_op, min_range, max_range)
tf_ans = self.evaluate(dequantized)
# TODO(vrv): Add support for DT_QINT32 quantization if needed.
type_dict = {
dtypes.quint8: np.uint8,
dtypes.qint8: np.int8,
dtypes.quint16: np.uint16,
dtypes.qint16: np.int16
}
self.assertTrue(dtype in type_dict.keys())
v_max = np.iinfo(type_dict[dtype]).max
v_min = np.iinfo(type_dict[dtype]).min
self.assertTrue(min_range >= v_min)
self.assertTrue(max_range <= v_max)
type_range = v_max - v_min
if v_min < 0:
half_range = (type_range + 1) / 2
else:
half_range = 0.0
np_ans = ((inputs.astype(np.float32) + half_range) *
(max_range - min_range) / type_range) + min_range
self.assertAllClose(tf_ans, np_ans, rtol=1e-5, atol=1e-5)
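  # Worked example of the dequantization formula above (illustrative): for quint8
  # with min_range=0.0 and max_range=6.0, type_range is 255 and half_range is 0,
  # so a quantized value of 128 dequantizes to 128 * 6.0 / 255 ~= 3.0118.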
def testBasicQuint8(self):
self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 6.0, dtypes.quint8)
self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 123.456, dtypes.quint8)
self._testDequantizeOp(
np.array([0, 4, 42, 108, 243]), 5.0, 200.2, dtypes.quint8)
def testBasicQint8(self):
self._testDequantizeOp(np.array([-128, 0, 127]), -1.0, 2.0, dtypes.qint8)
self._testDequantizeOp(np.array([-2, 4, -17]), -5.0, -3.0, dtypes.qint8)
self._testDequantizeOp(np.array([0, -4, 42, -108]), 5.0, 40.0, dtypes.qint8)
if __name__ == "__main__":
test.main()
| apache-2.0 | -7,943,743,669,092,654,000 | 36.573333 | 80 | 0.661462 | false |
wardi/datacats | setup.py | 8 | 1293 | #!/usr/bin/env python
# Copyright 2014-2015 Boxkite Inc.
# This file is part of the DataCats package and is released under
# the terms of the GNU Affero General Public License version 3.0.
# See LICENSE.txt or http://www.fsf.org/licensing/licenses/agpl-3.0.html
from setuptools import setup
from os.path import realpath, dirname
from os import chdir
import sys
install_requires = [
'setuptools',
'docopt',
'docker-py>=1.2.1',
'clint', # to output colored text to terminal
'requests>=2.5.2', # help with docker-py requirement
'lockfile',
'watchdog' # For lesscd
]
chdir(dirname(realpath(__file__)))
execfile("datacats/version.py")
setup(
name='datacats',
version=__version__,
description='CKAN Data Catalog Developer Tools built on Docker',
license='AGPL3',
author='Boxkite',
author_email='[email protected]',
url='https://github.com/datacats/datacats',
packages=[
'datacats',
'datacats.tests',
'datacats.cli',
],
install_requires=install_requires,
include_package_data=True,
test_suite='datacats.tests',
zip_safe=False,
entry_points="""
[console_scripts]
datacats=datacats.cli.main:main
datacats-lesscd=datacats.cli.lesscd:main
""",
)
| agpl-3.0 | 8,216,931,163,720,610,000 | 24.86 | 72 | 0.658933 | false |
CFSworks/ansible-routeros | library/routeros_command.py | 1 | 6318 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Sam Edwards
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: routeros_command
author: "Sam Edwards (@cfsworks)"
short_description: Run arbitrary commands on a Mikrotik RouterOS-based device
description:
- Sends an arbitrary set of commands to RouterOS and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: routeros
notes:
- Tested against RouterOS 6.38.1
options:
commands:
description:
- The commands to send to the remote RouterOS device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of I(retries) has been exceeded.
required: true
wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured retries, the task fails. See examples.
required: false
default: null
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the I(wait_for) must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(wait_for)
conditionals.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
        conditional, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
- name: get LLDP neighbors on remote devices
routeros_command:
commands: /ip neighbor print
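# Illustrative addition (not part of the original module documentation); the
# wait_for expression follows the standard Ansible netcli Conditional syntax.
- name: wait until a neighbor is visible on ether1
  routeros_command:
    commands: /ip neighbor print
    wait_for: result[0] contains ether1
    retries: 5
    interval: 2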
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import string_types
from ansible.module_utils.netcli import Conditional
from ansible.module_utils.network_common import ComplexList
from routeros_utils import routeros_argument_spec, run_commands
def to_lines(stdout):
lines = []
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
lines.append(item)
return lines
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict(),
), module)
items = []
for item in command(module.params['commands']):
if not item['command'].startswith('/'):
module.fail_json(msg='commands should always start with `/` to be '
'fully qualified; not executing `%s`' % item['command'])
if module.check_mode and ' print' not in item['command']:
warnings.append('only print commands are supported when using '
'check mode, not executing `%s`' % item['command'])
continue
items.append(item)
return items
def main():
argument_spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(routeros_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
commands = parse_commands(module, warnings)
if warnings:
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
try:
conditionals = [Conditional(c) for c in wait_for]
except AttributeError:
exc = get_exception()
module.fail_json(msg=str(exc))
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not be satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'changed': False,
'stdout': responses,
'stdout_lines': to_lines(responses)
})
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,102,431,266,260,530,000 | 30.277228 | 92 | 0.646407 | false |
1905410/Misago | misago/threads/validators.py | 1 | 3561 | from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _, ungettext
from misago.categories.models import THREADS_ROOT_NAME, Category
from misago.categories.permissions import can_browse_category, can_see_category
from misago.conf import settings
from misago.core.validators import validate_sluggable
from .threadtypes import trees_map
def validate_category(user, category_id, allow_root=False):
try:
threads_tree_id = trees_map.get_tree_id_for_root(THREADS_ROOT_NAME)
category = Category.objects.get(
tree_id=threads_tree_id,
id=category_id,
)
except Category.DoesNotExist:
category = None
# Skip ACL validation for root category?
if allow_root and category and not category.level:
return category
if not category or not can_see_category(user, category):
raise ValidationError(_("Requested category could not be found."))
if not can_browse_category(user, category):
raise ValidationError(_("You don't have permission to access this category."))
return category
def validate_post(post):
post_len = len(post)
if not post_len:
raise ValidationError(_("You have to enter a message."))
if post_len < settings.post_length_min:
message = ungettext(
"Posted message should be at least %(limit_value)s character long (it has %(show_value)s).",
"Posted message should be at least %(limit_value)s characters long (it has %(show_value)s).",
settings.post_length_min)
raise ValidationError(message % {
'limit_value': settings.post_length_min,
'show_value': post_len
})
if settings.post_length_max and post_len > settings.post_length_max:
message = ungettext(
"Posted message cannot be longer than %(limit_value)s character (it has %(show_value)s).",
"Posted message cannot be longer than %(limit_value)s characters (it has %(show_value)s).",
settings.post_length_max)
raise ValidationError(message % {
'limit_value': settings.post_length_max,
'show_value': post_len
})
def validate_title(title):
title_len = len(title)
if not title_len:
raise ValidationError(_("You have to enter thread title."))
if title_len < settings.thread_title_length_min:
message = ungettext(
"Thread title should be at least %(limit_value)s character long (it has %(show_value)s).",
"Thread title should be at least %(limit_value)s characters long (it has %(show_value)s).",
settings.thread_title_length_min)
raise ValidationError(message % {
'limit_value': settings.thread_title_length_min,
'show_value': title_len
})
if title_len > settings.thread_title_length_max:
message = ungettext(
"Thread title cannot be longer than %(limit_value)s character (it has %(show_value)s).",
"Thread title cannot be longer than %(limit_value)s characters (it has %(show_value)s).",
settings.thread_title_length_max)
raise ValidationError(message % {
'limit_value': settings.thread_title_length_max,
'show_value': title_len
})
error_not_sluggable = _("Thread title should contain alpha-numeric characters.")
error_slug_too_long = _("Thread title is too long.")
validate_sluggable(error_not_sluggable, error_slug_too_long)(title)
return title
| gpl-2.0 | -4,940,545,722,011,384,000 | 38.131868 | 105 | 0.651783 | false |
engla/kupfer | kupfer/plugin/firefox_keywords.py | 1 | 7697 | __kupfer_name__ = _("Firefox Keywords")
__kupfer_sources__ = ("KeywordsSource", )
__kupfer_text_sources__ = ("KeywordSearchSource", )
__kupfer_actions__ = ("SearchWithEngine", )
__description__ = _("Search the web with Firefox keywords")
__version__ = "2017.1"
__author__ = ""
from configparser import RawConfigParser
from contextlib import closing
import os
import sqlite3
from urllib.parse import quote, urlparse
from kupfer import plugin_support
from kupfer.objects import Source, Action, Leaf
from kupfer.objects import TextLeaf, TextSource
from kupfer.obj.helplib import FilesystemWatchMixin
from kupfer.obj.objects import OpenUrl, RunnableLeaf
from kupfer import utils
from kupfer import config, plugin_support
__kupfer_settings__ = plugin_support.PluginSettings(
{
"key" : "default",
"label": _("Default for ?"),
"type": str,
"value": 'https://www.google.com/search?ie=UTF-8&q=%s',
}
)
def get_firefox_home_file(needed_file):
firefox_dir = os.path.expanduser("~/.mozilla/firefox")
if not os.path.exists(firefox_dir):
return None
config = RawConfigParser({"Default" : 0})
config.read(os.path.join(firefox_dir, "profiles.ini"))
path = None
for section in config.sections():
if config.has_option(section, "Default") and config.get(section, "Default") == "1":
path = config.get (section, "Path")
break
elif path == None and config.has_option(section, "Path"):
path = config.get (section, "Path")
if path == None:
return ""
if path.startswith("/"):
return os.path.join(path, needed_file)
return os.path.join(firefox_dir, path, needed_file)
def _url_domain(text):
components = list(urlparse(text))
domain = "".join(components[1:2])
return domain
class Keyword(Leaf):
def __init__(self, title, kw, url):
title = title if title else _url_domain(url)
name = "%s (%s)" % (kw, title)
super().__init__(url, name)
self.keyword = kw
def _is_search(self):
return "%s" in self.object
def get_actions(self):
if self._is_search():
yield SearchFor()
else:
yield OpenUrl()
def get_description(self):
return self.object
def get_icon_name(self):
return "text-html"
def get_text_representation(self):
return self.object
class KeywordsSource (Source, FilesystemWatchMixin):
instance = None
def __init__(self):
super().__init__(_("Firefox Keywords"))
def initialize(self):
KeywordsSource.instance = self
ff_home = get_firefox_home_file('')
self.monitor_token = self.monitor_directories(ff_home)
def finalize(self):
KeywordsSource.instance = None
def monitor_include_file(self, gfile):
return gfile and gfile.get_basename() == 'lock'
def _get_ffx3_bookmarks(self):
"""Query the firefox places bookmark database"""
fpath = get_firefox_home_file("places.sqlite")
if not (fpath and os.path.isfile(fpath)):
return []
try:
self.output_debug("Reading bookmarks from", fpath)
with closing(sqlite3.connect(fpath, timeout=1)) as conn:
c = conn.cursor()
c.execute("""SELECT moz_places.url, moz_places.title, moz_keywords.keyword
FROM moz_places, moz_keywords
WHERE moz_places.id = moz_keywords.place_id
""")
return [Keyword(title, kw, url) for url, title, kw in c]
except sqlite3.Error:
# Something is wrong with the database
self.output_exc()
return []
def get_items(self):
seen_keywords = set()
for kw in self._get_ffx3_bookmarks():
seen_keywords.add(kw.keyword)
yield kw
def get_description(self):
return None
def get_icon_name(self):
return "web-browser"
def provides(self):
yield Keyword
class SearchWithEngine (Action):
"""TextLeaf -> SearchWithEngine -> Keyword"""
action_accelerator = "s"
def __init__(self):
Action.__init__(self, _("Search With..."))
def activate(self, leaf, iobj):
url = iobj.object
_do_search_engine(leaf.object, url)
def item_types(self):
yield TextLeaf
def requires_object(self):
return True
def object_types(self):
yield Keyword
def valid_object(self, obj, for_item):
return obj._is_search()
def object_source(self, for_item=None):
return KeywordsSource()
def get_description(self):
return _("Search the web with Firefox keywords")
def get_icon_name(self):
return "edit-find"
class SearchFor (Action):
"""Keyword -> SearchFor -> TextLeaf
This is the opposite action to SearchWithEngine
"""
action_accelerator = "s"
def __init__(self):
Action.__init__(self, _("Search For..."))
def activate(self, leaf, iobj):
url = leaf.object
terms = iobj.object
_do_search_engine(terms, url)
def item_types(self):
yield Keyword
def requires_object(self):
return True
def object_types(self):
yield TextLeaf
def object_source(self, for_item):
return TextSource(placeholder=_("Search Terms"))
def valid_object(self, obj, for_item):
# NOTE: Using exact class to skip subclasses
return type(obj) == TextLeaf
def get_description(self):
return _("Search the web with Firefox keywords")
def get_icon_name(self):
return "edit-find"
class KeywordSearchSource(TextSource):
def __init__(self):
super().__init__(_("Firefox Keywords (?-source)"))
def get_text_items(self, text):
if not text.startswith("?"):
return
parts = text[1:].split(maxsplit=1)
if len(parts) < 1:
return
query = parts[1] if len(parts) > 1 else ""
for kw in KeywordsSource.instance.get_leaves():
if kw._is_search() and kw.keyword == parts[0]:
yield SearchWithKeyword(kw, query)
return
default = __kupfer_settings__['default'].strip()
if default:
if '%s' not in default:
default += '%s'
yield SearchWithKeyword(Keyword(None, "", default), text[1:])
def get_description(self):
return None
def get_icon_name(self):
return "web-browser"
def provides(self):
yield SearchWithKeyword
def get_rank(self):
return 80
class SearchWithKeyword(RunnableLeaf):
def __init__(self, keyword, text):
super().__init__((keyword, text), _('Search for "%s"') % (text, ))
def run(self):
kw = self.keyword_leaf
_do_search_engine(self.query, kw.object)
@property
def keyword_leaf(self):
return self.object[0]
@property
def query(self):
return self.object[1]
def get_icon_name(self):
return "web-browser"
def get_description(self):
return _("Search using %s") % self.keyword_leaf
def get_text_representation(self):
kw = self.keyword_leaf
return _query_url(self.query, kw.object)
def _do_search_engine(terms, search_url, encoding="UTF-8"):
"""Show an url searching for @search_url with @terms"""
utils.show_url(_query_url(terms, search_url))
def _query_url(terms, search_url):
"""Show an url searching for @search_url with @terms"""
query_url = search_url.replace("%s", quote(terms))
return query_url
| gpl-3.0 | 5,112,285,602,855,265,000 | 27.507407 | 91 | 0.597376 | false |
andjey/status-tracker/src/python/status_tracker/db.py | 2 | 2963 | """This module contains functions for working with the database."""
import time
from db_pool import DBPool
from config_parser import Config
conf_parser = Config()
CONFIGS = conf_parser.get_config()
DB_POOL = DBPool({'user': CONFIGS['db.user'],
'passwd': CONFIGS['db.passwd'],
'db_name': CONFIGS['db.db_name'],
'ttl': int(CONFIGS['db_pool.ttl'])},
pool_size=int(CONFIGS['db_pool.pool_size']))
def log(msg):
"""Inserts log into db.
This function takes log message and writes it to database.
:Parameters:
- `msg`: log message to write into database.
"""
with DB_POOL.connection() as conn:
cursor = conn.cursor()
query = """INSERT INTO `log` (event, time)
VALUES (%s, %s);
"""
cursor.execute(query, (msg, str(int(time.time()))))
conn.commit()
def change_status(status, client_id):
"""Changes status of client.
This function takes new status and client_id and changes status of that
client.
:Parameters:
- `status`: new status for update,
        - `client_id`: id of the client whose status should be updated.
"""
with DB_POOL.connection() as conn:
cursor = conn.cursor()
query = """UPDATE `client`
SET `status`=%s
WHERE `id`= %s;
"""
cursor.execute(query, (status, str(client_id)))
conn.commit()
def insert_tweet(tweets, search_key):
"""Inserts tweet into database.
This function takes tweet dictionary and search_key and inserts this data
into database.
:Parameters:
- `tweets`: dictionary with tweet data,
- `search_key`: key of searching for tweets in twitter.
"""
with DB_POOL.connection() as conn:
cursor = conn.cursor()
query = u"""REPLACE INTO `tweet` (tweet_id, author, content, date,
source_url, search_key)
VALUES (%s, %s, %s, %s, %s, %s);
"""
for tweet in tweets:
if tweet:
cursor.execute(query, (tweet['tweet_id'], tweet['author'],
tweet['content'], tweet['date'],
tweet['source_url'], search_key))
conn.commit()
def retrieve_tweets(query_string):
"""Retrieves tweets from db by given query_string.
This function takes query_string as parameter and fetches all tweets with
given query_string from db.
:Parameters:
- `query_string`: string which defines search key for tweets.
    :Returns:
tuple with retrieved tweets.
"""
with DB_POOL.connection() as conn:
cursor = conn.cursor()
query = """SELECT * FROM tweet
WHERE search_key=%s;
"""
cursor.execute(query, (query_string, ))
return cursor.fetchall()
| mit | -7,399,160,648,368,389,000 | 29.864583 | 77 | 0.549781 | false |
pepsipepsi/nodebox_opengl_python3 | examples/Path/BitBop.py | 1 | 1858 | import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
# BitBop -- a fun demonstration of path.contains.
#
# The textpath command returns a BezierPath of the text that can
# be manipulated or, as demonstrated here, queried using path.contains.
# A grid is generated and everywhere a point in the path is encountered,
# a random square is drawn.
def draw(canvas):
canvas.clear()
background(0.8, 0.7, 0)
fill(0.1, 0.1, 0.2)
# Set the font and create the text path.
font("Verdana", 100)
align(CENTER)
tp = textpath("NodeBox", 0, 100, width=500)
#tp.draw() # Draws the underlying path
# Here are the variables that influence the composition:
resx = 100 # The horizontal resolution
resy = 100 # The vertical resolution
rx = 5.0 # The horizontal randomness each point has
ry = 5.0 # The vertical randomness each point has
dotsize = 6.0 # The maximum size of one dot.
dx = 500 / float(resx) # The width each dot covers
dy = 500 / float(resy) # The height each dot covers
# We create a grid of the specified resolution.
# Each x,y coordinate is a measuring point where
# we check if it falls within the path.
for x, y in grid(resx, resy):
sz = random(dotsize)
# Create the point that will be checked
px = x*dx-sz
py = y*dy-sz
# Only do something if the point falls within the path bounds.
# You could add an "else" statement, that draws something in the
# empty positions.
if tp.contains(px, py):
# Change the color for each point -- try it out!
# fill(0, 0, random(), random())
oval(px+random(-rx, rx),
py+random(-ry, ry),
sz, sz)
canvas.size = 500,500
canvas.run(draw) | bsd-3-clause | -9,032,943,484,282,350,000 | 33.425926 | 72 | 0.636168 | false |
tomchristie/django | tests/auth_tests/test_auth_backends.py | 9 | 28001 | from datetime import date
from unittest import mock
from django.contrib.auth import (
BACKEND_SESSION_KEY, SESSION_KEY, authenticate, get_user, signals,
)
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.hashers import MD5PasswordHasher
from django.contrib.auth.models import AnonymousUser, Group, Permission, User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.http import HttpRequest
from django.test import (
SimpleTestCase, TestCase, modify_settings, override_settings,
)
from .models import (
CustomPermissionsUser, CustomUser, CustomUserWithoutIsActiveField,
ExtensionUser, UUIDUser,
)
class CountingMD5PasswordHasher(MD5PasswordHasher):
"""Hasher that counts how many times it computes a hash."""
calls = 0
def encode(self, *args, **kwargs):
type(self).calls += 1
return super().encode(*args, **kwargs)
class BaseModelBackendTest:
"""
A base class for tests that need to validate the ModelBackend
with different User models. Subclasses should define a class
level UserModel attribute, and a create_users() method to
construct two users for test purposes.
"""
backend = 'django.contrib.auth.backends.ModelBackend'
def setUp(self):
self.patched_settings = modify_settings(
AUTHENTICATION_BACKENDS={'append': self.backend},
)
self.patched_settings.enable()
self.create_users()
def tearDown(self):
self.patched_settings.disable()
# The custom_perms test messes with ContentTypes, which will
# be cached; flush the cache to ensure there are no side effects
# Refs #14975, #14925
ContentType.objects.clear_cache()
def test_has_perm(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertIs(user.has_perm('auth.test'), False)
user.is_staff = True
user.save()
self.assertIs(user.has_perm('auth.test'), False)
user.is_superuser = True
user.save()
self.assertIs(user.has_perm('auth.test'), True)
user.is_staff = True
user.is_superuser = True
user.is_active = False
user.save()
self.assertIs(user.has_perm('auth.test'), False)
def test_custom_perms(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
# reloading user to purge the _perm_cache
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions(), {'auth.test'})
self.assertEqual(user.get_group_permissions(), set())
self.assertIs(user.has_module_perms('Group'), False)
self.assertIs(user.has_module_perms('auth'), True)
perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')
user.user_permissions.add(perm)
perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')
user.user_permissions.add(perm)
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions(), {'auth.test2', 'auth.test', 'auth.test3'})
self.assertIs(user.has_perm('test'), False)
self.assertIs(user.has_perm('auth.test'), True)
self.assertIs(user.has_perms(['auth.test2', 'auth.test3']), True)
perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')
group = Group.objects.create(name='test_group')
group.permissions.add(perm)
user.groups.add(group)
user = self.UserModel._default_manager.get(pk=self.user.pk)
exp = {'auth.test2', 'auth.test', 'auth.test3', 'auth.test_group'}
self.assertEqual(user.get_all_permissions(), exp)
self.assertEqual(user.get_group_permissions(), {'auth.test_group'})
self.assertIs(user.has_perms(['auth.test3', 'auth.test_group']), True)
user = AnonymousUser()
self.assertIs(user.has_perm('test'), False)
self.assertIs(user.has_perms(['auth.test2', 'auth.test3']), False)
def test_has_no_object_perm(self):
"""Regressiontest for #12462"""
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
self.assertIs(user.has_perm('auth.test', 'object'), False)
self.assertEqual(user.get_all_permissions('object'), set())
self.assertIs(user.has_perm('auth.test'), True)
self.assertEqual(user.get_all_permissions(), {'auth.test'})
def test_anonymous_has_no_permissions(self):
"""
#17903 -- Anonymous users shouldn't have permissions in
ModelBackend.get_(all|user|group)_permissions().
"""
backend = ModelBackend()
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
user_perm = Permission.objects.create(name='test', content_type=content_type, codename='test_user')
group_perm = Permission.objects.create(name='test2', content_type=content_type, codename='test_group')
user.user_permissions.add(user_perm)
group = Group.objects.create(name='test_group')
user.groups.add(group)
group.permissions.add(group_perm)
self.assertEqual(backend.get_all_permissions(user), {'auth.test_user', 'auth.test_group'})
self.assertEqual(backend.get_user_permissions(user), {'auth.test_user', 'auth.test_group'})
self.assertEqual(backend.get_group_permissions(user), {'auth.test_group'})
with mock.patch.object(self.UserModel, 'is_anonymous', True):
self.assertEqual(backend.get_all_permissions(user), set())
self.assertEqual(backend.get_user_permissions(user), set())
self.assertEqual(backend.get_group_permissions(user), set())
def test_inactive_has_no_permissions(self):
"""
#17903 -- Inactive users shouldn't have permissions in
ModelBackend.get_(all|user|group)_permissions().
"""
backend = ModelBackend()
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
user_perm = Permission.objects.create(name='test', content_type=content_type, codename='test_user')
group_perm = Permission.objects.create(name='test2', content_type=content_type, codename='test_group')
user.user_permissions.add(user_perm)
group = Group.objects.create(name='test_group')
user.groups.add(group)
group.permissions.add(group_perm)
self.assertEqual(backend.get_all_permissions(user), {'auth.test_user', 'auth.test_group'})
self.assertEqual(backend.get_user_permissions(user), {'auth.test_user', 'auth.test_group'})
self.assertEqual(backend.get_group_permissions(user), {'auth.test_group'})
user.is_active = False
user.save()
self.assertEqual(backend.get_all_permissions(user), set())
self.assertEqual(backend.get_user_permissions(user), set())
self.assertEqual(backend.get_group_permissions(user), set())
def test_get_all_superuser_permissions(self):
"""A superuser has all permissions. Refs #14795."""
user = self.UserModel._default_manager.get(pk=self.superuser.pk)
self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all()))
@override_settings(PASSWORD_HASHERS=['auth_tests.test_auth_backends.CountingMD5PasswordHasher'])
def test_authentication_timing(self):
"""Hasher is run once regardless of whether the user exists. Refs #20760."""
# Re-set the password, because this tests overrides PASSWORD_HASHERS
self.user.set_password('test')
self.user.save()
CountingMD5PasswordHasher.calls = 0
username = getattr(self.user, self.UserModel.USERNAME_FIELD)
authenticate(username=username, password='test')
self.assertEqual(CountingMD5PasswordHasher.calls, 1)
CountingMD5PasswordHasher.calls = 0
authenticate(username='no_such_user', password='test')
self.assertEqual(CountingMD5PasswordHasher.calls, 1)
class ModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the default User model.
"""
UserModel = User
user_credentials = {'username': 'test', 'password': 'test'}
def create_users(self):
self.user = User.objects.create_user(email='[email protected]', **self.user_credentials)
self.superuser = User.objects.create_superuser(
username='test2',
email='[email protected]',
password='test',
)
def test_authenticate_inactive(self):
"""
An inactive user can't authenticate.
"""
self.assertEqual(authenticate(**self.user_credentials), self.user)
self.user.is_active = False
self.user.save()
self.assertIsNone(authenticate(**self.user_credentials))
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUserWithoutIsActiveField')
def test_authenticate_user_without_is_active_field(self):
"""
A custom user without an `is_active` field is allowed to authenticate.
"""
user = CustomUserWithoutIsActiveField.objects._create_user(
username='test', email='[email protected]', password='test',
)
self.assertEqual(authenticate(username='test', password='test'), user)
@override_settings(AUTH_USER_MODEL='auth_tests.ExtensionUser')
class ExtensionUserModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the custom ExtensionUser model.
This isn't a perfect test, because both the User and ExtensionUser are
    synchronized to the database, which wouldn't ordinarily happen in
production. As a result, it doesn't catch errors caused by the non-
existence of the User table.
The specific problem is queries on .filter(groups__user) et al, which
makes an implicit assumption that the user model is called 'User'. In
production, the auth.User table won't exist, so the requested join
won't exist either; in testing, the auth.User *does* exist, and
so does the join. However, the join table won't contain any useful
data; for testing, we check that the data we expect actually does exist.
"""
UserModel = ExtensionUser
def create_users(self):
self.user = ExtensionUser._default_manager.create_user(
username='test',
email='[email protected]',
password='test',
date_of_birth=date(2006, 4, 25)
)
self.superuser = ExtensionUser._default_manager.create_superuser(
username='test2',
email='[email protected]',
password='test',
date_of_birth=date(1976, 11, 8)
)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomPermissionsUser')
class CustomPermissionsUserModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the CustomPermissionsUser model.
As with the ExtensionUser test, this isn't a perfect test, because both
the User and CustomPermissionsUser are synchronized to the database,
    which wouldn't ordinarily happen in production.
"""
UserModel = CustomPermissionsUser
def create_users(self):
self.user = CustomPermissionsUser._default_manager.create_user(
email='[email protected]',
password='test',
date_of_birth=date(2006, 4, 25)
)
self.superuser = CustomPermissionsUser._default_manager.create_superuser(
email='[email protected]',
password='test',
date_of_birth=date(1976, 11, 8)
)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
class CustomUserModelBackendAuthenticateTest(TestCase):
"""
The model backend can accept a credentials kwarg labeled with
custom user model's USERNAME_FIELD.
"""
def test_authenticate(self):
test_user = CustomUser._default_manager.create_user(
email='[email protected]',
password='test',
date_of_birth=date(2006, 4, 25)
)
authenticated_user = authenticate(email='[email protected]', password='test')
self.assertEqual(test_user, authenticated_user)
@override_settings(AUTH_USER_MODEL='auth_tests.UUIDUser')
class UUIDUserTests(TestCase):
def test_login(self):
"""
A custom user with a UUID primary key should be able to login.
"""
user = UUIDUser.objects.create_user(username='uuid', password='test')
self.assertTrue(self.client.login(username='uuid', password='test'))
self.assertEqual(UUIDUser.objects.get(pk=self.client.session[SESSION_KEY]), user)
class TestObj:
pass
class SimpleRowlevelBackend:
def has_perm(self, user, perm, obj=None):
if not obj:
return # We only support row level perms
if isinstance(obj, TestObj):
if user.username == 'test2':
return True
elif user.is_anonymous and perm == 'anon':
return True
elif not user.is_active and perm == 'inactive':
return True
return False
def has_module_perms(self, user, app_label):
if not user.is_anonymous and not user.is_active:
return False
return app_label == "app1"
def get_all_permissions(self, user, obj=None):
if not obj:
return [] # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if user.is_anonymous:
return ['anon']
if user.username == 'test2':
return ['simple', 'advanced']
else:
return ['simple']
def get_group_permissions(self, user, obj=None):
if not obj:
return # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if 'test_group' in [group.name for group in user.groups.all()]:
return ['group_perm']
else:
return ['none']
@modify_settings(AUTHENTICATION_BACKENDS={
'append': 'auth_tests.test_auth_backends.SimpleRowlevelBackend',
})
class RowlevelBackendTest(TestCase):
"""
Tests for auth backend that supports object level permissions
"""
def setUp(self):
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.user2 = User.objects.create_user('test2', '[email protected]', 'test')
self.user3 = User.objects.create_user('test3', '[email protected]', 'test')
def tearDown(self):
# The get_group_permissions test messes with ContentTypes, which will
# be cached; flush the cache to ensure there are no side effects
# Refs #14975, #14925
ContentType.objects.clear_cache()
def test_has_perm(self):
self.assertIs(self.user1.has_perm('perm', TestObj()), False)
self.assertIs(self.user2.has_perm('perm', TestObj()), True)
self.assertIs(self.user2.has_perm('perm'), False)
self.assertIs(self.user2.has_perms(['simple', 'advanced'], TestObj()), True)
self.assertIs(self.user3.has_perm('perm', TestObj()), False)
self.assertIs(self.user3.has_perm('anon', TestObj()), False)
self.assertIs(self.user3.has_perms(['simple', 'advanced'], TestObj()), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), {'simple'})
self.assertEqual(self.user2.get_all_permissions(TestObj()), {'simple', 'advanced'})
self.assertEqual(self.user2.get_all_permissions(), set())
def test_get_group_permissions(self):
group = Group.objects.create(name='test_group')
self.user3.groups.add(group)
self.assertEqual(self.user3.get_group_permissions(TestObj()), {'group_perm'})
@override_settings(
AUTHENTICATION_BACKENDS=['auth_tests.test_auth_backends.SimpleRowlevelBackend'],
)
class AnonymousUserBackendTest(SimpleTestCase):
"""
Tests for AnonymousUser delegating to backend.
"""
def setUp(self):
self.user1 = AnonymousUser()
def test_has_perm(self):
self.assertIs(self.user1.has_perm('perm', TestObj()), False)
self.assertIs(self.user1.has_perm('anon', TestObj()), True)
def test_has_perms(self):
self.assertIs(self.user1.has_perms(['anon'], TestObj()), True)
self.assertIs(self.user1.has_perms(['anon', 'perm'], TestObj()), False)
def test_has_module_perms(self):
self.assertIs(self.user1.has_module_perms("app1"), True)
self.assertIs(self.user1.has_module_perms("app2"), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), {'anon'})
@override_settings(AUTHENTICATION_BACKENDS=[])
class NoBackendsTest(TestCase):
"""
An appropriate error is raised if no auth backends are provided.
"""
def setUp(self):
self.user = User.objects.create_user('test', '[email protected]', 'test')
def test_raises_exception(self):
msg = (
'No authentication backends have been defined. '
'Does AUTHENTICATION_BACKENDS contain anything?'
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.user.has_perm(('perm', TestObj()))
@override_settings(AUTHENTICATION_BACKENDS=['auth_tests.test_auth_backends.SimpleRowlevelBackend'])
class InActiveUserBackendTest(TestCase):
"""
Tests for an inactive user
"""
def setUp(self):
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.user1.is_active = False
self.user1.save()
def test_has_perm(self):
self.assertIs(self.user1.has_perm('perm', TestObj()), False)
self.assertIs(self.user1.has_perm('inactive', TestObj()), True)
def test_has_module_perms(self):
self.assertIs(self.user1.has_module_perms("app1"), False)
self.assertIs(self.user1.has_module_perms("app2"), False)
class PermissionDeniedBackend:
"""
Always raises PermissionDenied in `authenticate`, `has_perm` and `has_module_perms`.
"""
def authenticate(self, request, username=None, password=None):
raise PermissionDenied
def has_perm(self, user_obj, perm, obj=None):
raise PermissionDenied
def has_module_perms(self, user_obj, app_label):
raise PermissionDenied
class PermissionDeniedBackendTest(TestCase):
"""
Other backends are not checked once a backend raises PermissionDenied
"""
backend = 'auth_tests.test_auth_backends.PermissionDeniedBackend'
def setUp(self):
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.user_login_failed = []
signals.user_login_failed.connect(self.user_login_failed_listener)
def tearDown(self):
signals.user_login_failed.disconnect(self.user_login_failed_listener)
def user_login_failed_listener(self, sender, credentials, **kwargs):
self.user_login_failed.append(credentials)
@modify_settings(AUTHENTICATION_BACKENDS={'prepend': backend})
def test_permission_denied(self):
"user is not authenticated after a backend raises permission denied #2550"
self.assertIsNone(authenticate(username='test', password='test'))
# user_login_failed signal is sent.
self.assertEqual(self.user_login_failed, [{'password': '********************', 'username': 'test'}])
@modify_settings(AUTHENTICATION_BACKENDS={'append': backend})
def test_authenticates(self):
self.assertEqual(authenticate(username='test', password='test'), self.user1)
@modify_settings(AUTHENTICATION_BACKENDS={'prepend': backend})
def test_has_perm_denied(self):
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
self.user1.user_permissions.add(perm)
self.assertIs(self.user1.has_perm('auth.test'), False)
self.assertIs(self.user1.has_module_perms('auth'), False)
@modify_settings(AUTHENTICATION_BACKENDS={'append': backend})
def test_has_perm(self):
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
self.user1.user_permissions.add(perm)
self.assertIs(self.user1.has_perm('auth.test'), True)
self.assertIs(self.user1.has_module_perms('auth'), True)
class NewModelBackend(ModelBackend):
pass
class ChangedBackendSettingsTest(TestCase):
"""
Tests for changes in the settings.AUTHENTICATION_BACKENDS
"""
backend = 'auth_tests.test_auth_backends.NewModelBackend'
TEST_USERNAME = 'test_user'
TEST_PASSWORD = 'test_password'
TEST_EMAIL = '[email protected]'
def setUp(self):
User.objects.create_user(self.TEST_USERNAME, self.TEST_EMAIL, self.TEST_PASSWORD)
@override_settings(AUTHENTICATION_BACKENDS=[backend])
def test_changed_backend_settings(self):
"""
Removing a backend configured in AUTHENTICATION_BACKENDS makes already
logged-in users disconnect.
"""
# Get a session for the test user
self.assertTrue(self.client.login(
username=self.TEST_USERNAME,
password=self.TEST_PASSWORD)
)
# Prepare a request object
request = HttpRequest()
request.session = self.client.session
# Remove NewModelBackend
with self.settings(AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend']):
# Get the user from the request
user = get_user(request)
# Assert that the user retrieval is successful and the user is
            # anonymous as the backend is no longer available.
self.assertIsNotNone(user)
self.assertTrue(user.is_anonymous)
class TypeErrorBackend:
"""
Always raises TypeError.
"""
def authenticate(self, request, username=None, password=None):
raise TypeError
class SkippedBackend:
def authenticate(self):
# Doesn't accept any credentials so is skipped by authenticate().
pass
class AuthenticateTests(TestCase):
def setUp(self):
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
@override_settings(AUTHENTICATION_BACKENDS=['auth_tests.test_auth_backends.TypeErrorBackend'])
def test_type_error_raised(self):
"""A TypeError within a backend is propagated properly (#18171)."""
with self.assertRaises(TypeError):
authenticate(username='test', password='test')
@override_settings(AUTHENTICATION_BACKENDS=(
'auth_tests.test_auth_backends.SkippedBackend',
'django.contrib.auth.backends.ModelBackend',
))
def test_skips_backends_without_arguments(self):
"""
A backend (SkippedBackend) is ignored if it doesn't accept the
credentials as arguments.
"""
self.assertEqual(authenticate(username='test', password='test'), self.user1)
class ImproperlyConfiguredUserModelTest(TestCase):
"""
An exception from within get_user_model() is propagated and doesn't
raise an UnboundLocalError (#21439).
"""
def setUp(self):
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.client.login(username='test', password='test')
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_does_not_shadow_exception(self):
# Prepare a request object
request = HttpRequest()
request.session = self.client.session
msg = (
"AUTH_USER_MODEL refers to model 'thismodel.doesntexist' "
"that has not been installed"
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
get_user(request)
class ImportedModelBackend(ModelBackend):
pass
class CustomModelBackend(ModelBackend):
pass
class OtherModelBackend(ModelBackend):
pass
class ImportedBackendTests(TestCase):
"""
#23925 - The backend path added to the session should be the same
as the one defined in AUTHENTICATION_BACKENDS setting.
"""
backend = 'auth_tests.backend_alias.ImportedModelBackend'
@override_settings(AUTHENTICATION_BACKENDS=[backend])
def test_backend_path(self):
username = 'username'
password = 'password'
User.objects.create_user(username, 'email', password)
self.assertTrue(self.client.login(username=username, password=password))
request = HttpRequest()
request.session = self.client.session
self.assertEqual(request.session[BACKEND_SESSION_KEY], self.backend)
class SelectingBackendTests(TestCase):
backend = 'auth_tests.test_auth_backends.CustomModelBackend'
other_backend = 'auth_tests.test_auth_backends.OtherModelBackend'
username = 'username'
password = 'password'
def assertBackendInSession(self, backend):
request = HttpRequest()
request.session = self.client.session
self.assertEqual(request.session[BACKEND_SESSION_KEY], backend)
@override_settings(AUTHENTICATION_BACKENDS=[backend])
def test_backend_path_login_without_authenticate_single_backend(self):
user = User.objects.create_user(self.username, 'email', self.password)
self.client._login(user)
self.assertBackendInSession(self.backend)
@override_settings(AUTHENTICATION_BACKENDS=[backend, other_backend])
def test_backend_path_login_without_authenticate_multiple_backends(self):
user = User.objects.create_user(self.username, 'email', self.password)
expected_message = (
'You have multiple authentication backends configured and '
'therefore must provide the `backend` argument or set the '
'`backend` attribute on the user.'
)
with self.assertRaisesMessage(ValueError, expected_message):
self.client._login(user)
@override_settings(AUTHENTICATION_BACKENDS=[backend, other_backend])
def test_backend_path_login_with_explicit_backends(self):
user = User.objects.create_user(self.username, 'email', self.password)
self.client._login(user, self.other_backend)
self.assertBackendInSession(self.other_backend)
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
class AllowAllUsersModelBackendTest(TestCase):
"""
Inactive users may authenticate with the AllowAllUsersModelBackend.
"""
user_credentials = {'username': 'test', 'password': 'test'}
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(
email='[email protected]', is_active=False,
**cls.user_credentials
)
def test_authenticate(self):
self.assertFalse(self.user.is_active)
self.assertEqual(authenticate(**self.user_credentials), self.user)
def test_get_user(self):
self.client.force_login(self.user)
request = HttpRequest()
request.session = self.client.session
user = get_user(request)
self.assertEqual(user, self.user)
| bsd-3-clause | 6,342,754,459,873,517,000 | 37.357534 | 110 | 0.664619 | false |
usc-isi-i2/etk | etk/extractor.py | 1 | 1848 | from enum import Enum, auto
from typing import List
from etk.extraction import Extraction
class InputType(Enum):
"""
TEXT: value must be a string
HTML: the value is HTML text
TOKENS: value must first be tokenized
OBJECT: value can be anything
"""
TEXT = auto()
HTML = auto()
TOKENS = auto()
OBJECT = auto()
class Extractor(object):
"""
All extractors extend this abstract class.
"""
def __init__(self, input_type: InputType = InputType.TEXT, category: str = None, name: str = None):
self._category = category
self._name = name
self._input_type = input_type
@property
def input_type(self) -> InputType:
"""
The type of input that an extractor wants
Returns: InputType
"""
return self._input_type
@property
def name(self) -> str:
"""
The name of an extractor shown to users.
Different instances ought to have different names, e.g., a glossary extractor for city_name could have
name "city name extractor".
Returns: string, the name of an extractor.
"""
return self._name
@property
def category(self) -> str:
"""
Identifies a whole category of extractors, all instances should have the same category and
different names.
Returns: string, a label to identify the category of an extractor.
"""
return self._category
def extract(self, *input_value, **configs) -> List[Extraction]:
"""
Args:
input_value (): some extractors may want multiple arguments, for example, to
concatenate them together
configs (): any configs/options of extractors
Returns: list of extracted data, which can be any Python object
"""
pass
| mit | -9,128,917,080,397,884,000 | 26.176471 | 110 | 0.607684 | false |
shreyb/gracc-reporting | gracc_reporting/NiceNum.py | 1 | 2606 | """
Having written a bunch of scientific software, I am always amazed
at how few languages have built in routines for displaying numbers
nicely. I was doing a little programming in Python and got surprised
again. I couldn't find any routines for displaying numbers to
a significant number of digits and adding appropriate commas and
spaces to long digit sequences. Below is my attempt to write
a nice number formatting routine for Python. It is not particularly
fast. I suspect building the string by concatenation is responsible
for much of its slowness. Suggestions on how to improve the
implementation will be gladly accepted.
David S. Harrison
([email protected])
Returns a nicely formatted string for the floating point number
provided. This number will be rounded to the supplied accuracy
and commas and spaces will be added. I think every language should
do this for numbers. Why don't they? Here are some examples:
>>> niceNum(123567.0, 1000)
'124,000'
>>> niceNum(5.3918e-07, 1e-10)
'0.000 000 539 2'
This kind of thing is wonderful for producing tables for
human consumption.
"""
import math
def niceNum(num, precision=1):
"""Returns a string representation for a floating point number
that is rounded to the given precision and displayed with
commas and spaces."""
accpow = int(math.floor(math.log10(precision)))
if num < 0:
digits = int(math.fabs(num/pow(10,accpow)-0.5))
else:
digits = int(math.fabs(num/pow(10,accpow)+0.5))
result = ''
if digits > 0:
for i in range(0,accpow):
if (i % 3)==0 and i>0:
result = '0,' + result
else:
result = '0' + result
curpow = int(accpow)
while digits > 0:
adigit = chr((digits % 10) + ord('0'))
if (curpow % 3)==0 and curpow!=0 and len(result)>0:
if curpow < 0:
result = adigit + ' ' + result
else:
result = adigit + ',' + result
elif curpow==0 and len(result)>0:
result = adigit + '.' + result
else:
result = adigit + result
digits = digits/10
curpow = curpow + 1
for i in range(curpow,0):
if (i % 3)==0 and i!=0:
result = '0 ' + result
else:
result = '0' + result
if curpow <= 0:
result = "0." + result
if num < 0:
result = '-' + result
else:
result = "0"
return result
| apache-2.0 | -3,577,675,567,020,844,000 | 34.216216 | 69 | 0.589793 | false |
negrinho/deep_architect | dev/tutorials/mnist/dynet/mnist_dynet.py | 1 | 8297 | # Search Space for DyNet
# NOTE: No Batch_norm since DyNet has not supported batch norm
import dynet as dy
import numpy as np
from deep_architect.helpers.dynet_support import DyParameterCollection, siso_dynet_module
import deep_architect.modules as mo
import deep_architect.hyperparameters as hp
M = DyParameterCollection()
D = hp.Discrete
def flatten():
def compile_fn(di, dh):
shape = di['in'].dim()
n = np.product(shape[0])
Flatten = dy.reshape
def fn(di):
return {'out': Flatten(di['in'], (n,))}
return fn
return siso_dynet_module('Flatten', compile_fn, {})
def dense(h_u):
def compile_fn(di, dh):
shape = di['in'].dim() # ((r, c), batch_dim)
m, n = dh['units'], shape[0][0]
pW = M.get_collection().add_parameters((m, n))
pb = M.get_collection().add_parameters((m, 1))
Dense = dy.affine_transform
def fn(di):
In = di['in']
W, b = pW.expr(), pb.expr()
# return {'out': W*In + b}
return {'out': Dense([b, W, In])}
return fn
return siso_dynet_module('Dense', compile_fn, {'units': h_u})
# just put here to streamline everything
def nonlinearity(h_nonlin_name):
def compile_fn(di, dh):
def fn(di):
nonlin_name = dh['nonlin_name']
if nonlin_name == 'relu':
Out = dy.rectify(di['in'])
elif nonlin_name == 'elu':
Out = dy.elu(di['in'])
elif nonlin_name == 'tanh':
Out = dy.tanh(di['in'])
else:
raise ValueError
return {'out': Out}
return fn
return siso_dynet_module('Nonlinearity', compile_fn,
{'nonlin_name': h_nonlin_name})
def dropout(h_keep_prob):
def compile_fn(di, dh):
p = dh['keep_prop']
Dropout = dy.dropout
def fn(di):
return {'out': Dropout(di['in'], p)}
return fn
return siso_dynet_module('Dropout', compile_fn, {'keep_prop': h_keep_prob})
def dnn_net_simple(num_classes):
# declaring hyperparameter
h_nonlin_name = D(['relu', 'tanh',
'elu']) # nonlinearity function names to choose from
h_opt_drop = D(
[0, 1]) # dropout optional hyperparameter; 0 is exclude, 1 is include
h_drop_keep_prob = D([0.25, 0.5,
0.75]) # dropout probability to choose from
h_num_hidden = D([64, 128, 256, 512, 1024
]) # number of hidden units for affine transform module
h_num_repeats = D([1, 2]) # 1 is appearing once, 2 is appearing twice
# defining search space topology
model = mo.siso_sequential([
flatten(),
mo.siso_repeat(
lambda: mo.siso_sequential([
dense(h_num_hidden),
nonlinearity(h_nonlin_name),
mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop),
]), h_num_repeats),
dense(D([num_classes]))
])
return model
def dnn_cell(h_num_hidden, h_nonlin_name, h_opt_drop, h_drop_keep_prob):
return mo.siso_sequential([
dense(h_num_hidden),
nonlinearity(h_nonlin_name),
mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop)
])
def dnn_net(num_classes):
h_nonlin_name = D(['relu', 'tanh', 'elu'])
h_opt_drop = D([0, 1])
return mo.siso_sequential([
flatten(),
mo.siso_repeat(
lambda: dnn_cell(D([64, 128, 256, 512, 1024]), h_nonlin_name,
h_opt_drop, D([0.25, 0.5, 0.75])), D([1, 2])),
dense(D([num_classes]))
])
# Main/Searcher
# Getting and reading mnist data adapted from here:
# https://github.com/clab/dynet/blob/master/examples/mnist/mnist-autobatch.py
import deep_architect.searchers.random as se
import deep_architect.core as co
from deep_architect.contrib.misc.datasets.loaders import load_mnist
def get_search_space(num_classes):
def fn():
co.Scope.reset_default_scope()
inputs, outputs = dnn_net(num_classes)
return inputs, outputs, {}
return fn
def main():
num_classes = 10
num_samples = 3 # number of architecture to sample
best_val_acc, best_architecture = 0., -1
# donwload and normalize data, using test as val for simplicity
X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',
normalize_range=True)
# defining evaluator
evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val),
num_classes,
max_num_training_epochs=5,
log_output_to_terminal=True)
searcher = se.RandomSearcher(get_search_space(num_classes))
for i in xrange(num_samples):
print("Sampling architecture %d" % i)
M.renew_collection()
inputs, outputs, _, searcher_eval_token = searcher.sample()
val_acc = evaluator.evaluate(
inputs,
outputs)['val_acc'] # evaluate and return validation accuracy
print("Finished evaluating architecture %d, validation accuracy is %f" %
(i, val_acc))
if val_acc > best_val_acc:
best_val_acc = val_acc
best_architecture = i
searcher.update(val_acc, searcher_eval_token)
print("Best validation accuracy is %f with architecture %d" %
(best_val_acc, best_architecture))
# Evaluator
import random
class SimpleClassifierEvaluator:
def __init__(self,
train_dataset,
val_dataset,
num_classes,
max_num_training_epochs=10,
batch_size=16,
learning_rate=1e-3,
display_step=1,
log_output_to_terminal=True):
self.train_dataset = train_dataset
self.val_dataset = val_dataset
self.num_classes = num_classes
self.max_num_training_epochs = max_num_training_epochs
self.learning_rate = learning_rate
self.batch_size = batch_size
self.log_output_to_terminal = log_output_to_terminal
self.display_step = display_step
def compute_accuracy(self, inputs, outputs):
correct = 0
for (label, img) in self.val_dataset:
dy.renew_cg()
x = dy.inputVector(img)
co.forward({inputs['in']: x})
logits = outputs['out'].val
pred = np.argmax(logits.npvalue())
if (label == pred): correct += 1
return (1.0 * correct / len(self.val_dataset))
def evaluate(self, inputs, outputs):
params = M.get_collection()
optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)
num_batches = int(len(self.train_dataset) / self.batch_size)
for epoch in range(self.max_num_training_epochs):
random.shuffle(self.train_dataset)
i = 0
total_loss = 0
while (i < len(self.train_dataset)):
dy.renew_cg()
mbsize = min(self.batch_size, len(self.train_dataset) - i)
minibatch = self.train_dataset[i:i + mbsize]
losses = []
for (label, img) in minibatch:
x = dy.inputVector(img)
co.forward({inputs['in']: x})
logits = outputs['out'].val
loss = dy.pickneglogsoftmax(logits, label)
losses.append(loss)
mbloss = dy.esum(losses) / mbsize
mbloss.backward()
optimizer.update()
total_loss += mbloss.scalar_value()
i += mbsize
val_acc = self.compute_accuracy(inputs, outputs)
if self.log_output_to_terminal and epoch % self.display_step == 0:
print("epoch:", '%d' % (epoch + 1), "loss:",
"{:.9f}".format(total_loss / num_batches),
"validation_accuracy:", "%.5f" % val_acc)
val_acc = self.compute_accuracy(inputs, outputs)
return {'val_acc': val_acc}
if __name__ == "__main__":
main() | mit | -8,312,957,813,745,671,000 | 31.162791 | 89 | 0.546704 | false |
release-monitoring/anitya | anitya/sar.py | 1 | 2237 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
"""
This script is used for GDPR SAR (General Data Protection Regulation
Subject Access Requests).
It returns information about a specific user saved by Anitya.
It reads the SAR_USERNAME and SAR_EMAIL environment variables as keys for
getting data from the database.
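Example invocation (a sketch; the environment variable values are illustrative)::
    SAR_USERNAME=jdoe SAR_EMAIL=jdoe@example.com python sar.py > jdoe-sar.json
The matching user records are written to standard output as JSON.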
Authors:
Michal Konecny <[email protected]>
"""
import logging
import os
import json
import sys
from anitya.config import config
from anitya import db
_log = logging.getLogger("anitya")
def main():
"""
Retrieve database entry for user.
"""
db.initialize(config)
_log.setLevel(logging.DEBUG)
sar_username = os.getenv("SAR_USERNAME")
sar_email = os.getenv("SAR_EMAIL")
users = []
if sar_email:
_log.debug("Find users by e-mail {}".format(sar_email))
users = users + db.User.query.filter_by(email=sar_email).all()
if sar_username:
_log.debug("Find users by username {}".format(sar_username))
users = users + db.User.query.filter_by(username=sar_username).all()
users_list = []
for user in users:
users_list.append(user.to_dict())
json.dump(users_list, sys.stdout)
if __name__ == "__main__":
_log.info("SAR script start")
main()
_log.info("SAR script end")
| gpl-2.0 | 5,467,713,474,153,368,000 | 29.643836 | 76 | 0.707644 | false |
thyagostall/ikanos | ikanos.py | 1 | 3242 | import threading
import socket
import time
import os
HOST = ''
PORT = 8888
BASE_DIR = 'www/'
def log(type, message):
print(type + ": " + message)
class ClientThread(threading.Thread):
def bytes_from_file(self, filename, chunksize=8192):
result = []
try:
with open(filename, "rb") as f:
while True:
chunk = f.read(chunksize)
if chunk:
result.append(chunk)
else:
break
except:
result = []
return result
    def request_to_filename(self, request):
        # Extract the requested path from a request line such as
        # "GET /page HTTP/1.1": skip the leading "GET /" (5 characters)
        # and stop just before " HTTP/1.x".
        request_string = request.decode('UTF-8')
        start = 5
        end = request_string.find("HTTP/1.") - 1
        return request_string[start:end]
def get_filename(self, request_file_name):
if not request_file_name:
request_file_name = 'index.html'
if not self.get_file_ext(request_file_name):
request_file_name += '.html'
return os.path.join(os.path.dirname(os.path.realpath(__file__)), BASE_DIR, request_file_name)
def get_file_ext(self, file_name):
extension = os.path.splitext(file_name)
return extension[len(extension) - 1]
def __init__(self, client_connection):
self.client_connection = client_connection
super(ClientThread, self).__init__()
def get_header(self, response_code):
result = "HTTP/1.0 " + str(response_code)
if response_code == 200:
result += " OK"
else:
result += " Not Found"
result += "\n\n"
return result
def run(self):
try:
request = self.client_connection.recv(1024)
file_name = self.request_to_filename(request)
file_name = self.get_filename(file_name)
log('Debug', "Request for: " + file_name)
response = self.bytes_from_file(file_name)
header = ""
if response:
header = self.get_header(200)
else:
header = self.get_header(404)
response = self.bytes_from_file(self.get_filename("404.html"))
log('Debug', "Response header: '" + header.strip() + "'")
except:
header = self.get_header(500)
response = self.bytes_from_file(self.get_filename("500.html"))
try:
self.client_connection.sendall(bytes(header, 'UTF-8'))
for i in response:
self.client_connection.sendall(i)
except:
pass
self.client_connection.close()
log('Debug', 'Socket closed')
class Server:
def serve(self):
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind((HOST, PORT))
listen_socket.listen(1)
print('Serving HTTP on port', PORT)
while True:
(clientsocket, address) = listen_socket.accept()
log('Debug', 'New socket: ' + str(address))
client = ClientThread(clientsocket)
client.start()
if __name__ == "__main__":
Server().serve()
| mit | -5,922,869,094,040,249,000 | 27.438596 | 101 | 0.544725 | false |
apbard/scipy | scipy/_lib/_gcutils.py | 25 | 2465 | """
Module for testing automatic garbage collection of objects
.. autosummary::
:toctree: generated/
set_gc_state - enable or disable garbage collection
gc_state - context manager for given state of garbage collector
assert_deallocated - context manager to check for circular references on object
"""
import weakref
import gc
from contextlib import contextmanager
__all__ = ['set_gc_state', 'gc_state', 'assert_deallocated']
class ReferenceError(AssertionError):
pass
def set_gc_state(state):
""" Set status of garbage collector """
if gc.isenabled() == state:
return
if state:
gc.enable()
else:
gc.disable()
@contextmanager
def gc_state(state):
""" Context manager to set state of garbage collector to `state`
Parameters
----------
state : bool
True for gc enabled, False for disabled
Examples
--------
>>> with gc_state(False):
... assert not gc.isenabled()
>>> with gc_state(True):
... assert gc.isenabled()
"""
orig_state = gc.isenabled()
set_gc_state(state)
yield
set_gc_state(orig_state)
@contextmanager
def assert_deallocated(func, *args, **kwargs):
"""Context manager to check that object is deallocated
This is useful for checking that an object can be freed directly by
reference counting, without requiring gc to break reference cycles.
GC is disabled inside the context manager.
Parameters
----------
func : callable
Callable to create object to check
\\*args : sequence
positional arguments to `func` in order to create object to check
\\*\\*kwargs : dict
keyword arguments to `func` in order to create object to check
Examples
--------
>>> class C(object): pass
>>> with assert_deallocated(C) as c:
... # do something
... del c
>>> class C(object):
... def __init__(self):
... self._circular = self # Make circular reference
>>> with assert_deallocated(C) as c: #doctest: +IGNORE_EXCEPTION_DETAIL
... # do something
... del c
Traceback (most recent call last):
...
ReferenceError: Remaining reference(s) to object
"""
with gc_state(False):
obj = func(*args, **kwargs)
ref = weakref.ref(obj)
yield obj
del obj
if ref() is not None:
raise ReferenceError("Remaining reference(s) to object")
| bsd-3-clause | -5,201,049,081,509,237,000 | 24.677083 | 82 | 0.619473 | false |
xfouloux/Flexget | flexget/plugins/filter/exists_series.py | 5 | 5521 | from __future__ import unicode_literals, division, absolute_import
import copy
import logging
from path import Path
from flexget import plugin
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils.log import log_once
from flexget.utils.template import RenderError
from flexget.plugins.parsers import ParseWarning
from flexget.plugin import get_plugin_by_name
log = logging.getLogger('exists_series')
class FilterExistsSeries(object):
"""
Intelligent series aware exists rejecting.
Example::
exists_series: /storage/series/
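    A fuller configuration (values are illustrative; the accepted options are
    defined by the schema below) can list several paths and choose how
    differing qualities are handled::
      exists_series:
        path:
          - /storage/series/
          - /archive/series/
        allow_different_qualities: better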
"""
schema = {
'anyOf': [
one_or_more({'type': 'string', 'format': 'path'}),
{
'type': 'object',
'properties': {
'path': one_or_more({'type': 'string', 'format': 'path'}),
'allow_different_qualities': {'enum': ['better', True, False], 'default': False}
},
'required': ['path'],
'additionalProperties': False
}
]
}
def prepare_config(self, config):
# if config is not a dict, assign value to 'path' key
if not isinstance(config, dict):
config = {'path': config}
# if only a single path is passed turn it into a 1 element list
if isinstance(config['path'], basestring):
config['path'] = [config['path']]
return config
@plugin.priority(-1)
def on_task_filter(self, task, config):
if not task.accepted:
log.debug('Scanning not needed')
return
config = self.prepare_config(config)
accepted_series = {}
paths = set()
for entry in task.accepted:
if 'series_parser' in entry:
if entry['series_parser'].valid:
accepted_series.setdefault(entry['series_parser'].name, []).append(entry)
for folder in config['path']:
try:
paths.add(entry.render(folder))
except RenderError as e:
log.error('Error rendering path `%s`: %s', folder, e)
else:
log.debug('entry %s series_parser invalid', entry['title'])
if not accepted_series:
log.warning('No accepted entries have series information. exists_series cannot filter them')
return
# scan through
# For speed, only test accepted entries since our priority should be after everything is accepted.
for series in accepted_series:
# make new parser from parser in entry
series_parser = accepted_series[series][0]['series_parser']
for folder in paths:
folder = Path(folder).expanduser()
if not folder.isdir():
log.warning('Directory %s does not exist', folder)
continue
for filename in folder.walk(errors='ignore'):
# run parser on filename data
try:
disk_parser = get_plugin_by_name('parsing').instance.parse_series(data=filename.name,
name=series_parser.name)
except ParseWarning as pw:
disk_parser = pw.parsed
log_once(pw.value, logger=log)
if disk_parser.valid:
log.debug('name %s is same series as %s', filename.name, series)
log.debug('disk_parser.identifier = %s', disk_parser.identifier)
log.debug('disk_parser.quality = %s', disk_parser.quality)
log.debug('disk_parser.proper_count = %s', disk_parser.proper_count)
for entry in accepted_series[series]:
log.debug('series_parser.identifier = %s', entry['series_parser'].identifier)
if disk_parser.identifier != entry['series_parser'].identifier:
log.trace('wrong identifier')
continue
log.debug('series_parser.quality = %s', entry['series_parser'].quality)
if config.get('allow_different_qualities') == 'better':
if entry['series_parser'].quality > disk_parser.quality:
log.trace('better quality')
continue
elif config.get('allow_different_qualities'):
if disk_parser.quality != entry['series_parser'].quality:
log.trace('wrong quality')
continue
log.debug('entry parser.proper_count = %s', entry['series_parser'].proper_count)
if disk_parser.proper_count >= entry['series_parser'].proper_count:
entry.reject('proper already exists')
continue
else:
log.trace('new one is better proper, allowing')
continue
@event('plugin.register')
def register_plugin():
plugin.register(FilterExistsSeries, 'exists_series', groups=['exists'], api_ver=2)
| mit | 3,885,300,402,778,404,400 | 43.524194 | 114 | 0.512588 | false |
tong-wang/PyRPLidar | rplidar_types.py | 1 | 5669 | """
RPLidar Types Definition
partly translated from <rptypes.h> of RPLidar SDK v1.4.5
by Tong Wang
* Copyright (c) 2014, RoboPeak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* RoboPeak LIDAR System
* Common Types definition
*
* Copyright 2009 - 2014 RoboPeak Team
* http://www.robopeak.com
*
"""
import logging
import time
from collections import deque
import numpy as np
from rplidar_protocol import *
from rplidar_cmd import *
class RPLidarRawFrame(object):
"""Raw frame from the RPLidar scan.
Save the timestamp and raw points of a complete frame. This is for archiving
the data.
    Attributes:
timestamp: when the frame is initiated and recorded.
raw_points: a list of points (in raw RPLidar binary format) of a
complete frame, starting from the point with syncbit == 1.
"""
def __init__(self):
self.timestamp = time.time()
self.raw_points = list()
def add_raw_point(self, raw_point):
"""append new raw_point to the points list."""
self.raw_points.append(raw_point)
class RPLidarFrame(object):
"""A processed frame with readily usable coordinates.
Contains a moving window (implemented by deques with maxlen) of the most
recent $maxlen$ points, each being converted from raw point data to both
Cartesian and polar coordinates.
This is mainly for real-time visualization of the points.
Attributes:
angle_d: a deque keeping angle in degrees
angle_r: a deque keeping angle in radians
distance: a deque keeping distance in millimeters
x: a deque keeping x coordinate in millimeters
y: a deque keeping y coordinate in millimeters
Methods:
add_point():
"""
def __init__(self, maxlen=360):
#self.updated = False
self.angle_d = deque(maxlen = maxlen)
self.angle_r = deque(maxlen = maxlen)
self.distance = deque(maxlen = maxlen)
self.x = deque(maxlen = maxlen)
self.y = deque(maxlen = maxlen)
def add_point(self, point):
"""add a parsed point into the deques
Args:
point: a parsed point in rplidar_response_device_point_format.
"""
angle_d = ((point.angle_highbyte<<7) | point.byte1.angle_lowbyte) / 64.0
angle_r = np.radians(angle_d)
distance = point.distance_q2 / 4.0
self.angle_d.append(angle_d)
self.angle_r.append(angle_r)
self.distance.append(distance)
self.x.append(distance * np.sin(angle_r))
self.y.append(distance * np.cos(angle_r))
class RPLidarPoint(object):
def __init__(self, rawPoint):
#self.timestamp = time.clock()
self.raw = rawPoint
parsed = rplidar_response_device_point_format.parse(rawPoint)
self.syncbit = parsed.Byte0.syncbit
self.syncbit_inverse = parsed.Byte0.syncbit_inverse
self.quality = parsed.Byte0.sync_quality
self.check_bit = parsed.Byte1.check_bit
self.angleD = ((parsed.angle_highbyte << 7) |
parsed.Byte1.angle_lowbyte) / 64.0
self.angle = np.radians(self.angleD)
self.distance = parsed.distance_q2 / 4.0
self.X = self.distance * np.sin(self.angle)
self.Y = self.distance * np.cos(self.angle)
# TODO:
# Complete implementation of all exceptions
#
#
class RPLidarError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return "[RPLidar ERROR] %s\n" % str(self.message)
def log(self):
ret = "%s" % str(self.message)
if(hasattr(self, "reason")):
return "".join([ret, "\n==> %s" % str(self.reason)])
return ret
#RESULT_OK = 0
#RESULT_FAIL_BIT = 0x80000000
#RESULT_ALREADY_DONE = 0x20
#RESULT_INVALID_DATA = (0x8000 | RESULT_FAIL_BIT)
#RESULT_OPERATION_FAIL = (0x8001 | RESULT_FAIL_BIT)
#RESULT_OPERATION_TIMEOUT = (0x8002 | RESULT_FAIL_BIT)
#RESULT_OPERATION_STOP = (0x8003 | RESULT_FAIL_BIT)
#RESULT_OPERATION_NOT_SUPPORT = (0x8004 | RESULT_FAIL_BIT)
#RESULT_FORMAT_NOT_SUPPORT = (0x8005 | RESULT_FAIL_BIT)
#RESULT_INSUFFICIENT_MEMORY = (0x8006 | RESULT_FAIL_BIT)
| mit | 6,806,688,784,455,480,000 | 30.670391 | 80 | 0.650908 | false |
mozilla/mwc | apps/mozorg/forms.py | 1 | 4634 | # coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
from operator import itemgetter
from django import forms
from django.conf import settings
from django.forms import widgets
from django.utils.safestring import mark_safe
from captcha.fields import ReCaptchaField
from l10n_utils.dotlang import _
from product_details import product_details
from .email_contribute import INTEREST_CHOICES
FORMATS = (('H', 'HTML'), ('T', 'Text'))
LANGS = settings.NEWSLETTER_LANGUAGES
LANGS_TO_STRIP = ['en-US', 'es']
PARENTHETIC_RE = re.compile(r' \([^)]+\)$')
LANG_FILES = 'mozorg/contribute'
def strip_parenthetical(lang_name):
"""
Remove the parenthetical from the end of the language name string.
"""
return PARENTHETIC_RE.sub('', lang_name, 1)
def get_lang_choices():
"""
Return a localized list of choices for language.
"""
lang_choices = []
for lang in LANGS:
if lang in product_details.languages:
lang_name = product_details.languages[lang]['native']
else:
try:
locale = [loc for loc in product_details.languages.keys()
if loc.startswith(lang)][0]
except IndexError:
continue
lang_name = product_details.languages[locale]['native']
lang_choices.append([lang, strip_parenthetical(lang_name)])
return sorted(lang_choices, key=itemgetter(1))
class SideRadios(widgets.RadioFieldRenderer):
"""Render radio buttons as labels"""
def render(self):
radios = [unicode(w) for idx, w in enumerate(self)]
return mark_safe(''.join(radios))
class PrivacyWidget(widgets.CheckboxInput):
"""Render a checkbox with privacy text. Lots of pages need this so
it should be standardized"""
def render(self, name, value, attrs=None):
attrs['required'] = 'true'
input_txt = super(PrivacyWidget, self).render(name, value, attrs)
policy_txt = _(u'I’m okay with you handling this info as you explain '
u'in your <a href="%s">Privacy Policy</a>')
return mark_safe(
'<label for="%s" class="privacy-check-label">'
'%s '
'<span class="title">%s</span></label>'
% (attrs['id'], input_txt,
policy_txt % '/en-US/privacy-policy')
)
class EmailInput(widgets.TextInput):
input_type = 'email'
NEWSLETTER_CHOICES = (('app-dev',) * 2,
('mozilla-and-you',) * 2)
class NewsletterForm(forms.Form):
newsletter = forms.ChoiceField(choices=NEWSLETTER_CHOICES,
widget=forms.HiddenInput)
email = forms.EmailField(widget=EmailInput(attrs={'required': 'true'}))
fmt = forms.ChoiceField(widget=forms.RadioSelect(renderer=SideRadios),
choices=FORMATS,
initial='H')
privacy = forms.BooleanField(widget=PrivacyWidget)
source_url = forms.URLField(verify_exists=False, required=False)
LANG_CHOICES = get_lang_choices()
def __init__(self, locale, *args, **kwargs):
regions = product_details.get_regions(locale)
regions = sorted(regions.iteritems(), key=lambda x: x[1])
lang = country = locale.lower()
if '-' in lang:
lang, country = lang.split('-', 1)
lang = lang if lang in LANGS else 'en'
super(NewsletterForm, self).__init__(*args, **kwargs)
self.fields['country'] = forms.ChoiceField(choices=regions,
initial=country,
required=False)
self.fields['lang'] = forms.ChoiceField(choices=self.LANG_CHOICES,
initial=lang,
required=False)
class ContributeForm(forms.Form):
email = forms.EmailField(widget=EmailInput(attrs={'required': 'true'}))
privacy = forms.BooleanField(widget=PrivacyWidget)
newsletter = forms.BooleanField(required=False)
interest = forms.ChoiceField(
choices=INTEREST_CHOICES,
widget=forms.Select(attrs={'required': 'true'}))
comments = forms.CharField(
widget=forms.widgets.Textarea(attrs={'required': 'true',
'rows': '',
'cols': ''}))
captcha = ReCaptchaField(attrs={'theme': 'clean'})
| mpl-2.0 | 858,167,410,596,575,100 | 34.358779 | 78 | 0.593912 | false |
Thapelo-Tsotetsi/rapidpro | temba/api/migrations/0002_auto_20141126_2054.py | 7 | 1279 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('channels', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='webhookevent',
name='channel',
field=models.ForeignKey(blank=True, to='channels.Channel', help_text='The channel that this event is relating to', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='webhookevent',
name='created_by',
field=models.ForeignKey(related_name=b'api_webhookevent_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item'),
preserve_default=True,
),
migrations.AddField(
model_name='webhookevent',
name='modified_by',
field=models.ForeignKey(related_name=b'api_webhookevent_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item'),
preserve_default=True,
),
]
| agpl-3.0 | 8,742,962,980,570,259,000 | 35.542857 | 167 | 0.627052 | false |
desihub/qlf | backend/framework/qlf/ui_channel/alerts.py | 2 | 1948 | import os
import math
import datetime
import json
from clients import get_ics_daemon
from channels import Group
disk_percent_warning = os.environ.get('DISK_SPACE_PERCENT_WARNING')
disk_percent_alert = os.environ.get('DISK_SPACE_PERCENT_ALERT')
class Alerts:
def qa_alert(self, camera, qa, status, exposure_id):
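        # Build a JSON notification for this camera/QA result and, when a status
        # is set, push it to the "monitor" channels group for subscribed clients.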
notification_type = status
notification = json.dumps({
"text": "Exposure {}: {} {} {}".format(
exposure_id,
camera,
qa,
status,
),
"type": notification_type,
"date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S UTC")
})
if notification_type is not None:
Group("monitor").send({
"text": json.dumps({
"notification": notification
})
})
def available_space(self):
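        # Compute the percentage of free disk space from statvfs of the current
        # directory, compare it against the warning/alarm thresholds read from
        # the environment, and return a JSON notification if a threshold is hit.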
statvfs = os.statvfs('./')
available_space = statvfs.f_frsize * statvfs.f_bavail
total_space = statvfs.f_frsize * statvfs.f_blocks
percent_free = int(available_space/total_space*100)
notification_type = None
if percent_free < int(disk_percent_warning):
notification_type = "WARNING"
if percent_free < int(disk_percent_alert):
notification_type = "ALARM"
            if os.environ.get('START_ICS', 'False') == 'True':
self.notify_ics("Available Disk Space {}%".format(
percent_free
))
notification = json.dumps({
"text": "Available Disk Space {}%".format(
percent_free
),
"type": notification_type,
"date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S UTC")
})
if notification_type is not None:
return notification
def notify_ics(self, message):
ics = get_ics_daemon()
ics.alert(message)
| bsd-3-clause | 6,897,572,040,447,212,000 | 31.466667 | 77 | 0.542608 | false |
andersk/zulip | zerver/openapi/test_curl_examples.py | 3 | 5963 | # Zulip's OpenAPI-based API documentation system is documented at
# https://zulip.readthedocs.io/en/latest/documentation/api.html
#
# This file contains the top-level logic for testing the cURL examples
# in Zulip's API documentation; the details are in
# zerver.openapi.curl_param_value_generators.
import html
import json
import os
import re
import shlex
import subprocess
import markdown
from django.conf import settings
from zulip import Client
from zerver.models import get_realm
from zerver.openapi import markdown_extension
from zerver.openapi.curl_param_value_generators import (
AUTHENTICATION_LINE,
assert_all_helper_functions_called,
)
from zerver.openapi.openapi import get_endpoint_from_operationid
def test_generated_curl_examples_for_success(client: Client) -> None:
default_authentication_line = f"{client.email}:{client.api_key}"
# A limited Markdown engine that just processes the code example syntax.
realm = get_realm("zulip")
md_engine = markdown.Markdown(
extensions=[markdown_extension.makeExtension(api_url=realm.uri + "/api")]
)
# We run our curl tests in alphabetical order (except that we
# delay the deactivate-user test to the very end), since we depend
# on "add" tests coming before "remove" tests in some cases. We
# should try to either avoid ordering dependencies or make them
# very explicit.
rest_endpoints_path = os.path.join(
settings.DEPLOY_ROOT, "templates/zerver/help/include/rest-endpoints.md"
)
rest_endpoints_raw = open(rest_endpoints_path, "r").read()
ENDPOINT_REGEXP = re.compile(r"/api/\s*(.*?)\)")
endpoint_list = sorted(set(re.findall(ENDPOINT_REGEXP, rest_endpoints_raw)))
for endpoint in endpoint_list:
article_name = endpoint + ".md"
file_name = os.path.join(settings.DEPLOY_ROOT, "templates/zerver/api/", article_name)
curl_commands_to_test = []
if os.path.exists(file_name):
f = open(file_name, "r")
for line in f:
# A typical example from the Markdown source looks like this:
# {generate_code_example(curl, ...}
if line.startswith("{generate_code_example(curl"):
curl_commands_to_test.append(line)
else:
# If the file doesn't exist, then it has been
# deleted and its page is generated by the
# template. Thus, the curl example would just
# a single one following the template's pattern.
endpoint_path, endpoint_method = get_endpoint_from_operationid(
endpoint.replace("-", "_")
)
endpoint_string = endpoint_path + ":" + endpoint_method
command = f"{{generate_code_example(curl)|{endpoint_string}|example}}"
curl_commands_to_test.append(command)
for line in curl_commands_to_test:
# To do an end-to-end test on the documentation examples
# that will be actually shown to users, we use the
# Markdown rendering pipeline to compute the user-facing
# example, and then run that to test it.
# Set AUTHENTICATION_LINE to default_authentication_line.
# Set this every iteration, because deactivate_own_user
# will override this for its test.
AUTHENTICATION_LINE[0] = default_authentication_line
curl_command_html = md_engine.convert(line.strip())
unescaped_html = html.unescape(curl_command_html)
curl_regex = re.compile(r"<code>curl\n(.*?)</code>", re.DOTALL)
commands = re.findall(curl_regex, unescaped_html)
for curl_command_text in commands:
curl_command_text = curl_command_text.replace(
"BOT_EMAIL_ADDRESS:BOT_API_KEY", AUTHENTICATION_LINE[0]
)
print("Testing {} ...".format(curl_command_text.split("\n")[0]))
# Turn the text into an arguments list.
generated_curl_command = [x for x in shlex.split(curl_command_text) if x != "\n"]
response_json = None
response = None
try:
# We split this across two lines so if curl fails and
# returns non-JSON output, we'll still print it.
response_json = subprocess.check_output(
generated_curl_command, universal_newlines=True
)
response = json.loads(response_json)
assert response["result"] == "success"
except (AssertionError, Exception):
error_template = """
Error verifying the success of the API documentation curl example.
File: {file_name}
Line: {line}
Curl command:
{curl_command}
Response:
{response}
This test is designed to check each generate_code_example(curl) instance in the
API documentation for success. If this fails then it means that the curl example
that was generated was faulty and when tried, it resulted in an unsuccessful
response.
Common reasons for why this could occur:
1. One or more example values in zerver/openapi/zulip.yaml for this endpoint
do not line up with the values in the test database.
2. One or more mandatory parameters were included in the "exclude" list.
To learn more about the test itself, see zerver/openapi/test_curl_examples.py.
"""
print(
error_template.format(
file_name=file_name,
line=line,
curl_command=generated_curl_command,
response=response_json
if response is None
else json.dumps(response, indent=4),
)
)
raise
assert_all_helper_functions_called()
| apache-2.0 | 1,421,830,716,775,949,000 | 40.699301 | 97 | 0.618481 | false |
onyxfish/nostaples | application.py | 1 | 10877 | #!/usr/bin/python
#~ This file is part of NoStaples.
#~ NoStaples is free software: you can redistribute it and/or modify
#~ it under the terms of the GNU General Public License as published by
#~ the Free Software Foundation, either version 3 of the License, or
#~ (at your option) any later version.
#~ NoStaples is distributed in the hope that it will be useful,
#~ but WITHOUT ANY WARRANTY; without even the implied warranty of
#~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#~ GNU General Public License for more details.
#~ You should have received a copy of the GNU General Public License
#~ along with NoStaples. If not, see <http://www.gnu.org/licenses/>.
"""
This module holds NoStaples' main method which handles
the instantiation of MVC objects and then starts the gtk
main loop.
"""
import logging.config
import os
import sys
import gtk
from nostaples import constants
from nostaples.controllers.about import AboutController
from nostaples.controllers.document import DocumentController
from nostaples.controllers.main import MainController
from nostaples.controllers.page import PageController
from nostaples.controllers.preferences import PreferencesController
from nostaples.controllers.save import SaveController
from nostaples.controllers.status import StatusController
from nostaples.models.document import DocumentModel
from nostaples.models.main import MainModel
from nostaples.models.page import PageModel
from nostaples.models.preferences import PreferencesModel
from nostaples.models.save import SaveModel
from nostaples.models.status import StatusModel
import nostaples.sane as saneme
import nostaples.utils.gtkexcepthook
from nostaples.utils.state import GConfStateManager
from nostaples.views.about import AboutView
from nostaples.views.document import DocumentView
from nostaples.views.main import MainView
from nostaples.views.page import PageView
from nostaples.views.preferences import PreferencesView
from nostaples.views.save import SaveView
from nostaples.views.status import StatusView
class Application(object):
"""
A 'front controller' class that stores references to all
top-level components of the application and facilitates
communication between them.
A reference to this class is injected into each controller
component of the application via its constructor. These
components then query the application object when they
need to access other parts of the system.
"""
_state_manager = None
_sane = None
_main_model = None
_main_controller = None
_main_view = None
_document_model = None
_document_controller = None
_document_view = None
_null_page_model = None
_page_controller = None
_page_view = None
_status_model = None
_status_controller = None
_status_view = None
_preferences_model = None
_preferences_controller = None
_preferences_view = None
_save_model = None
_save_controller = None
_save_view = None
_about_controller = None
_about_view = None
def __init__(self):
"""
Set up the config directory, logging, and state
persistence. Construct the Main MVC component triplet
(which will in turn construct all sub components).
"""
self._init_config()
self._init_logging()
self._init_state()
self._init_sane()
self._init_main_components()
self._init_settings()
def _init_config(self):
"""Setup the config directory."""
if not os.path.exists(constants.APP_DIRECTORY):
os.mkdir(constants.APP_DIRECTORY)
def _init_logging(self):
"""Setup logging for the application."""
logging.config.fileConfig(constants.LOGGING_CONFIG)
def _init_state(self):
"""Setup the state manager."""
self._state_manager = GConfStateManager()
def _init_sane(self):
"""Setup SANE."""
self._sane = saneme.SaneMe(logging.getLogger("saneme"))
def _init_main_components(self):
"""
Create the main application components, which will
request creation of other components as necessary.
"""
self._main_model = MainModel(self)
self._main_controller = MainController(self)
self._main_view = MainView(self)
def _init_settings(self):
"""
Load current settings from the state manager and
poll for available scanners.
"""
self._main_model.load_state()
self.get_save_model().load_state()
self.get_preferences_model().load_state()
self._main_controller._update_available_scanners()
# PUBLIC METHODS
def run(self):
"""Execute the GTK main loop."""
assert isinstance(self._main_view, MainView)
self._main_view.show()
gtk.main()
def get_state_manager(self):
"""Return the L{GConfStateManager} component."""
assert isinstance(self._state_manager, GConfStateManager)
return self._state_manager
def get_sane(self):
"""Return the SaneMe object."""
assert isinstance(self._sane, saneme.SaneMe)
return self._sane
def get_main_model(self):
"""Return the L{MainModel} component."""
assert self._main_model
return self._main_model
def get_main_controller(self):
"""Return the L{MainController} component."""
assert self._main_controller
return self._main_controller
def get_main_view(self):
"""Return the L{MainView} component."""
assert self._main_view
return self._main_view
def get_document_model(self):
"""Return the L{DocumentModel} component."""
if not self._document_model:
self._document_model = DocumentModel(self)
return self._document_model
def get_document_controller(self):
"""Return the L{DocumentController} component."""
if not self._document_controller:
self._document_controller = DocumentController(self)
return self._document_controller
def get_document_view(self):
"""Return the L{DocumentView} component."""
if not self._document_view:
self._document_view = DocumentView(self)
return self._document_view
def get_null_page_model(self):
"""
Return an empty L{PageModel} object.
This is the PageModel that is used when no
pages have been scanned.
"""
if not self._null_page_model:
self._null_page_model = PageModel(self)
return self._null_page_model
def get_current_page_model(self):
"""
Return the current/active L{PageModel} object.
This is a convenience function.
"""
return self.get_page_controller().get_current_page_model()
def get_page_controller(self):
"""Return the L{PageController} component."""
if not self._page_controller:
self._page_controller = PageController(self)
return self._page_controller
def get_page_view(self):
"""Return the L{PageView} component."""
if not self._page_view:
self._page_view = PageView(self)
return self._page_view
def get_status_model(self):
"""Return the L{StatusModel} component."""
if not self._status_model:
self._status_model = StatusModel(self)
return self._status_model
def get_status_controller(self):
"""Return the L{StatusController} component."""
if not self._status_controller:
self._status_controller = StatusController(self)
return self._status_controller
def get_status_view(self):
"""Return the L{StatusView} component."""
if not self._status_view:
self._status_view = StatusView(self)
return self._status_view
def get_preferences_model(self):
"""Return the L{PreferencesModel} component."""
if not self._preferences_model:
self._preferences_model = PreferencesModel(self)
return self._preferences_model
def get_preferences_controller(self):
"""Return the L{PreferencesController} component."""
if not self._preferences_controller:
self._preferences_controller = PreferencesController(self)
return self._preferences_controller
def get_preferences_view(self):
"""Return the L{PreferencesView} component."""
if not self._preferences_view:
self._preferences_view = PreferencesView(self)
return self._preferences_view
def show_preferences_dialog(self):
"""
Show the preferences dialog.
This is a convenience function.
"""
self.get_preferences_controller().run()
def get_save_model(self):
"""Return the L{SaveModel} component."""
if not self._save_model:
self._save_model = SaveModel(self)
return self._save_model
def get_save_controller(self):
"""Return the L{SaveController} component."""
if not self._save_controller:
self._save_controller = SaveController(self)
return self._save_controller
def get_save_view(self):
"""Return the L{SaveView} component."""
if not self._save_view:
self._save_view = SaveView(self)
return self._save_view
def show_save_dialog(self):
"""
Show the save dialog.
This is a convenience function.
"""
self.get_save_controller().run()
def get_about_controller(self):
"""Return the L{SaveController} component."""
if not self._about_controller:
self._about_controller = AboutController(self)
return self._about_controller
def get_about_view(self):
"""Return the L{SaveView} component."""
if not self._about_view:
self._about_view = AboutView(self)
return self._about_view
def show_about_dialog(self):
"""
Show the about dialog.
This is a convenience function.
"""
self.get_about_controller().run()
| gpl-3.0 | -7,031,260,171,684,142,000 | 31.565868 | 71 | 0.613496 | false |
Khurramjaved96/Dicta | Evaluation/corner_refinement.py | 1 | 20176 | import numpy as np
import cv2
import tensorflow as tf
class corner_finder():
def __init__(self, CHECKPOINT_DIR="../corner_full_data_shallow"):
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
self.sess = tf.Session(config=config)
# In[ ]:
def weight_variable(shape, name="temp"):
initial = tf.truncated_normal(shape, stddev=0.1, name=name)
return tf.Variable(initial)
def bias_variable(shape, name="temp"):
initial = tf.constant(0.1, shape=shape, name=name)
return tf.Variable(initial)
# In[ ]:
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
# In[ ]:
with tf.variable_scope('Corner'):
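            # Corner regressor: four conv + max-pool blocks (10/20/30/40 filters)
            # feeding a 300-unit fully connected layer with dropout and a final
            # 2-unit output that predicts the (x, y) corner location on the
            # 32x32 input crop.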
W_conv1 = weight_variable([5, 5, 3, 10], name="W_conv1")
b_conv1 = bias_variable([10], name="b_conv1")
# In[ ]:
self.x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
self.y_ = tf.placeholder(tf.float32, shape=[None, 2])
x = self.x
y_ = self.y_
h_conv1 = tf.nn.relu(conv2d(self.x, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 10, 20], name="W_conv2")
b_conv2 = bias_variable([20], name="b_conv2")
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_conv3 = weight_variable([5, 5, 20, 30], name="W_conv3")
b_conv3 = bias_variable([30], name="b_conv3")
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = max_pool_2x2(h_conv3)
W_conv4 = weight_variable([5, 5, 30, 40], name="W_conv4")
b_conv4 = bias_variable([40], name="b_conv4")
h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
h_pool4 = max_pool_2x2(h_conv4)
print h_pool4.get_shape()
temp_size = h_pool4.get_shape()
temp_size = temp_size[1] * temp_size[2] * temp_size[3]
# In[ ]:
W_fc1 = weight_variable([int(temp_size), 300], name="W_fc1")
b_fc1 = bias_variable([300], name="b_fc1")
h_pool4_flat = tf.reshape(h_pool4, [-1, int(temp_size)])
h_fc1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1)
# In[ ]:
# Adding dropout
self.keep_prob = tf.placeholder(tf.float32)
keep_prob = self.keep_prob
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# In[ ]:
W_fc2 = weight_variable([300, 2], name="W_fc2")
b_fc2 = bias_variable([2], name="b_fc2")
self.y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# W_fc3 = weight_variable([500, 500], name="W_fc3")
# b_fc3 = bias_variable([500], name="b_fc3")
# y_conv = tf.matmul(y_conv, W_fc3) + b_fc3
# W_fc4 = weight_variable([500, 2], name="W_fc4")
# b_fc4 = bias_variable([2], name="b_fc4")
# y_conv = tf.matmul(h_fc1_drop, W_fc4) + b_fc4
# In[ ]:
cross_entropy = tf.nn.l2_loss(self.y_conv - y_)
mySum = tf.summary.scalar('loss', cross_entropy)
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
merged = tf.summary.merge_all()
all_vars = tf.GraphKeys.GLOBAL_VARIABLES
saver = tf.train.Saver( tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Corner'))
ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if ckpt and ckpt.model_checkpoint_path:
#print ("PRINTING CHECKPOINT PATH")
#print(ckpt.model_checkpoint_path)
init = saver.restore(self.sess, ckpt.model_checkpoint_path)
else:
#print("Starting from scratch")
init = tf.global_variables_initializer()
self.sess.run(init)
def get_location(self,img):
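        # Coarse-to-fine corner localisation: resize the image to RESIZE x RESIZE,
        # then repeatedly run the CNN on the current crop, map the predicted corner
        # back to image coordinates, and re-crop a slightly smaller window
        # (CROP_FRAC) centred on that prediction. The accumulated offsets are
        # rescaled to the original image size at the end.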
ans_x =0.0
ans_y=0.0
this_is_temp = img.shape
RESIZE = 1200
img = cv2.resize(img, (RESIZE, RESIZE))
o_img = np.copy(img)
import time
y = None
x_start = 0
y_start = 0
up_scale_factor = (img.shape[1], img.shape[0])
crop_size = [img.shape[0] * .8, img.shape[1] * .8]
myImage = np.copy(o_img)
CROP_FRAC = .95
start = time.clock()
for counter in range(0, 60):
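            # Downsample the current crop to the network's 32x32 input, predict the
            # corner, then scale the prediction back up to crop coordinates.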
img_temp = cv2.resize(myImage, (32, 32))
img_temp = np.expand_dims(img_temp, axis=0)
response = self.y_conv.eval(feed_dict={
self.x: img_temp, self.keep_prob: 1.0}, session=self.sess)
response_up = response[0] / 32
response_up = response_up * up_scale_factor
y = response_up + (x_start, y_start)
img1 = np.copy(img)
#cv2.circle(img1, (int(response_up[0] + x_start), int(response_up[1] + y_start)), 2, (255, 0, 0), 2)
#cv2.imwrite("../sample_" + str(counter) + ".jpg", img1)
# cv2.waitKey(0)
#cv2.circle(img_temp[0], (int(response[0][0]), int(response[0][1])), 2, (255, 0, 0), 2)
#cv2.imwrite("../down_result" + str(counter) + ".jpg", img_temp[0])
# cv2.waitKey(0)
x_loc = int(y[0])
y_loc = int(y[1])
if x_loc > myImage.shape[0] / 2:
start_x = min(x_loc + int(round(myImage.shape[0] * CROP_FRAC / 2)), myImage.shape[0]) - int(round(
myImage.shape[0] * CROP_FRAC))
else:
start_x = max(x_loc - int(myImage.shape[0] * CROP_FRAC / 2), 0)
if y_loc > myImage.shape[1] / 2:
start_y = min(y_loc + int(myImage.shape[1] * CROP_FRAC / 2), myImage.shape[1]) - int(
myImage.shape[1] * CROP_FRAC)
else:
start_y = max(y_loc - int(myImage.shape[1] * CROP_FRAC / 2), 0)
ans_x+= start_x
ans_y+= start_y
myImage = myImage[start_y:start_y + int(myImage.shape[0] * CROP_FRAC),
start_x:start_x + int(myImage.shape[1] * CROP_FRAC)]
img = img[start_y:start_y + int(img.shape[0] * CROP_FRAC), start_x:start_x + int(img.shape[1] * CROP_FRAC)]
up_scale_factor = (img.shape[1], img.shape[0])
end = time.clock()
ans_x += y[0]
ans_y += y[1]
ans_x/=RESIZE
ans_x*= this_is_temp[1]
ans_y /=RESIZE
ans_y*= this_is_temp[0]
# print "TIME : ", end - start
return (int(round(ans_x)), int(round(ans_y)))
def get_location1(self,img):
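        # Variant of get_location that works on the full-resolution image (no
        # initial resize) and keeps cropping until the window shrinks below
        # about 5 pixels instead of using a fixed number of iterations.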
ans_x =0.0
ans_y=0.0
this_is_temp = img.shape
RESIZE = 1200
# img = cv2.resize(img, (RESIZE, RESIZE))
o_img = np.copy(img)
draw = np.copy(img)
import time
y = None
x_start = 0
y_start = 0
up_scale_factor = (img.shape[1], img.shape[0])
crop_size = [img.shape[0] * .8, img.shape[1] * .8]
myImage = np.copy(o_img)
CROP_FRAC = .95
start = time.clock()
end_x=img.shape[1]
end_y = img.shape[0]
start_x=0
start_y = 0
no_of_calls = 40
while(myImage.shape[0]>5 and myImage.shape[1]>5):
img_temp = cv2.resize(myImage, (32, 32))
img_temp = np.expand_dims(img_temp, axis=0)
response = self.y_conv.eval(feed_dict={
self.x: img_temp, self.keep_prob: 1.0},session=self.sess)
response_up = response[0] / 32
response_up = response_up * up_scale_factor
y = response_up + (x_start, y_start)
img1 = np.copy(img)
# cv2.circle(img1, (int(response_up[0] + x_start), int(response_up[1] + y_start)), 2, (0, int(255-(255.0/no_of_calls)*counter), int(255.0/no_of_calls)*counter), 4)
# cv2.imwrite("../sample_" + str(counter) + ".jpg", img1)
# cv2.waitKey(0)
#cv2.circle(img_temp[0], (int(response[0][0]), int(response[0][1])), 2, (255, 0, 0), 2)
#cv2.imwrite("../down_result" + str(counter) + ".jpg", img_temp[0])
# cv2.waitKey(0)z
# cv2.line(draw, (int(ans_x), int(ans_y)), (int(ans_x),int(end_y)), (0, int(255-(155.0/no_of_calls)*counter), int(155.0/no_of_calls)*counter), 3)
# cv2.line(draw, (int(ans_x), int(ans_y)), (int(end_x), int(ans_y)), (0, int(255-(155.0/no_of_calls)*counter), int(155.0/no_of_calls)*counter), 3)
# cv2.line(draw, (int(end_x), int(end_y)), (int(end_x), int(ans_y)), (0, int(255-(155.0/no_of_calls)*counter), int(155.0/no_of_calls)*counter), 3)
# cv2.line(draw, (int(end_x), int(end_y)), (int(ans_x), int(end_y)), (0, int(255-(155.0/no_of_calls)*counter), int(155.0/no_of_calls)*counter), 3)
# # cv2.line(img, (b[1]+a.shape[1], b[2]), (b[1]+a.shape[1], b[2]+a.shape[0]), (0, 255 - 60*temp_counter, 60*temp_counter), 18)
# # cv2.line(img, (b[1]+a.shape[1], b[2]+a.shape[0]), (b[1], b[2]+a.shape[0]), (0, 255 - 60*temp_counter, 60*temp_counter), 18)
# # cv2.line(img, (b[1], b[2]+a.shape[0]), (b[1], b[2]), (0, 255 - 60*temp_counter, 60*temp_counter), 18)
# cv2.imwrite("../bas"+str(counter)+".jpg",draw)
x_loc = int(y[0])
y_loc = int(y[1])
if x_loc > myImage.shape[1] / 2:
start_x = min(x_loc + int(round(myImage.shape[1] * CROP_FRAC / 2)), myImage.shape[1]) - int(round(
myImage.shape[1] * CROP_FRAC))
else:
start_x = max(x_loc - int(myImage.shape[1] * CROP_FRAC / 2), 0)
if y_loc > myImage.shape[0] / 2:
start_y = min(y_loc + int(myImage.shape[0] * CROP_FRAC / 2), myImage.shape[0]) - int(
myImage.shape[0] * CROP_FRAC)
else:
start_y = max(y_loc - int(myImage.shape[0] * CROP_FRAC / 2), 0)
ans_x+= start_x
ans_y+= start_y
myImage = myImage[start_y:start_y + int(myImage.shape[0] * CROP_FRAC),
start_x:start_x + int(myImage.shape[1] * CROP_FRAC)]
img = img[start_y:start_y + int(img.shape[0] * CROP_FRAC), start_x:start_x + int(img.shape[1] * CROP_FRAC)]
up_scale_factor = (img.shape[1], img.shape[0])
end_y = ans_y + start_y + int(myImage.shape[0] * CROP_FRAC)
end_x = ans_x + start_x + int(myImage.shape[1] * CROP_FRAC)
end = time.clock()
ans_x += y[0]
ans_y += y[1]
# ans_x/=RESIZE
# ans_x*= this_is_temp[1]
# ans_y /=RESIZE
# ans_y*= this_is_temp[0]
# print "TIME : ", end - start
return (int(round(ans_x)), int(round(ans_y)))
# In[ ]:
class corner_finder_aug():
def __init__(self, CHECKPOINT_DIR="../corner_withoutbg1"):
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.1
self.sess = tf.Session(config=config)
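        # Load the training-set images to compute the per-channel mean, reshaped
        # to (1, 1, 1, 3) so it can be broadcast-subtracted from input crops at
        # inference time.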
train_image = np.load("train_image_bg1.npy")
mean_train = np.mean(train_image, axis=(0,1,2))
mean_train = np.expand_dims(mean_train, axis=0)
mean_train = np.expand_dims(mean_train, axis=0)
self.mean_train = np.expand_dims(mean_train, axis=0)
# In[ ]:
def weight_variable(shape, name="temp"):
initial = tf.truncated_normal(shape, stddev=0.1, name=name)
return tf.Variable(initial)
def bias_variable(shape, name="temp"):
initial = tf.constant(0.1, shape=shape, name=name)
return tf.Variable(initial)
# In[ ]:
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
# In[ ]:
with tf.variable_scope('Corner'):
W_conv1 = weight_variable([5, 5, 3, 10], name="W_conv1")
b_conv1 = bias_variable([10], name="b_conv1")
# In[ ]:
self.x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
self.y_ = tf.placeholder(tf.float32, shape=[None, 2])
x = self.x
y_ = self.y_
h_conv1 = tf.nn.relu(conv2d(self.x, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 10, 20], name="W_conv2")
b_conv2 = bias_variable([20], name="b_conv2")
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_conv3 = weight_variable([5, 5, 20, 30], name="W_conv3")
b_conv3 = bias_variable([30], name="b_conv3")
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = max_pool_2x2(h_conv3)
W_conv4 = weight_variable([5, 5, 30, 40], name="W_conv4")
b_conv4 = bias_variable([40], name="b_conv4")
h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
h_pool4 = max_pool_2x2(h_conv4)
print h_pool4.get_shape()
temp_size = h_pool4.get_shape()
temp_size = temp_size[1] * temp_size[2] * temp_size[3]
# In[ ]:
W_fc1 = weight_variable([int(temp_size), 300], name="W_fc1")
b_fc1 = bias_variable([300], name="b_fc1")
h_pool4_flat = tf.reshape(h_pool4, [-1, int(temp_size)])
h_fc1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1)
# In[ ]:
# Adding dropout
self.keep_prob = tf.placeholder(tf.float32)
keep_prob = self.keep_prob
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# In[ ]:
W_fc2 = weight_variable([300, 2], name="W_fc2")
b_fc2 = bias_variable([2], name="b_fc2")
self.y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# W_fc3 = weight_variable([500, 500], name="W_fc3")
# b_fc3 = bias_variable([500], name="b_fc3")
# y_conv = tf.matmul(y_conv, W_fc3) + b_fc3
# W_fc4 = weight_variable([500, 2], name="W_fc4")
# b_fc4 = bias_variable([2], name="b_fc4")
# y_conv = tf.matmul(h_fc1_drop, W_fc4) + b_fc4
# In[ ]:
cross_entropy = tf.nn.l2_loss(self.y_conv - y_)
mySum = tf.summary.scalar('loss', cross_entropy)
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
merged = tf.summary.merge_all()
all_vars = tf.GraphKeys.GLOBAL_VARIABLES
saver = tf.train.Saver( tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Corner'))
ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if ckpt and ckpt.model_checkpoint_path:
#print ("PRINTING CHECKPOINT PATH")
#print(ckpt.model_checkpoint_path)
init = saver.restore(self.sess, ckpt.model_checkpoint_path)
else:
#print("Starting from scratch")
init = tf.global_variables_initializer()
self.sess.run(init)
def get_location1(self,img):
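        # Same coarse-to-fine refinement as corner_finder.get_location1, except the
        # 32x32 crop is mean-centred with the training-set mean before inference and
        # a smaller CROP_FRAC (0.85) is used at each step.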
ans_x =0.0
ans_y=0.0
this_is_temp = img.shape
RESIZE = 1200
# img = cv2.resize(img, (RESIZE, RESIZE))
o_img = np.copy(img)
draw = np.copy(img)
import time
y = None
x_start = 0
y_start = 0
up_scale_factor = (img.shape[1], img.shape[0])
crop_size = [img.shape[0] * .8, img.shape[1] * .8]
myImage = np.copy(o_img)
CROP_FRAC = .85
start = time.clock()
end_x=img.shape[1]
end_y = img.shape[0]
start_x=0
start_y = 0
no_of_calls = 40
while(myImage.shape[0]>10 and myImage.shape[1]>10):
img_temp = cv2.resize(myImage, (32, 32))
img_temp = np.expand_dims(img_temp, axis=0)
img_temp = img_temp -self.mean_train
response = self.y_conv.eval(feed_dict={
self.x: img_temp, self.keep_prob: 1.0},session=self.sess)
response_up = response[0] / 32
response_up = response_up * up_scale_factor
y = response_up + (x_start, y_start)
img1 = np.copy(img)
# cv2.circle(img1, (int(response_up[0] + x_start), int(response_up[1] + y_start)), 2, (0, int(255-(255.0/no_of_calls)*counter), int(255.0/no_of_calls)*counter), 4)
# cv2.imwrite("../sample_" + str(counter) + ".jpg", img1)
# cv2.waitKey(0)
#cv2.circle(img_temp[0], (int(response[0][0]), int(response[0][1])), 2, (255, 0, 0), 2)
#cv2.imwrite("../down_result" + str(counter) + ".jpg", img_temp[0])
# cv2.waitKey(0)z
# cv2.line(draw, (int(ans_x), int(ans_y)), (int(ans_x),int(end_y)), (0, int(255-(155.0/no_of_calls)*counter), int(155.0/no_of_calls)*counter), 3)
# cv2.line(draw, (int(ans_x), int(ans_y)), (int(end_x), int(ans_y)), (0, int(255-(155.0/no_of_calls)*counter), int(155.0/no_of_calls)*counter), 3)
# cv2.line(draw, (int(end_x), int(end_y)), (int(end_x), int(ans_y)), (0, int(255-(155.0/no_of_calls)*counter), int(155.0/no_of_calls)*counter), 3)
# cv2.line(draw, (int(end_x), int(end_y)), (int(ans_x), int(end_y)), (0, int(255-(155.0/no_of_calls)*counter), int(155.0/no_of_calls)*counter), 3)
# # cv2.line(img, (b[1]+a.shape[1], b[2]), (b[1]+a.shape[1], b[2]+a.shape[0]), (0, 255 - 60*temp_counter, 60*temp_counter), 18)
# # cv2.line(img, (b[1]+a.shape[1], b[2]+a.shape[0]), (b[1], b[2]+a.shape[0]), (0, 255 - 60*temp_counter, 60*temp_counter), 18)
# # cv2.line(img, (b[1], b[2]+a.shape[0]), (b[1], b[2]), (0, 255 - 60*temp_counter, 60*temp_counter), 18)
# cv2.imwrite("../bas"+str(counter)+".jpg",draw)
x_loc = int(y[0])
y_loc = int(y[1])
if x_loc > myImage.shape[1] / 2:
start_x = min(x_loc + int(round(myImage.shape[1] * CROP_FRAC / 2)), myImage.shape[1]) - int(round(
myImage.shape[1] * CROP_FRAC))
else:
start_x = max(x_loc - int(myImage.shape[1] * CROP_FRAC / 2), 0)
if y_loc > myImage.shape[0] / 2:
start_y = min(y_loc + int(myImage.shape[0] * CROP_FRAC / 2), myImage.shape[0]) - int(
myImage.shape[0] * CROP_FRAC)
else:
start_y = max(y_loc - int(myImage.shape[0] * CROP_FRAC / 2), 0)
ans_x+= start_x
ans_y+= start_y
myImage = myImage[start_y:start_y + int(myImage.shape[0] * CROP_FRAC),
start_x:start_x + int(myImage.shape[1] * CROP_FRAC)]
img = img[start_y:start_y + int(img.shape[0] * CROP_FRAC), start_x:start_x + int(img.shape[1] * CROP_FRAC)]
up_scale_factor = (img.shape[1], img.shape[0])
end_y = ans_y + start_y + int(myImage.shape[0] * CROP_FRAC)
end_x = ans_x + start_x + int(myImage.shape[1] * CROP_FRAC)
end = time.clock()
ans_x += y[0]
ans_y += y[1]
# ans_x/=RESIZE
# ans_x*= this_is_temp[1]
# ans_y /=RESIZE
# ans_y*= this_is_temp[0]
# print "TIME : ", end - start
return (int(round(ans_x)), int(round(ans_y)))
# In[ ]:
if __name__ == "__main__":
pass
| apache-2.0 | 5,846,053,030,291,351,000 | 36.020183 | 175 | 0.509814 | false |
emesene/emesene | emesene/e3/common/MessageFormatter.py | 2 | 5385 | # -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import time
import calendar
import xml.sax.saxutils
import e3
class MessageFormatter(object):
'''a class that holds the state of a conversation and
    formats the messages according to the state and the
format provided
tag list:
%ALIAS%: the alias of the account
    %DISPLAYNAME%: the alias if it exists, otherwise the nick if it exists,
    otherwise the account
%TIME%: the time of the message
%SHORTTIME%: the time of the message in format HH:MM:SS
%MESSAGE%: the message with format
%RAWMESSAGE%: the message without format
%STATUS%: the status of the account
%NL%: new line
    some basic formatting is allowed as html tags:
b: bold
i: italic
u: underline
br: new line
'''
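    # Example: with the default templates,
    #   MessageFormatter().format_information('connection lost')
    # returns '<i>connection lost</i><br/>'.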
def __init__(self, new_line='<br/>'):
'''constructor'''
self.new_line = new_line
# default formats
self.incoming = '<div class="message-incomming">'\
'<b>%DISPLAYNAME%</b>:%NL% [%SHORTTIME%] %MESSAGE%%NL%</div>'
self.outgoing = '<div class="message-outgoing">'\
'<b>%DISPLAYNAME%</b>:%NL% [%SHORTTIME%] %MESSAGE%%NL%</div>'
self.consecutive_incoming = '<div class="consecutive-incomming">'\
' [%SHORTTIME%] %MESSAGE%%NL%</div>'
self.consecutive_outgoing = '<div class="consecutive-outgoing">'\
' [%SHORTTIME%] %MESSAGE%%NL%</div>'
self.offline_incoming = \
'<i>(offline message)</i><b>%DISPLAYNAME%</b>:%NL% [%SHORTTIME%] %MESSAGE%%NL%'
self.information = '<i>%MESSAGE%</i>%NL%'
self.history = '<div class="message-history">'\
'<b>%TIME% %DISPLAYNAME%</b>: %MESSAGE%%NL%</div>'
def format_message(self, template, message):
'''format a message from the template, include new line
if new_line is True'''
template = template.replace('%NL%', self.new_line)
template = template.replace('%MESSAGE%', escape(message))
return template
def format_information(self, message):
'''format an info message from the template, include new line
if new_line is True'''
return self.format_message(self.information, message)
def format(self, msg):
'''format the message according to the template'''
if msg.type is None:
msg.type = e3.Message.TYPE_MESSAGE
if not msg.timestamp is None:
timestamp = calendar.timegm(msg.timestamp.timetuple())
else:
timestamp = time.time()
if msg.type == e3.Message.TYPE_MESSAGE:
if msg.first:
if msg.incoming:
template = self.incoming
else:
template = self.outgoing
else:
if msg.incoming:
template = self.consecutive_incoming
else:
template = self.consecutive_outgoing
if msg.type == e3.Message.TYPE_OLDMSG:
template = self.history
if msg.type == e3.Message.TYPE_FLNMSG:
template = self.offline_incoming
formated_time = time.strftime('%c', time.gmtime(timestamp))
formated_short_time = time.strftime('%X', time.localtime(timestamp))
template = template.replace('%ALIAS%',
escape(msg.alias))
template = template.replace('%DISPLAYNAME%',
escape(msg.display_name))
template = template.replace('%TIME%',
escape(formated_time))
template = template.replace('%SHORTTIME%',
escape(formated_short_time))
template = template.replace('%STATUS%',
escape(msg.status))
template = template.replace('%NL%', self.new_line)
is_raw = False
if '%MESSAGE%' in template:
(first, last) = template.split('%MESSAGE%')
elif '%RAWMESSAGE%' in template:
(first, last) = template.split('%RAWMESSAGE%')
is_raw = True
else:
first = template
last = ''
if not is_raw:
middle = e3.common.add_style_to_message(msg.message, msg.style, False)
msg.message = first + middle + last
return msg.message
dic = {
'\"' : '"',
'\'' : '''
}
dic_inv = {
'"' :'\"',
''' :'\''
}
def escape(string_):
'''replace the values on dic keys with the values'''
return xml.sax.saxutils.escape(string_, dic)
def unescape(string_):
'''replace the values on dic_inv keys with the values'''
return xml.sax.saxutils.unescape(string_, dic_inv)
| gpl-3.0 | 7,562,463,049,945,373,000 | 32.65625 | 94 | 0.592758 | false |
levelrf/level_basestation | spectrum_sense/add_client.py | 1 | 1069 | import os
#This script is used for adding a new client to the network. Once gnuradio sees a
#new client device request for bandwidth, it passes the data it got from that packet
#to this script which gives it a clientID, a frequency, and a bandwidth. It then
#waits for the client to reply with an ACK and a measurement of the interference it
#experiences. The database is updated and that information is used to improve the
#network performance overall (with assign_channels.py)
#Takes in these arguments in this order:
#freq - frequency that the client is transmitting its beacon on, so it can send information back
#bw - bandwidth that the client is requesting. This is encoded in the packet
def add(freq, bw):
    #For now, we're going to grab the open channel that has the most "goodness" that
    #we know of. This is from a FIFO that spectrum_sense is continuously building.
#FIXME We should eventually change this to be more intelligent.
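    #A minimal sketch, assuming spectrum_sense exposes the ranked open channels
    #through a FIFO path such as CHANNEL_FIFO (hypothetical name):
    #    with open(CHANNEL_FIFO) as fifo:
    #        assigned_freq = float(fifo.readline())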
    assigned_freq = None #FIXME placeholder; the original code left this assignment unfinished
assigned_bw = 100000 #This is in Hz (ie 100 kHz).
#FIXME Dynamically allocate bandwidth
| gpl-3.0 | 3,795,588,408,128,763,000 | 61.882353 | 463 | 0.76333 | false |
openstack/swift | test/unit/account/test_auditor.py | 1 | 5053 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import itertools
import unittest
import time
import os
import random
from swift.account import auditor
from swift.common.storage_policy import POLICIES
from swift.common.utils import Timestamp
from test.debug_logger import debug_logger
from test.unit import patch_policies, with_tempdir
from test.unit.account.test_backend import (
AccountBrokerPreTrackContainerCountSetup)
@patch_policies
class TestAuditorRealBrokerMigration(
AccountBrokerPreTrackContainerCountSetup, unittest.TestCase):
def test_db_migration(self):
# add a few containers
policies = itertools.cycle(POLICIES)
num_containers = len(POLICIES) * 3
per_policy_container_counts = defaultdict(int)
for i in range(num_containers):
name = 'test-container-%02d' % i
policy = next(policies)
self.broker.put_container(name, next(self.ts).internal,
0, 0, 0, int(policy))
per_policy_container_counts[int(policy)] += 1
self.broker._commit_puts()
self.assertEqual(num_containers,
self.broker.get_info()['container_count'])
# still un-migrated
self.assertUnmigrated(self.broker)
# run auditor, and validate migration
conf = {'devices': self.tempdir, 'mount_check': False,
'recon_cache_path': self.tempdir}
test_auditor = auditor.AccountAuditor(conf, logger=debug_logger())
test_auditor.run_once()
self.restore_account_broker()
broker = auditor.AccountBroker(self.db_path)
# go after rows directly to avoid unintentional migration
with broker.get() as conn:
rows = conn.execute('''
SELECT storage_policy_index, container_count
FROM policy_stat
''').fetchall()
for policy_index, container_count in rows:
self.assertEqual(container_count,
per_policy_container_counts[policy_index])
class TestAuditorRealBroker(unittest.TestCase):
def setUp(self):
self.logger = debug_logger()
@with_tempdir
def test_db_validate_fails(self, tempdir):
ts = (Timestamp(t).internal for t in itertools.count(int(time.time())))
db_path = os.path.join(tempdir, 'sda', 'accounts',
'0', '0', '0', 'test.db')
broker = auditor.AccountBroker(db_path, account='a')
broker.initialize(next(ts))
# add a few containers
policies = itertools.cycle(POLICIES)
num_containers = len(POLICIES) * 3
per_policy_container_counts = defaultdict(int)
for i in range(num_containers):
name = 'test-container-%02d' % i
policy = next(policies)
broker.put_container(name, next(ts), 0, 0, 0, int(policy))
per_policy_container_counts[int(policy)] += 1
broker._commit_puts()
self.assertEqual(broker.get_info()['container_count'], num_containers)
messed_up_policy = random.choice(list(POLICIES))
# now mess up a policy_stats table count
with broker.get() as conn:
conn.executescript('''
UPDATE policy_stat
SET container_count = container_count - 1
WHERE storage_policy_index = %d;
''' % int(messed_up_policy))
# validate it's messed up
policy_stats = broker.get_policy_stats()
self.assertEqual(
policy_stats[int(messed_up_policy)]['container_count'],
per_policy_container_counts[int(messed_up_policy)] - 1)
# do an audit
conf = {'devices': tempdir, 'mount_check': False,
'recon_cache_path': tempdir}
test_auditor = auditor.AccountAuditor(conf, logger=self.logger)
test_auditor.run_once()
# validate errors
self.assertEqual(test_auditor.failures, 1)
error_lines = test_auditor.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1)
error_message = error_lines[0]
self.assertIn(broker.db_file, error_message)
self.assertIn('container_count', error_message)
self.assertIn('does not match', error_message)
self.assertEqual(test_auditor.logger.get_increment_counts(),
{'failures': 1})
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 563,742,927,070,474,900 | 36.42963 | 79 | 0.63111 | false |
RPi-Distro/python-gpiozero | gpiozero/pins/pigpio.py | 3 | 21491 | # GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
# Copyright (c) 2016-2019 Dave Jones <[email protected]>
# Copyright (c) 2016 BuildTools <[email protected]>
# Copyright (c) 2016 Andrew Scheller <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import os
import pigpio
from . import SPI
from .pi import PiPin, PiFactory, SPI_HARDWARE_PINS
from .data import pi_info
from ..devices import Device
from ..mixins import SharedMixin
from ..exc import (
PinInvalidFunction,
PinSetInput,
PinFixedPull,
PinInvalidPull,
PinInvalidBounce,
PinInvalidState,
SPIBadArgs,
SPIInvalidClockMode,
PinPWMFixedValue,
DeviceClosed
)
class PiGPIOFactory(PiFactory):
"""
Extends :class:`~gpiozero.pins.pi.PiFactory`. Uses the `pigpio`_ library to
interface to the Pi's GPIO pins. The pigpio library relies on a daemon
(:command:`pigpiod`) to be running as root to provide access to the GPIO
pins, and communicates with this daemon over a network socket.
While this does mean only the daemon itself should control the pins, the
architecture does have several advantages:
* Pins can be remote controlled from another machine (the other
machine doesn't even have to be a Raspberry Pi; it simply needs the
`pigpio`_ client library installed on it)
* The daemon supports hardware PWM via the DMA controller
* Your script itself doesn't require root privileges; it just needs to
be able to communicate with the daemon
You can construct pigpio pins manually like so::
from gpiozero.pins.pigpio import PiGPIOFactory
from gpiozero import LED
factory = PiGPIOFactory()
led = LED(12, pin_factory=factory)
This is particularly useful for controlling pins on a remote machine. To
accomplish this simply specify the host (and optionally port) when
constructing the pin::
from gpiozero.pins.pigpio import PiGPIOFactory
from gpiozero import LED
factory = PiGPIOFactory(host='192.168.0.2')
led = LED(12, pin_factory=factory)
.. note::
In some circumstances, especially when playing with PWM, it does appear
to be possible to get the daemon into "unusual" states. We would be
most interested to hear any bug reports relating to this (it may be a
bug in our pin implementation). A workaround for now is simply to
restart the :command:`pigpiod` daemon.
.. _pigpio: https://pypi.org/project/pigpio/
"""
def __init__(self, host=None, port=None):
super(PiGPIOFactory, self).__init__()
if host is None:
host = os.environ.get('PIGPIO_ADDR', 'localhost')
if port is None:
# XXX Use getservbyname
port = int(os.environ.get('PIGPIO_PORT', 8888))
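        # e.g. setting PIGPIO_ADDR=192.168.0.2 (and optionally PIGPIO_PORT) in
        # the environment points this factory at a remote pigpiod without
        # passing host/port explicitly; the address here is only illustrative.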
self.pin_class = PiGPIOPin
self.spi_classes = {
('hardware', 'exclusive'): PiGPIOHardwareSPI,
('hardware', 'shared'): PiGPIOHardwareSPIShared,
('software', 'exclusive'): PiGPIOSoftwareSPI,
('software', 'shared'): PiGPIOSoftwareSPIShared,
}
self._connection = pigpio.pi(host, port)
# Annoyingly, pigpio doesn't raise an exception when it fails to make a
# connection; it returns a valid (but disconnected) pi object
if self.connection is None:
raise IOError('failed to connect to %s:%s' % (host, port))
self._host = host
self._port = port
self._spis = []
def close(self):
super(PiGPIOFactory, self).close()
# We *have* to keep track of SPI interfaces constructed with pigpio;
# if we fail to close them they prevent future interfaces from using
# the same pins
if self.connection:
while self._spis:
self._spis[0].close()
self.connection.stop()
self._connection = None
@property
def connection(self):
# If we're shutting down, the connection may have disconnected itself
# already. Unfortunately, the connection's "connected" property is
# rather buggy - disconnecting doesn't set it to False! So we're
# naughty and check an internal variable instead...
try:
if self._connection.sl.s is not None:
return self._connection
except AttributeError:
pass
@property
def host(self):
return self._host
@property
def port(self):
return self._port
def _get_revision(self):
return self.connection.get_hardware_revision()
def spi(self, **spi_args):
intf = super(PiGPIOFactory, self).spi(**spi_args)
self._spis.append(intf)
return intf
def ticks(self):
return self._connection.get_current_tick()
@staticmethod
def ticks_diff(later, earlier):
# NOTE: pigpio ticks are unsigned 32-bit quantities that wrap every
# 71.6 minutes. The modulo below (oh the joys of having an *actual*
# modulo operator, unlike C's remainder) ensures the result is valid
# even when later < earlier due to wrap-around (assuming the duration
# measured is not longer than the period)
return ((later - earlier) % 0x100000000) / 1000000
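# A minimal worked example of the wrap-around arithmetic above, assuming
# 32-bit pigpio ticks (the values are illustrative):
#
#   earlier = 0xFFFFFF38   # 200 microseconds before the counter wraps
#   later   = 0x000000C8   # 200 microseconds after the wrap
#   ((later - earlier) % 0x100000000) / 1000000  ->  0.0004 seconds
#
# so the difference stays small and positive even across the wrap.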
class PiGPIOPin(PiPin):
"""
Extends :class:`~gpiozero.pins.pi.PiPin`. Pin implementation for the
`pigpio`_ library. See :class:`PiGPIOFactory` for more information.
.. _pigpio: http://abyz.co.uk/rpi/pigpio/
"""
_CONNECTIONS = {} # maps (host, port) to (connection, pi_info)
GPIO_FUNCTIONS = {
'input': pigpio.INPUT,
'output': pigpio.OUTPUT,
'alt0': pigpio.ALT0,
'alt1': pigpio.ALT1,
'alt2': pigpio.ALT2,
'alt3': pigpio.ALT3,
'alt4': pigpio.ALT4,
'alt5': pigpio.ALT5,
}
GPIO_PULL_UPS = {
'up': pigpio.PUD_UP,
'down': pigpio.PUD_DOWN,
'floating': pigpio.PUD_OFF,
}
GPIO_EDGES = {
'both': pigpio.EITHER_EDGE,
'rising': pigpio.RISING_EDGE,
'falling': pigpio.FALLING_EDGE,
}
GPIO_FUNCTION_NAMES = {v: k for (k, v) in GPIO_FUNCTIONS.items()}
GPIO_PULL_UP_NAMES = {v: k for (k, v) in GPIO_PULL_UPS.items()}
GPIO_EDGES_NAMES = {v: k for (k, v) in GPIO_EDGES.items()}
def __init__(self, factory, number):
super(PiGPIOPin, self).__init__(factory, number)
self._pull = 'up' if self.factory.pi_info.pulled_up(repr(self)) else 'floating'
self._pwm = False
self._bounce = None
self._callback = None
self._edges = pigpio.EITHER_EDGE
try:
self.factory.connection.set_mode(self.number, pigpio.INPUT)
except pigpio.error as e:
raise ValueError(e)
self.factory.connection.set_pull_up_down(self.number, self.GPIO_PULL_UPS[self._pull])
self.factory.connection.set_glitch_filter(self.number, 0)
def close(self):
if self.factory.connection:
self.frequency = None
self.when_changed = None
self.function = 'input'
self.pull = 'up' if self.factory.pi_info.pulled_up(repr(self)) else 'floating'
def _get_function(self):
return self.GPIO_FUNCTION_NAMES[self.factory.connection.get_mode(self.number)]
def _set_function(self, value):
if value != 'input':
self._pull = 'floating'
try:
self.factory.connection.set_mode(self.number, self.GPIO_FUNCTIONS[value])
except KeyError:
raise PinInvalidFunction('invalid function "%s" for pin %r' % (value, self))
def _get_state(self):
if self._pwm:
return (
self.factory.connection.get_PWM_dutycycle(self.number) /
self.factory.connection.get_PWM_range(self.number)
)
else:
return bool(self.factory.connection.read(self.number))
def _set_state(self, value):
if self._pwm:
try:
value = int(value * self.factory.connection.get_PWM_range(self.number))
if value != self.factory.connection.get_PWM_dutycycle(self.number):
self.factory.connection.set_PWM_dutycycle(self.number, value)
except pigpio.error:
raise PinInvalidState('invalid state "%s" for pin %r' % (value, self))
elif self.function == 'input':
raise PinSetInput('cannot set state of pin %r' % self)
else:
# write forces pin to OUTPUT, hence the check above
self.factory.connection.write(self.number, bool(value))
def _get_pull(self):
return self._pull
def _set_pull(self, value):
if self.function != 'input':
raise PinFixedPull('cannot set pull on non-input pin %r' % self)
if value != 'up' and self.factory.pi_info.pulled_up(repr(self)):
raise PinFixedPull('%r has a physical pull-up resistor' % self)
try:
self.factory.connection.set_pull_up_down(self.number, self.GPIO_PULL_UPS[value])
self._pull = value
except KeyError:
raise PinInvalidPull('invalid pull "%s" for pin %r' % (value, self))
def _get_frequency(self):
if self._pwm:
return self.factory.connection.get_PWM_frequency(self.number)
return None
def _set_frequency(self, value):
if not self._pwm and value is not None:
if self.function != 'output':
raise PinPWMFixedValue('cannot start PWM on pin %r' % self)
# NOTE: the pin's state *must* be set to zero; if it's currently
# high, starting PWM and setting a 0 duty-cycle *doesn't* bring
# the pin low; it stays high!
self.factory.connection.write(self.number, 0)
self.factory.connection.set_PWM_frequency(self.number, value)
self.factory.connection.set_PWM_range(self.number, 10000)
self.factory.connection.set_PWM_dutycycle(self.number, 0)
self._pwm = True
elif self._pwm and value is not None:
if value != self.factory.connection.get_PWM_frequency(self.number):
self.factory.connection.set_PWM_frequency(self.number, value)
self.factory.connection.set_PWM_range(self.number, 10000)
elif self._pwm and value is None:
self.factory.connection.write(self.number, 0)
self._pwm = False
def _get_bounce(self):
return None if not self._bounce else self._bounce / 1000000
def _set_bounce(self, value):
if value is None:
value = 0
elif value < 0:
raise PinInvalidBounce('bounce must be 0 or greater')
self.factory.connection.set_glitch_filter(self.number, int(value * 1000000))
def _get_edges(self):
return self.GPIO_EDGES_NAMES[self._edges]
def _set_edges(self, value):
f = self.when_changed
self.when_changed = None
try:
self._edges = self.GPIO_EDGES[value]
finally:
self.when_changed = f
def _call_when_changed(self, gpio, level, ticks):
super(PiGPIOPin, self)._call_when_changed(ticks, level)
def _enable_event_detect(self):
self._callback = self.factory.connection.callback(
self.number, self._edges, self._call_when_changed)
def _disable_event_detect(self):
if self._callback is not None:
self._callback.cancel()
self._callback = None
class PiGPIOHardwareSPI(SPI, Device):
"""
Hardware SPI implementation for the `pigpio`_ library. Uses the ``spi_*``
functions from the pigpio API.
.. _pigpio: http://abyz.co.uk/rpi/pigpio/
"""
def __init__(self, factory, port, device):
self._port = port
self._device = device
self._factory = factory
self._handle = None
super(PiGPIOHardwareSPI, self).__init__()
pins = SPI_HARDWARE_PINS[port]
self._factory.reserve_pins(
self,
pins['clock'],
pins['mosi'],
pins['miso'],
pins['select'][device]
)
self._spi_flags = 8 << 16
self._baud = 500000
self._handle = self._factory.connection.spi_open(
device, self._baud, self._spi_flags)
def _conflicts_with(self, other):
return not (
isinstance(other, PiGPIOHardwareSPI) and
(self._port, self._device) != (other._port, other._device)
)
def close(self):
try:
self._factory._spis.remove(self)
except (ReferenceError, ValueError):
# If the factory has died already or we're not present in its
# internal list, ignore the error
pass
if not self.closed:
self._factory.connection.spi_close(self._handle)
self._handle = None
self._factory.release_all(self)
super(PiGPIOHardwareSPI, self).close()
@property
def closed(self):
return self._handle is None or self._factory.connection is None
@property
def factory(self):
return self._factory
def __repr__(self):
try:
self._check_open()
return 'SPI(port=%d, device=%d)' % (self._port, self._device)
except DeviceClosed:
return 'SPI(closed)'
def _get_clock_mode(self):
return self._spi_flags & 0x3
def _set_clock_mode(self, value):
self._check_open()
if not 0 <= value < 4:
raise SPIInvalidClockMode("%d is not a valid SPI clock mode" % value)
self._factory.connection.spi_close(self._handle)
self._spi_flags = (self._spi_flags & ~0x3) | value
self._handle = self._factory.connection.spi_open(
self._device, self._baud, self._spi_flags)
def _get_select_high(self):
return bool((self._spi_flags >> (2 + self._device)) & 0x1)
def _set_select_high(self, value):
self._check_open()
self._factory.connection.spi_close(self._handle)
self._spi_flags = (self._spi_flags & ~0x1c) | (bool(value) << (2 + self._device))
self._handle = self._factory.connection.spi_open(
self._device, self._baud, self._spi_flags)
def _get_bits_per_word(self):
return (self._spi_flags >> 16) & 0x3f
def _set_bits_per_word(self, value):
self._check_open()
self._factory.connection.spi_close(self._handle)
self._spi_flags = (self._spi_flags & ~0x3f0000) | ((value & 0x3f) << 16)
self._handle = self._factory.connection.spi_open(
self._device, self._baud, self._spi_flags)
def transfer(self, data):
self._check_open()
count, data = self._factory.connection.spi_xfer(self._handle, data)
if count < 0:
raise IOError('SPI transfer error %d' % count)
# Convert returned bytearray to list of ints. XXX Not sure how non-byte
# sized words (aux intf only) are returned ... padded to 16/32-bits?
return [int(b) for b in data]
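# A minimal sketch of how the accessors above pack the spi_flags word,
# assuming the layout implied by the bit masks used in this class:
#
#   bits 0-1    clock mode (CPOL/CPHA)
#   bit  2 + n  chip-select n is active-high when set
#   bits 16-21  word size in bits (the initial 8 << 16 selects 8-bit words)
#
# e.g. a mode-3, 8-bit, active-low-select configuration corresponds to
# (8 << 16) | 3 == 0x80003.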
class PiGPIOSoftwareSPI(SPI, Device):
"""
Software SPI implementation for the `pigpio`_ library. Uses the ``bb_spi_*``
functions from the pigpio API.
.. _pigpio: http://abyz.co.uk/rpi/pigpio/
"""
def __init__(self, factory, clock_pin, mosi_pin, miso_pin, select_pin):
self._closed = True
self._select_pin = select_pin
self._clock_pin = clock_pin
self._mosi_pin = mosi_pin
self._miso_pin = miso_pin
self._factory = factory
super(PiGPIOSoftwareSPI, self).__init__()
self._factory.reserve_pins(
self,
clock_pin,
mosi_pin,
miso_pin,
select_pin,
)
self._spi_flags = 0
self._baud = 100000
try:
self._factory.connection.bb_spi_open(
select_pin, miso_pin, mosi_pin, clock_pin,
self._baud, self._spi_flags)
# Only set after opening bb_spi; if that fails then close() will
# also fail if bb_spi_close is attempted on an un-open interface
self._closed = False
except:
self.close()
raise
def _conflicts_with(self, other):
return not (
isinstance(other, PiGPIOSoftwareSPI) and
(self._select_pin) != (other._select_pin)
)
def close(self):
try:
self._factory._spis.remove(self)
except (ReferenceError, ValueError):
# If the factory has died already or we're not present in its
# internal list, ignore the error
pass
if not self.closed:
self._closed = True
self._factory.connection.bb_spi_close(self._select_pin)
self.factory.release_all(self)
super(PiGPIOSoftwareSPI, self).close()
@property
def closed(self):
return self._closed
def __repr__(self):
try:
self._check_open()
return (
'SPI(clock_pin=%d, mosi_pin=%d, miso_pin=%d, select_pin=%d)' % (
self._clock_pin, self._mosi_pin, self._miso_pin, self._select_pin
))
except DeviceClosed:
return 'SPI(closed)'
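    # NOTE: the _spi_flags helper below appears to be unused; attribute
    # lookups find the integer _spi_flags value assigned in __init__ before
    # they would ever reach this method.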
def _spi_flags(self):
return (
self._mode << 0 |
self._select_high << 2 |
self._lsb_first << 14 |
self._lsb_first << 15
)
def _get_clock_mode(self):
return self._spi_flags & 0x3
def _set_clock_mode(self, value):
self._check_open()
if not 0 <= value < 4:
raise SPIInvalidClockMode("%d is not a valid SPI clock mode" % value)
self._factory.connection.bb_spi_close(self._select_pin)
self._spi_flags = (self._spi_flags & ~0x3) | value
self._factory.connection.bb_spi_open(
self._select_pin, self._miso_pin, self._mosi_pin, self._clock_pin,
self._baud, self._spi_flags)
def _get_select_high(self):
return bool(self._spi_flags & 0x4)
def _set_select_high(self, value):
self._check_open()
self._factory.connection.bb_spi_close(self._select_pin)
self._spi_flags = (self._spi_flags & ~0x4) | (bool(value) << 2)
self._factory.connection.bb_spi_open(
self._select_pin, self._miso_pin, self._mosi_pin, self._clock_pin,
self._baud, self._spi_flags)
def _get_lsb_first(self):
return bool(self._spi_flags & 0xc000)
def _set_lsb_first(self, value):
self._check_open()
self._factory.connection.bb_spi_close(self._select_pin)
self._spi_flags = (
(self._spi_flags & ~0xc000)
| (bool(value) << 14)
| (bool(value) << 15)
)
self._factory.connection.bb_spi_open(
self._select_pin, self._miso_pin, self._mosi_pin, self._clock_pin,
self._baud, self._spi_flags)
def transfer(self, data):
self._check_open()
count, data = self._factory.connection.bb_spi_xfer(self._select_pin, data)
if count < 0:
raise IOError('SPI transfer error %d' % count)
# Convert returned bytearray to list of ints. bb_spi only supports
# byte-sized words so no issues here
return [int(b) for b in data]
class PiGPIOHardwareSPIShared(SharedMixin, PiGPIOHardwareSPI):
@classmethod
def _shared_key(cls, factory, port, device):
return (factory, port, device)
class PiGPIOSoftwareSPIShared(SharedMixin, PiGPIOSoftwareSPI):
@classmethod
def _shared_key(cls, factory, clock_pin, mosi_pin, miso_pin, select_pin):
return (factory, select_pin)
| bsd-3-clause | 8,622,872,391,645,558,000 | 35.926117 | 93 | 0.606626 | false |
rmkoesterer/uga | uga/__main__.py | 1 | 22945 | #!/usr/bin/env python
## Copyright (c) 2015 Ryan Koesterer GNU General Public License v3
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import numpy as np
import pandas as pd
from collections import OrderedDict
import glob
from configparser import SafeConfigParser
from pkg_resources import resource_filename
import signal
import subprocess
import shutil
from uga import Parse
from uga import Process
from uga import Map
from uga import Fxns
import pickle
from Bio import bgzf
def main(args=None):
rerun = []
args = Parse.get_args(Parse.get_parser())
resubmit = False
if args.which in ['snv','snvgroup','meta','merge','resubmit','tools']:
if args.which == 'resubmit':
with open(args.dir + '/' + os.path.basename(args.dir) + '.args.pkl', 'rb') as p:
qsub = args.qsub if args.qsub else None
args,cfg = pickle.load(p)
if qsub:
cfg['qsub'] = qsub
with open(cfg['out'] + '/' + os.path.basename(cfg['out']) + '.rerun', 'r') as f:
rerun = [int(line.rstrip()) for line in f]
cfg['replace'] = True
resubmit = True
else:
cfg = getattr(Parse, 'generate_' + args.which + '_cfg')(args.ordered_args)
elif args.which != 'settings':
cfg = getattr(Parse, 'generate_' + args.which + '_cfg')(args.ordered_args)
##### read settings file #####
ini = SafeConfigParser()
ini.read(resource_filename('uga', 'settings.ini'))
##### locate qsub wrapper #####
qsub_wrapper = ini.get('main','wrapper')
if 'qsub' in args and not os.access(ini.get('main','wrapper'),os.X_OK):
print(Process.print_error('uga qsub wrapper ' + ini.get('main','wrapper') + ' is not executable'))
return
##### distribute jobs #####
if args.which in ['snv','snvgroup','meta','merge','tools']:
run_type = 0
if cfg['cpus'] is not None and cfg['cpus'] > 1:
run_type = run_type + 1
if cfg['split'] and cfg['qsub'] is not None:
run_type = run_type + 10
if cfg['split_n'] and cfg['qsub'] is not None:
run_type = run_type + 100
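		# For example (a sketch of the encoding above): running with --cpus 4
		# and --split-n 10 under qsub yields run_type = 1 + 100 = 101, i.e. 10
		# array tasks with each task's regions spread over 4 cpus.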
if resubmit:
jobs_df = pd.read_table(cfg['out'] + '/' + cfg['out'] + '.jobs')
else:
if args.which in ['snv','tools']:
# generate regions dataframe with M rows, either from --snv-map or by splitting data file or --snv-region according to --mb
# run_type = 0: run as single job
# run_type = 1: --cpus C (distribute M regions over C cpus and run single job, 1 job C cpus)
# run_type = 10: --split (split M regions into single region jobs, M jobs 1 cpu)
# run_type = 100: --split-n N (distribute M regions over N jobs, N jobs 1 cpu)
# run_type = 11: --split, --cpus C (split M regions into chunks of size M / C and run M jobs, M jobs C cpus)
# run_type = 101: --split-n N, --cpus C (distribute M regions over N jobs and distribute each over C cpus, N jobs C cpus)
if cfg['region_file']:
jobs_df = pd.read_table(cfg['region_file'],header=None,names=['region'], compression='gzip' if cfg['region_file'].split('.')[-1] == 'gz' else None)
jobs_df['chr'] = [x.split(':')[0] for x in jobs_df['region']]
jobs_df['chr_idx'] = [int(x.split(':')[0].replace('X','23').replace('Y','24')) for x in jobs_df['region']]
jobs_df['start'] = [int(x.split(':')[1].split('-')[0]) for x in jobs_df['region']]
jobs_df['end'] = [int(x.split(':')[1].split('-')[1]) for x in jobs_df['region']]
jobs_df['job'] = 1
jobs_df['cpu'] = 1
else:
snv_map = []
data_files = []
if args.which == 'snv':
for m in cfg['models']:
if cfg['models'][m]['file'] not in data_files:
snv_map.extend(Map.map(file=cfg['models'][m]['file'], mb = cfg['mb'], region = cfg['region']))
data_files.append(cfg['models'][m]['file'])
else:
snv_map.extend(Map.map(file=cfg['file'], mb = cfg['mb'], region = cfg['region']))
snv_map = list(set(snv_map))
jobs_df = pd.DataFrame({'region': snv_map, 'chr': [x.split(':')[0] for x in snv_map], 'chr_idx': [int(x.split(':')[0].replace('X','23').replace('Y','24')) for x in snv_map], 'start': [int(x.split(':')[1].split('-')[0]) for x in snv_map], 'end': [int(x.split(':')[1].split('-')[1]) for x in snv_map]})
jobs_df['job'] = 1
jobs_df['cpu'] = 1
del data_files
del snv_map
jobs_df.sort_values(by=['chr_idx','start'],inplace=True)
jobs_df = jobs_df[['chr','start','end','region','job','cpu']]
jobs_df.reset_index(drop=True,inplace=True)
if args.which in ['meta','merge']:
# generate regions dataframe with M rows, either from --snv-map or by splitting data file or --snv-region according to --mb
# run_type = 0: run as single job
# run_type = 1: --cpus C (distribute M regions over C cpus and run single job, 1 job C cpus)
# run_type = 10: --split (split M regions into single region jobs, M jobs 1 cpu)
# run_type = 100: --split-n N (distribute M regions over N jobs, N jobs 1 cpu)
# run_type = 11: --split, --cpus C (split M regions into chunks of size M / C and run M jobs, M jobs C cpus)
# run_type = 101: --split-n N, --cpus C (distribute M regions over N jobs and distribute each over C cpus, N jobs C cpus)
if cfg['region_file']:
jobs_df = pd.read_table(cfg['region_file'],header=None,names=['region'], compression='gzip' if cfg['region_file'].split('.')[-1] == 'gz' else None)
jobs_df['chr'] = [int(x.split(':')[0]) for x in jobs_df['region']]
jobs_df['start'] = [int(x.split(':')[1].split('-')[0]) for x in jobs_df['region']]
jobs_df['end'] = [int(x.split(':')[1].split('-')[1]) for x in jobs_df['region']]
jobs_df['job'] = 1
jobs_df['cpu'] = 1
else:
snv_map = []
data_files = []
for f in cfg['files']:
if f not in data_files:
snv_map.extend(Map.map(file=cfg['files'][f], mb = cfg['mb'], region = cfg['region']))
data_files.append(cfg['files'][f])
snv_map = list(set(snv_map))
jobs_df = pd.DataFrame({'region': snv_map, 'chr': [int(x.split(':')[0]) for x in snv_map], 'start': [int(x.split(':')[1].split('-')[0]) for x in snv_map], 'end': [int(x.split(':')[1].split('-')[1]) for x in snv_map]})
jobs_df['job'] = 1
jobs_df['cpu'] = 1
del data_files
del snv_map
jobs_df = jobs_df[['chr','start','end','region','job','cpu']]
jobs_df.sort_values(by=['chr','start'],inplace=True)
jobs_df.reset_index(drop=True,inplace=True)
if args.which == 'snvgroup':
# generate regions dataframe with M rows from --snvgroup-map
# run_type = 0: run as single job
# run_type = 1: --cpus C (distribute M snvgroups over C cpus and run single job, 1 job C cpus)
# run_type = 10: --split (split M snvgroups into single region jobs, M jobs 1 cpu)
# run_type = 100: --split-n N (distribute M snvgroups over N jobs, N jobs 1 cpu)
# run_type = 101: --split-n N, --cpus C (distribute M snvgroups over N jobs and distribute each job over C cpus, N jobs C cpus)
if cfg['region_file']:
jobs_df = pd.read_table(cfg['region_file'],header=None,names=['region','group_id'], compression='gzip' if cfg['region_file'].split('.')[-1] == 'gz' else None)
jobs_df['chr'] = [int(x.split(':')[0]) for x in jobs_df['region']]
jobs_df['chr_idx'] = 1
jobs_df['start'] = [int(x.split(':')[1].split('-')[0]) for x in jobs_df['region']]
jobs_df['end'] = [int(x.split(':')[1].split('-')[1]) for x in jobs_df['region']]
jobs_df['job'] = 1
jobs_df['cpu'] = 1
jobs_df = jobs_df[['chr','start','end','region','group_id','job','cpu']]
jobs_df.sort_values(by=['chr','start'],inplace=True)
jobs_df.reset_index(drop=True,inplace=True)
elif cfg['region']:
snv_map = []
data_files = []
for m in cfg['models']:
if cfg['models'][m]['file'] not in data_files:
snv_map.extend(Map.map(file=cfg['models'][m]['file'], mb = 1000, region = cfg['region']))
data_files.append(cfg['models'][m]['file'])
snv_map = list(set(snv_map))
jobs_df = pd.DataFrame({'region': snv_map, 'chr': [int(x.split(':')[0]) for x in snv_map], 'start': [int(x.split(':')[1].split('-')[0]) for x in snv_map], 'end': [int(x.split(':')[1].split('-')[1]) for x in snv_map]})
jobs_df['group_id'] = cfg['region']
jobs_df['job'] = 1
jobs_df['cpu'] = 1
del data_files
del snv_map
jobs_df = jobs_df[['chr','start','end','region','group_id','job','cpu']]
jobs_df.sort_values(by=['chr','start'],inplace=True)
jobs_df.reset_index(drop=True,inplace=True)
else:
if cfg['snvgroup_map']:
snvgroup_map = pd.read_table(cfg['snvgroup_map'],header=None,names=['chr','pos','marker','group_id'], compression='gzip' if cfg['snvgroup_map'].split('.')[-1] == 'gz' else None)
jobs_df = snvgroup_map[['chr','pos','group_id']]
jobs_df=jobs_df.groupby(['chr','group_id'])
jobs_df = jobs_df.agg({'pos': [np.min,np.max]})
jobs_df.columns = ['start','end']
jobs_df['chr'] = jobs_df.index.get_level_values('chr')
jobs_df['group_id'] = jobs_df.index.get_level_values('group_id')
jobs_df['region'] = jobs_df.chr.map(str) + ':' + jobs_df.start.map(str) + '-' + jobs_df.end.map(str)
jobs_df['job'] = 1
jobs_df['cpu'] = 1
jobs_df = jobs_df[['chr','start','end','region','group_id','job','cpu']]
jobs_df.drop_duplicates(inplace=True)
jobs_df.sort_values(by=['chr','start'],inplace=True)
jobs_df.reset_index(drop=True,inplace=True)
if jobs_df.empty:
print(Process.print_error('job list is empty, no variants found in region/s specified'))
return
if run_type == 1:
n = int(np.ceil(jobs_df.shape[0] / float(cfg['cpus'])))
n_remain = int(jobs_df.shape[0] - (n-1) * cfg['cpus'])
jobs_df['cpu'] = np.append(np.repeat(list(range(cfg['cpus']))[:n_remain],n),np.repeat(list(range(cfg['cpus']))[n_remain:],n-1)).astype(np.int64) + 1
elif run_type == 10:
jobs_df['job'] = jobs_df.index.values + 1
elif run_type == 100:
n = int(np.ceil(jobs_df.shape[0] / float(cfg['split_n'])))
n_remain = int(jobs_df.shape[0] - (n-1) * cfg['split_n'])
jobs_df['job'] = np.append(np.repeat(list(range(cfg['split_n']))[:n_remain],n),np.repeat(list(range(cfg['split_n']))[n_remain:],n-1)).astype(np.int64) + 1
elif run_type == 11 and args.which != 'snvgroup':
cfg['split_n'] = int(np.ceil(jobs_df.shape[0] / float(cfg['cpus'])))
n = int(np.ceil(jobs_df.shape[0] / float(cfg['split_n'])))
n_remain = int(jobs_df.shape[0] - (n-1) * cfg['split_n'])
jobs_df['job'] = np.append(np.repeat(list(range(cfg['split_n']))[:n_remain],n),np.repeat(list(range(cfg['split_n']))[n_remain:],n-1)).astype(np.int64) + 1
for i in range(1,int(max(jobs_df['job'])) + 1):
n = int(np.ceil(jobs_df[jobs_df['job'] == i].shape[0] / float(cfg['cpus'])))
n_remain = int(jobs_df[jobs_df['job'] == i].shape[0] - (n-1) * cfg['cpus'])
jobs_df.loc[jobs_df['job'] == i,'cpu'] = np.append(np.repeat(list(range(cfg['cpus']))[:n_remain],n),np.repeat(list(range(cfg['cpus']))[n_remain:],n-1)).astype(np.int64) + 1
cfg['split'] = None
elif run_type == 101:
n = int(np.ceil(jobs_df.shape[0] / float(cfg['split_n'])))
n_remain = int(jobs_df.shape[0] - (n-1) * cfg['split_n'])
jobs_df['job'] = np.append(np.repeat(list(range(cfg['split_n']))[:n_remain],n),np.repeat(list(range(cfg['split_n']))[n_remain:],n-1)).astype(np.int64) + 1
for i in range(1,int(max(jobs_df['job'])) + 1):
n = int(np.ceil(jobs_df[jobs_df['job'] == i].shape[0] / float(cfg['cpus'])))
n_remain = int(jobs_df[jobs_df['job'] == i].shape[0] - (n-1) * cfg['cpus'])
jobs_df.loc[jobs_df['job'] == i,'cpu'] = np.append(np.repeat(list(range(cfg['cpus']))[:n_remain],n),np.repeat(list(range(cfg['cpus']))[n_remain:],n-1)).astype(np.int64) + 1
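		# A small worked example of the split arithmetic used above
		# (hypothetical numbers): distributing 10 regions over 3 cpus gives
		# n = ceil(10 / 3) = 4 and n_remain = 10 - (4 - 1) * 3 = 1, so the
		# first cpu receives 4 regions and the remaining two receive 3 each.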
if int(max(jobs_df['job'])) + 1 > 100000:
print(Process.print_error('number of jobs exceeds 100,000, consider using --split-n to reduce the total number of jobs'))
return
if args.which in ['snv','snvgroup','meta','merge','tools']:
print('detected run type ' + str(run_type) + ' ...')
if len(rerun) == 0:
if int(max(jobs_df['job'])) > 1 and cfg['qsub'] is not None:
if 'mb' in cfg:
print(' ' + str(jobs_df.shape[0]) + ' regions of size ' + str(cfg['mb']) + 'mb detected')
else:
print(' ' + str(jobs_df.shape[0]) + ' regions detected')
print(' an array containing ' + str(int(max(jobs_df['job']))) + ' tasks will be submitted')
print(' <= ' + str(max(np.bincount(jobs_df['job']))) + ' regions per task')
print(' <= ' + str(int(max(jobs_df['cpu']))) + ' cpus per task')
print(' qsub options: ' + cfg['qsub'])
print(' output directory: ' + cfg['out'])
print(' replace: ' + str(cfg['replace']))
input_var = None
while input_var not in ['y','n','Y','N']:
input_var = input('\nsubmit jobs (yY/nN)? ')
if input_var.lower() == 'n':
print('canceled by user')
return
if os.path.exists(cfg['out']):
if args.replace:
print('deleting old data')
try:
shutil.rmtree(cfg['out'])
except OSError:
print(Process.print_error('unable to replace results directory ' + cfg['out']))
else:
print(Process.print_error('results directory ' + cfg['out'] + ' already exists, use --replace to overwrite existing results'))
return
try:
os.mkdir(cfg['out'])
except OSError:
pass
with open(cfg['out'] + '/' + os.path.basename(cfg['out']) + '.args.pkl', 'wb') as p:
pickle.dump([args, cfg], p)
if run_type in [10,11,100,101] and jobs_df.shape[0] > 1:
print("initializing job array database ...")
try:
os.mkdir(cfg['out'] + '/temp')
except OSError:
pass
for j in range(1, int(max(jobs_df['job'])) + 1):
try:
os.mkdir(cfg['out'] + '/jobs' + str(int(100 * ((j-1) // 100) + 1)) + '-' + str(int(100 * ((j-1) // 100) + 100)))
except OSError:
pass
try:
os.mkdir(cfg['out'] + '/jobs' + str(int(100 * ((j-1) // 100) + 1)) + '-' + str(int(100 * ((j-1) // 100) + 100)) + '/job' + str(j))
except OSError:
pass
print(cfg['out'])
with open(cfg['out'] + '/' + cfg['out'] + '.files', 'w') as jlist:
for j in range(1, int(max(jobs_df['job'])) + 1):
if args.which in ['snv','snvgroup','tools','merge']:
if 'model_order' in cfg:
for m in cfg['model_order']:
if m != '___no_tag___':
jlist.write(str(j) + '\t' + cfg['out'] + '.' + m + '.gz' + '\t' + cfg['out'] + '/jobs' + str(int(100 * ((j-1) // 100) + 1)) + '-' + str(int(100 * ((j-1) // 100) + 100)) + '/job' + str(j) + '/' + cfg['out'] + '.job' + str(j) + '.' + m + '.gz\n')
else:
jlist.write(str(j) + '\t' + cfg['out'] + '.gz' + '\t' + cfg['out'] + '/jobs' + str(int(100 * ((j-1) // 100) + 1)) + '-' + str(int(100 * ((j-1) // 100) + 100)) + '/job' + str(j) + '/' + cfg['out'] + '.job' + str(j) + '.gz\n')
else:
jlist.write(str(j) + '\t' + cfg['out'] + '.gz' + '\t' + cfg['out'] + '/jobs' + str(int(100 * ((j-1) // 100) + 1)) + '-' + str(int(100 * ((j-1) // 100) + 100)) + '/job' + str(j) + '/' + cfg['out'] + '.job' + str(j) + '.gz\n')
if 'meta_order' in cfg:
if len(cfg['meta_order']) > 0:
for m in cfg['meta_order']:
jlist.write(str(j) + '\t' + cfg['out'] + '.' + m + '.gz' + '\t' + cfg['out'] + '/jobs' + str(int(100 * ((j-1) // 100) + 1)) + '-' + str(int(100 * ((j-1) // 100) + 100)) + '/job' + str(j) + '/' + cfg['out'] + '.job' + str(j) + '.' + m + '.gz\n')
jobs_df.to_csv(cfg['out'] + '/' + cfg['out'] + '.jobs',header=True,index=False,sep="\t")
with open(cfg['out'] + '/' + cfg['out'] + '.jobs.run','w') as f:
f.write("\n".join([str(x) for x in jobs_df['job'].unique()]))
else:
if len(rerun) > 0 and cfg['qsub'] is not None:
print('detected resubmit ...')
print(' an array containing ' + str(len(rerun)) + ' tasks will be submitted')
print(' <= ' + str(max(np.bincount(jobs_df['job']))) + ' regions per job')
print(' <= ' + str(int(max(jobs_df['cpu']))) + ' cpus per job')
print(' qsub options: ' + cfg['qsub'])
print(' output directory: ' + cfg['out'])
print(' replace: ' + str(cfg['replace']))
input_var = None
while input_var not in ['y','n','Y','N']:
input_var = input('\nresubmit jobs (yY/nN)? ')
if input_var.lower() == 'n':
print('canceled by user')
return
with open(cfg['out'] + '/' + cfg['out'] + '.jobs.run','w') as f:
f.write("\n".join([str(x) for x in jobs_df['job'][jobs_df['job'].isin(rerun)]]))
os.remove(cfg['out'] + '/' + os.path.basename(cfg['out']) + '.rerun')
if args.which == 'settings':
if 'ordered_args' in args:
for k in args.ordered_args:
ini.set('main',k[0],k[1])
with open(resource_filename('uga', 'settings.ini'), 'w') as f:
ini.write(f)
print('main settings ...')
for s in ini.sections():
for k in ini.options(s):
print(' ' + k + ' = ' + ini.get(s,k))
elif args.which in ['snv','snvgroup','meta','merge','resubmit','tools']:
if cfg['qsub'] is not None:
print("submitting jobs\n")
out = cfg['out']
joblist = list(range(1, int(max(jobs_df['job'])) + 1)) if len(rerun) == 0 else rerun
if int(max(jobs_df['job'])) > 1:
cfg['out'] = out + '/jobsUGA_JOB_RANGE/jobUGA_JOB_ID/' + os.path.basename(out) + '.jobUGA_JOB_ID'
cfg['job'] = 'UGA_JOB_ID'
if cfg['qsub'] is not None:
cfg['qsub'] = cfg['qsub'] + ' -t 1-' + str(len(joblist))
else:
cfg['out'] = out + '/' + os.path.basename(out)
cfg['job'] = 1
if cfg['qsub'] is not None:
cfg['qsub'] = cfg['qsub'] + ' -t 1'
args.ordered_args = [('out',cfg['out']),('region_file',out + '/' + out + '.jobs'),('job',cfg['job']),('cpus',int(max(jobs_df['cpu'])))] + [x for x in args.ordered_args if x[0] not in ['out','region_file','cpus']]
cmd = 'Run' + args.which.capitalize() + '(' + str(args.ordered_args) + ')'
if cfg['qsub'] is not None:
Process.qsub(['qsub'] + cfg['qsub'].split() + ['-N',out,'-o',out + '/temp',qsub_wrapper],'\"' + cmd + '\"',out + '/' + out + '.jobs.run',cfg['out'] + '.log')
else:
Process.interactive(qsub_wrapper, cmd, cfg['out'] + '.' + args.which + '.log')
elif args.which == 'compile':
files = pd.read_table(args.dir + '/' + os.path.basename(args.dir) + '.files', names=['job','out','file'])
complete, rerun = Fxns.verify_results(args.dir,files)
if len(rerun) > 0:
print(Process.print_error('detected ' + str(len(rerun)) + ' failed jobs\n use resubmit module to rerun failed jobs'))
with open(args.dir + '/' + os.path.basename(args.dir) + '.rerun', 'w') as f:
f.write("\n".join([str(x) for x in rerun]))
else:
complete = Fxns.compile_results(args.dir,files)
if complete:
input_var = None
while input_var not in ['y','n','Y','N']:
				input_var = input('delete obsolete job subdirectories and files for this project (yY/nN)? ')
if input_var.lower() == 'n':
print('canceled by user')
else:
print('deleting subdirectories')
for d in glob.glob(args.dir + '/jobs*-*'):
try:
shutil.rmtree(d)
except OSError:
print(Process.print_error('unable to delete job data directory ' + d))
print('deleting temporary directory')
try:
shutil.rmtree(args.dir + '/temp')
except OSError:
print(Process.print_error('unable to delete temporary directory ' + args.dir + '/temp'))
print("deleting last job run list")
try:
os.remove(args.dir + '/' + os.path.basename(args.dir) + '.jobs.run')
except OSError:
print(Process.print_error('unable to delete job run list ' + args.dir + '/' + os.path.basename(args.dir) + '.jobs.run'))
else:
print(Process.print_error('file compilation incomplete'))
elif args.which in ['snvgroupplot','snvplot']:
cfg['out'] = '.'.join(cfg['file'].split('.')[0:len(cfg['file'].split('.'))-1]) + '.' + args.which
args.ordered_args = [('out',cfg['out'])] + [x for x in args.ordered_args if x[0] not in ['out']]
cmd = 'Run' + args.which.capitalize() + '(' + str(args.ordered_args) + ')'
if cfg['qsub'] is not None:
Process.qsub(['qsub'] + cfg['qsub'].split() + ['-o',cfg['out'] + '.log',qsub_wrapper],'\"' + cmd + '\"')
else:
Process.interactive(qsub_wrapper, cmd, cfg['out'] + '.log')
elif args.which == 'filter':
if os.path.exists(cfg['file'].replace('.gz','.' + cfg['tag'] + '.log')):
if args.replace:
try:
os.remove(cfg['file'].replace('.gz','.' + cfg['tag'] + '.log'))
except OSError:
print(Process.print_error('unable to remove existing log file ' + cfg['file'].replace('.gz','.' + cfg['tag'] + '.log')))
return
else:
print(Process.print_error('log file ' + cfg['file'].replace('.gz','.' + cfg['tag'] + '.log') + ' already exists, use --replace to overwrite existing results'))
return
if os.path.exists(cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz')):
if args.replace:
try:
os.remove(cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz'))
except OSError:
print(Process.print_error('unable to remove existing inflation corrected results file ' + cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz')))
else:
print(Process.print_error('results file ' + cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz') + ' already exists, use --replace to overwrite existing results'))
return
if os.path.exists(cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz.tbi')):
if args.replace:
try:
os.remove(cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz.tbi'))
except OSError:
print(Process.print_error('unable to remove existing inflation corrected results index file ' + cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz.tbi')))
else:
print(Process.print_error('results index file ' + cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz.tbi') + ' already exists, use --replace to overwrite existing results'))
return
cmd = 'Run' + args.which.capitalize() + '(' + str(args.ordered_args) + ')'
if cfg['qsub'] is not None:
Process.qsub(['qsub'] + cfg['qsub'].split() + ['-o',cfg['file'].replace('.gz','.' + cfg['tag'] + '.log'),qsub_wrapper],'\"' + cmd + '\"')
else:
Process.interactive(qsub_wrapper, cmd, cfg['file'].replace('.gz','.' + cfg['tag'] + '.log'))
else:
print(Process.print_error(args.which + " not a currently available module"))
print('')
if __name__ == "__main__":
main()
os._exit(0)
| gpl-3.0 | -3,212,031,073,391,245,000 | 50.446188 | 305 | 0.576291 | false |
cigamit/boxeehack | hack/boxee/scripts/OpenSubtitles/resources/lib/plugins/Addic7ed.py | 3 | 6405 | # -*- coding: utf-8 -*-
# This file is part of periscope.
# Copyright (c) 2008-2011 Patrick Dessalle <[email protected]>
#
# periscope is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# periscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with periscope; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import zipfile, os, urllib2, urllib, logging, traceback, httplib, re, socket
from BeautifulSoup import BeautifulSoup
import SubtitleDatabase
LANGUAGES = {u"English" : "en",
u"English (US)" : "en",
u"English (UK)" : "en",
u"Italian" : "it",
u"Portuguese" : "pt",
u"Portuguese (Brazilian)" : "pt-br",
u"Romanian" : "ro",
u"Español (Latinoamérica)" : "es",
u"Español (España)" : "es",
u"Spanish (Latin America)" : "es",
u"Español" : "es",
u"Spanish" : "es",
u"Spanish (Spain)" : "es",
u"French" : "fr",
u"Greek" : "el",
u"Arabic" : "ar",
u"German" : "de",
u"Croatian" : "hr",
u"Indonesian" : "id",
u"Hebrew" : "he",
u"Russian" : "ru",
u"Turkish" : "tr",
u"Swedish" : "se",
u"Czech" : "cs",
u"Dutch" : "nl",
u"Hungarian" : "hu",
u"Norwegian" : "no",
u"Polish" : "pl",
u"Persian" : "fa"}
class Addic7ed(SubtitleDatabase.SubtitleDB):
url = "http://www.addic7ed.com"
site_name = "Addic7ed"
def __init__(self, config, cache_folder_path):
super(Addic7ed, self).__init__(langs=None,revertlangs=LANGUAGES)
#http://www.addic7ed.com/serie/Smallville/9/11/Absolute_Justice
self.host = "http://www.addic7ed.com"
self.release_pattern = re.compile(" \nVersion (.+), ([0-9]+).([0-9])+ MBs")
def process(self, filepath, langs):
''' main method to call on the plugin, pass the filename and the wished
languages and it will query the subtitles source '''
fname = unicode(self.getFileName(filepath).lower())
guessedData = self.guessFileData(fname)
if guessedData['type'] == 'tvshow':
subs = self.query(guessedData['name'], guessedData['season'], guessedData['episode'], guessedData['teams'], langs)
return subs
else:
return []
def query(self, name, season, episode, teams, langs=None):
''' makes a query and returns info (link, lang) about found subtitles'''
sublinks = []
name = name.lower().replace(" ", "_")
searchurl = "%s/serie/%s/%s/%s/%s" %(self.host, name, season, episode, name)
logging.debug("dl'ing %s" %searchurl)
try:
socket.setdefaulttimeout(3)
page = urllib2.urlopen(searchurl)
except urllib2.HTTPError, (inst):
logging.info("Error : %s - %s" %(searchurl, inst))
return sublinks
except urllib2.URLError, (inst):
logging.info("TimeOut : %s" %inst)
return sublinks
#HTML bug in addic7ed
content = page.read()
content = content.replace("The safer, easier way", "The safer, easier way \" />")
soup = BeautifulSoup(content)
for subs in soup("td", {"class":"NewsTitle", "colspan" : "3"}):
if not self.release_pattern.match(str(subs.contents[1])):
continue
subteams = self.release_pattern.match(str(subs.contents[1])).groups()[0].lower()
# Addic7ed only takes the real team into account
fteams = []
for team in teams:
fteams += team.split("-")
teams = set(fteams)
subteams = self.listTeams([subteams], [".", "_", " "])
logging.debug("[Addic7ed] Team from website: %s" %subteams)
logging.debug("[Addic7ed] Team from file: %s" %teams)
logging.debug("[Addic7ed] match ? %s" %subteams.issubset(teams))
langs_html = subs.findNext("td", {"class" : "language"})
p = re.compile('Works with (.*)')
works_with = subs.findNext("td", {"class" : "newsDate"})
works_with = works_with.contents[0].encode('utf-8').strip()
works_with_match = p.findall(works_with)
lang = self.getLG(langs_html.contents[0].strip().replace(' ', ''))
#logging.debug("[Addic7ed] Language : %s - lang : %s" %(langs_html, lang))
statusTD = langs_html.findNext("td")
status = statusTD.find("b").string.strip()
# take the last one (most updated if it exists)
links = statusTD.findNext("td").findAll("a")
link = "%s%s"%(self.host,links[len(links)-1]["href"])
#logging.debug("%s - match : %s - lang : %s" %(status == "Completed", subteams.issubset(teams), (not langs or lang in langs)))
if status == "Completed" and (not langs or lang in langs) :
result = {}
result["release"] = "%s.S%.2dE%.2d.%s" %(name.replace("_", ".").title(), int(season), int(episode), '.'.join(subteams)
)
if(len(works_with_match) > 0):
result["release"] = result["release"].decode('utf-8').strip() + " / " + works_with_match[0].decode('utf-8').strip()
result["lang"] = lang
result["link"] = link
result["page"] = searchurl
sublinks.append(result)
return sublinks
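	# Each entry returned by query() is a plain dict shaped roughly like this
	# sketch (the values are illustrative, not taken from a real results page):
	#
	#   {"release": "Smallville.S09E11.LOL",
	#    "lang": "en",
	#    "link": "http://www.addic7ed.com/original/12345/0",
	#    "page": "http://www.addic7ed.com/serie/Smallville/9/11/Smallville"}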
def listTeams(self, subteams, separators):
teams = []
for sep in separators:
subteams = self.splitTeam(subteams, sep)
#logging.debug(subteams)
return set(subteams)
def splitTeam(self, subteams, sep):
teams = []
for t in subteams:
teams += t.split(sep)
return teams
def createFile(self, subtitle):
'''pass the URL of the sub and the file it matches, will unzip it
and return the path to the created file'''
suburl = subtitle["link"]
videofilename = subtitle["filename"]
srtbasefilename = videofilename.rsplit(".", 1)[0]
srtfilename = srtbasefilename +".srt"
self.downloadFile(suburl, srtfilename)
return srtfilename
def downloadFile(self, url, srtfilename):
''' Downloads the given url to the given filename '''
req = urllib2.Request(url, headers={'Referer' : url, 'User-Agent' : 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3)'})
f = urllib2.urlopen(req)
dump = open(srtfilename, "wb")
dump.write(f.read())
dump.close()
f.close()
logging.debug("Download finished to file %s. Size : %s"%(srtfilename,os.path.getsize(srtfilename)))
| mit | 6,206,127,158,683,269,000 | 36.426901 | 129 | 0.645938 | false |
mgoffin/osquery | tools/codegen/gentargets.py | 3 | 2873 | #!/usr/bin/env python
import argparse
import json
import logging
import os
logging_format = '[%(levelname)s] %(message)s'
logging.basicConfig(level=logging.INFO, format=logging_format)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_ROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, "../.."))
def get_files_to_compile(json_data):
files_to_compile = []
for element in json_data:
filename = element["file"]
if not filename.endswith("tests.cpp") and \
not filename.endswith("benchmarks.cpp") and \
"third-party" not in filename and \
"example" not in filename and \
"generated/gen" not in filename and \
"test_util" not in filename:
base = filename.rfind("osquery/")
filename = filename[base + len("osquery/"):]
base_generated = filename.rfind("generated/")
if base_generated >= 0:
filename = filename[base_generated:]
files_to_compile.append(filename)
return files_to_compile
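# get_files_to_compile() expects the CMake-generated JSON to be a list of
# objects that each carry at least a "file" key, e.g. (hypothetical paths):
#
#   [{"file": "/src/osquery/osquery/core/tables.cpp"},
#    {"file": "/src/osquery/osquery/core/tables_tests.cpp"}]
#
# the first entry is kept and rewritten to "core/tables.cpp"; the second is
# dropped because it ends with "tests.cpp".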
TARGETS_PREAMBLE = """
# DO NOT EDIT
# Automatically generated: make sync
thrift_library(
name="if",
languages=[
"cpp",
"py",
],
thrift_srcs={
"extensions/osquery.thrift": ["Extension", "ExtensionManager"],
},
)
cpp_library(
name="osquery_sdk",
srcs=["""
TARGETS_POSTSCRIPT = """ ],
deps=[
"@/thrift/lib/cpp/concurrency:concurrency",
":if-cpp",
],
external_deps=[
"boost",
"glog",
"gflags",
"gtest",
"rocksdb",
"libuuid",
],
compiler_flags=[
"-Wno-unused-function",
"-Wno-non-virtual-dtor",
"-Wno-address",
"-Wno-overloaded-virtual",
"-DOSQUERY_BUILD_VERSION=%s",
"-DOSQUERY_BUILD_SDK_VERSION=%s",
"-DOSQUERY_THRIFT_LIB=thrift/lib/cpp",
"-DOSQUERY_THRIFT_SERVER_LIB=thrift/lib/cpp/server/example",
"-DOSQUERY_THRIFT_POINTER=std",
"-DOSQUERY_THRIFT=osquery/gen-cpp/",
],
)
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=(
"Generate a TARGETS files from CMake metadata"
))
parser.add_argument("--input", "-i", required=True)
parser.add_argument("--version", "-v", required=True)
parser.add_argument("--sdk", required=True)
args = parser.parse_args()
try:
with open(args.input, "r") as f:
try:
json_data = json.loads(f.read())
except ValueError:
logging.critical("Error: %s is not valid JSON" % args.input)
source_files = get_files_to_compile(json_data)
print(TARGETS_PREAMBLE)
for source_file in source_files:
print(" \"%s\"," % source_file)
print(TARGETS_POSTSCRIPT % (args.version, args.sdk))
except IOError:
logging.critical("Error: %s doesn't exist" % args.input)
| bsd-3-clause | -5,040,691,160,054,863,000 | 26.893204 | 76 | 0.590324 | false |
gt-ros-pkg/rcommander-core | nodebox_qt/src/nodebox/gui/qt/widgets/animationspinner.py | 1 | 2415 | from PyQt4.QtGui import QLabel, QPainter, QPainterPath, QBrush, QColor, QPalette
from PyQt4.QtCore import Qt, SIGNAL, QTimer
def coordinates(x0, y0, distance, angle):
from math import radians, sin, cos
x1 = x0 + cos(radians(angle)) * distance
y1 = y0 + sin(radians(angle)) * distance
return x1, y1
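# A minimal worked example of the polar helper above: starting at (8, 8) and
# moving a distance of 6 at an angle of 0 degrees gives (14.0, 8.0), since
# cos(0) is 1 and sin(0) is 0.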
class AnimationSpinner(QLabel):
def __init__(self, parent=None):
QLabel.__init__(self, parent)
brightness = parent.palette().color(QPalette.Window).valueF()
self._bw = brightness < 0.5 and 1 or 0
self._steps = 12
self._setup()
self._isRunning = False
self.animationTimer = None
def _setup(self):
steps = self._steps
anglestep = 360. / steps
fillstep = 0.6 / (steps - 1)
self._fillsteps = [0.71 - i * fillstep for i in range(steps)]
self._coords = [coordinates(8, 8, 6, anglestep*i) for i in range(steps)]
self._path = QPainterPath()
self._path.addRoundedRect(0, 0, 4, 2, 1, 1)
def start(self):
self.animationTimer = QTimer(self)
self.connect(self.animationTimer, SIGNAL("timeout()"), self.run)
self.animationTimer.start(35)
self._isRunning = True
def stop(self):
if self.animationTimer is not None:
self.animationTimer.stop()
self.animationTimer = None
self._isRunning = False
self.repaint()
def run(self):
self.repaint()
self._fillsteps = self._fillsteps[1:] + [self._fillsteps[0]]
def paintEvent(self, event):
if self._isRunning:
anglestep = 360. / self._steps
fillsteps = self._fillsteps
factor = min(self.width(), self.height()) / 16.
bw = self._bw
p = QPainter(self)
p.setRenderHint(QPainter.Antialiasing, True)
p.scale(factor, factor)
p.setPen(Qt.NoPen)
for i in range(self._steps):
x1, y1 = self._coords[i]
c = fillsteps[self._steps - 1 - i]
a = anglestep * i
p.setBrush(QBrush(QColor.fromRgbF(bw, bw, bw, c)))
p.save()
p.translate(x1 - 2, y1 - 1)
p.translate(2, 1)
p.rotate(a)
p.translate(-2, -1)
p.drawPath(self._path)
p.restore()
| bsd-3-clause | -6,817,277,009,778,616,000 | 34 | 80 | 0.542443 | false |
thequbit/chatterstats | gettweets.py | 1 | 1147 | from twitter import *
from cfgfile import parseconfig
def getauth():
settings = parseconfig('twitterauth.ini')
return settings
def init():
settings = getauth()
t = Twitter(
auth=OAuth(settings['OAUTH_TOKEN'],settings['OAUTH_SECRET'],
settings['CONSUMER_KEY'],settings['CONSUMER_SECRET'])
)
return t
def gettweets(lat,lng,radius="20mi",count=100):
t = init()
geocode = "{0},{1},{2}".format(lat,lng,radius)
results = t.search.tweets(q="#roc",geocode=geocode,count=count)
hashtags = {}
tweets = []
success = True
try:
for status in results['statuses']:
sn = status['user']['screen_name']
tid = status['id']
text = status['text']
created = status['created_at']
tweets.append((sn,tid,text,created))
for tag in status['entities']['hashtags']:
if tag['text'] in hashtags:
hashtags[tag['text']] += 1
else:
hashtags[tag['text']] = 1
except:
success = False
return tweets,success
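# A minimal usage sketch (the coordinates are illustrative; the hard-coded
# "#roc" query suggests a Rochester, NY search area):
#
#   tweets, ok = gettweets(43.16, -77.61, radius="20mi", count=100)
#
# note that the hashtag tallies built inside gettweets() are never returned;
# callers only receive the list of tweet tuples and the success flag.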
| gpl-3.0 | -2,582,389,231,847,618,000 | 30 | 80 | 0.537053 | false |
aleneum/rsbhsm | tests/test_nesting.py | 1 | 13226 | # -*- coding: utf-8 -*-
try:
from builtins import object
except ImportError:
pass
import sys
from transitions.extensions import MachineFactory
from transitions.extensions.nesting import NestedState as State
from .test_core import TestTransitions as TestsCore
from .utils import Stuff
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
state_separator = State.separator
class TestTransitions(TestsCore):
def setUp(self):
states = ['A', 'B', {'name': 'C', 'children': ['1', '2', {'name': '3', 'children': ['a', 'b', 'c']}]},
'D', 'E', 'F']
machine_cls = MachineFactory.get_predefined(nested=True)
self.stuff = Stuff(states, machine_cls)
def test_transition_definitions(self):
states = ['A', 'B', {'name': 'C', 'children': ['1', '2', '3']}, 'D']
# Define with list of dictionaries
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B'},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'},
{'trigger': 'run', 'source': 'C', 'dest': 'C%s1' % State.separator}
]
m = self.stuff.machine_cls(states=states, transitions=transitions, initial='A')
m.walk()
self.assertEquals(m.state, 'B')
m.run()
self.assertEquals(m.state, 'C')
m.run()
self.assertEquals(m.state, 'C%s1' % State.separator)
# Define with list of lists
transitions = [
['walk', 'A', 'B'],
['run', 'B', 'C'],
['sprint', 'C', 'D']
]
m = self.stuff.machine_cls(states=states, transitions=transitions, initial='A')
m.to_C()
m.sprint()
self.assertEquals(m.state, 'D')
def test_transitioning(self):
s = self.stuff
s.machine.add_transition('advance', 'A', 'B')
s.machine.add_transition('advance', 'B', 'C')
s.machine.add_transition('advance', 'C', 'D')
s.machine.add_transition('reset', '*', 'A')
self.assertEqual(len(s.machine.events['reset'].transitions['C%s1' % State.separator]), 1)
s.advance()
self.assertEquals(s.state, 'B')
self.assertFalse(s.is_A())
self.assertTrue(s.is_B())
s.advance()
self.assertEquals(s.state, 'C')
def test_conditions(self):
s = self.stuff
s.machine.add_transition('advance', 'A', 'B', conditions='this_passes')
s.machine.add_transition('advance', 'B', 'C', unless=['this_fails'])
s.machine.add_transition('advance', 'C', 'D', unless=['this_fails',
'this_passes'])
s.advance()
self.assertEquals(s.state, 'B')
s.advance()
self.assertEquals(s.state, 'C')
s.advance()
self.assertEquals(s.state, 'C')
def test_multiple_add_transitions_from_state(self):
s = self.stuff
s.machine.add_transition(
'advance', 'A', 'B', conditions=['this_fails'])
s.machine.add_transition('advance', 'A', 'C')
s.machine.add_transition('advance', 'C', 'C%s2' % State.separator)
s.advance()
self.assertEquals(s.state, 'C')
s.advance()
self.assertEquals(s.state, 'C%s2' % State.separator)
def test_use_machine_as_model(self):
states = ['A', 'B', 'C', 'D']
m = self.stuff.machine_cls(states=states, initial='A')
m.add_transition('move', 'A', 'B')
m.add_transition('move_to_C', 'B', 'C')
m.move()
self.assertEquals(m.state, 'B')
def test_state_change_listeners(self):
s = self.stuff
s.machine.add_transition('advance', 'A', 'C%s1' % State.separator)
s.machine.add_transition('reverse', 'C', 'A')
s.machine.add_transition('lower', 'C%s1' % State.separator, 'C{0}3{0}a'.format(State.separator))
s.machine.add_transition('rise', 'C%s3' % State.separator, 'C%s1' % State.separator)
s.machine.add_transition('fast', 'A', 'C{0}3{0}a'.format(State.separator))
s.machine.on_enter_C('hello_world')
s.machine.on_exit_C('goodbye')
s.machine.on_enter('C{0}3{0}a'.format(State.separator), 'greet')
s.machine.on_exit('C%s3' % State.separator, 'meet')
s.advance()
self.assertEquals(s.state, 'C%s1' % State.separator)
self.assertEquals(s.message, 'Hello World!')
s.lower()
self.assertEquals(s.state, 'C{0}3{0}a'.format(State.separator))
self.assertEquals(s.message, 'Hi')
s.rise()
self.assertEquals(s.state, 'C%s1' % State.separator)
self.assertTrue(s.message.startswith('Nice to'))
s.reverse()
self.assertEquals(s.state, 'A')
self.assertTrue(s.message.startswith('So long'))
s.fast()
self.assertEquals(s.state, 'C{0}3{0}a'.format(State.separator))
self.assertEquals(s.message, 'Hi')
s.to_A()
self.assertEquals(s.state, 'A')
self.assertTrue(s.message.startswith('So long'))
def test_enter_exit_nested(self):
s = self.stuff
s.machine.add_transition('advance', 'A', 'C%s1' % State.separator)
s.machine.add_transition('reverse', 'C', 'A')
s.machine.add_transition('lower', 'C%s1' % State.separator, 'C{0}3{0}a'.format(State.separator))
s.machine.add_transition('rise', 'C%s3' % State.separator, 'C%s1' % State.separator)
s.machine.add_transition('fast', 'A', 'C{0}3{0}a'.format(State.separator))
for name, state in s.machine.states.items():
state.on_enter.append('increase_level')
state.on_exit.append('decrease_level')
s.advance()
self.assertEquals(s.state, 'C%s1' % State.separator)
self.assertEquals(s.level, 2)
s.lower()
self.assertEquals(s.state, 'C{0}3{0}a'.format(State.separator))
self.assertEquals(s.level, 3)
s.rise()
self.assertEquals(s.state, 'C%s1' % State.separator)
self.assertEquals(s.level, 2)
s.reverse()
self.assertEquals(s.state, 'A')
self.assertEquals(s.level, 1)
s.fast()
self.assertEquals(s.state, 'C{0}3{0}a'.format(State.separator))
self.assertEquals(s.level, 3)
s.to_A()
self.assertEquals(s.state, 'A')
self.assertEquals(s.level, 1)
if State.separator in '_':
s.to_C_3_a()
else:
s.to_C.s3.a()
self.assertEquals(s.state, 'C{0}3{0}a'.format(State.separator))
self.assertEquals(s.level, 3)
def test_ordered_transitions(self):
states = ['beginning', 'middle', 'end']
m = self.stuff.machine_cls(None, states)
m.add_ordered_transitions()
self.assertEquals(m.state, 'initial')
m.next_state()
self.assertEquals(m.state, 'beginning')
m.next_state()
m.next_state()
self.assertEquals(m.state, 'end')
m.next_state()
self.assertEquals(m.state, 'initial')
# Include initial state in loop
m = self.stuff.machine_cls(None, states)
m.add_ordered_transitions(loop_includes_initial=False)
m.to_end()
m.next_state()
self.assertEquals(m.state, 'beginning')
# Test user-determined sequence and trigger name
m = self.stuff.machine_cls(None, states, initial='beginning')
m.add_ordered_transitions(['end', 'beginning'], trigger='advance')
m.advance()
self.assertEquals(m.state, 'end')
m.advance()
self.assertEquals(m.state, 'beginning')
# Via init argument
m = self.stuff.machine_cls(
None, states, initial='beginning', ordered_transitions=True)
m.next_state()
self.assertEquals(m.state, 'middle')
def test_pickle(self):
import sys
if sys.version_info < (3, 4):
import dill as pickle
else:
import pickle
states = ['A', 'B', 'C', 'D']
# Define with list of dictionaries
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B'},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
]
m = self.stuff.machine_cls(states=states, transitions=transitions, initial='A')
m.walk()
dump = pickle.dumps(m)
self.assertIsNotNone(dump)
m2 = pickle.loads(dump)
self.assertEqual(m.state, m2.state)
m2.run()
def test_callbacks_duplicate(self):
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'C', 'before': 'before_state_change',
'after': 'after_state_change'},
{'trigger': 'run', 'source': 'B', 'dest': 'C'}
]
m = self.stuff.machine_cls(None, states=['A', 'B', 'C'], transitions=transitions,
before_state_change='before_state_change',
after_state_change='after_state_change', send_event=True,
initial='A', auto_transitions=True)
m.before_state_change = MagicMock()
m.after_state_change = MagicMock()
m.walk()
self.assertEqual(m.before_state_change.call_count, 2)
self.assertEqual(m.after_state_change.call_count, 2)
def test_with_custom_separator(self):
State.separator = '.'
self.tearDown()
self.setUp()
self.test_enter_exit_nested()
self.tearDown()
self.setUp()
self.test_state_change_listeners()
self.tearDown()
self.setUp()
self.test_nested_auto_transitions()
State.separator = '.' if sys.version_info[0] < 3 else u'↦'
self.tearDown()
self.setUp()
self.test_enter_exit_nested()
self.tearDown()
self.setUp()
self.test_state_change_listeners()
self.tearDown()
self.setUp()
self.test_nested_auto_transitions()
def test_nested_auto_transitions(self):
s = self.stuff
s.to_C()
self.assertEqual(s.state, 'C')
state = 'C{0}3{0}a'.format(State.separator)
s.machine.to(s, state)
self.assertEqual(s.state, state)
def test_example_one(self):
State.separator = '_'
states = ['standing', 'walking', {'name': 'caffeinated', 'children': ['dithering', 'running']}]
transitions = [['walk', 'standing', 'walking'],
['stop', 'walking', 'standing'],
['drink', '*', 'caffeinated'],
['walk', 'caffeinated', 'caffeinated_running'],
['relax', 'caffeinated', 'standing']]
machine = self.stuff.machine_cls(states=states, transitions=transitions, initial='standing',
ignore_invalid_triggers=True, name='Machine 1')
machine.walk() # Walking now
machine.stop() # let's stop for a moment
machine.drink() # coffee time
machine.state
self.assertEqual(machine.state, 'caffeinated')
machine.walk() # we have to go faster
self.assertEqual(machine.state, 'caffeinated_running')
machine.stop() # can't stop moving!
machine.state
self.assertEqual(machine.state, 'caffeinated_running')
machine.relax() # leave nested state
machine.state # phew, what a ride
self.assertEqual(machine.state, 'standing')
machine.to_caffeinated_running() # auto transition fast track
machine.on_enter_caffeinated_running('callback_method')
def test_example_two(self):
State.separator = '.' if sys.version_info[0] < 3 else u'↦'
states = ['A', 'B',
{'name': 'C', 'children':
['1', '2',
{'name': '3', 'children': ['a', 'b', 'c']}
]
}
]
transitions = [
['reset', 'C', 'A'],
['reset', 'C%s2' % State.separator, 'C'] # overwriting parent reset
]
# we rely on auto transitions
machine = self.stuff.machine_cls(states=states, transitions=transitions, initial='A')
machine.to_B() # exit state A, enter state B
machine.to_C() # exit B, enter C
machine.to_C.s3.a() # enter C↦a; enter C↦3↦a;
self.assertEqual(machine.state, 'C{0}3{0}a'.format(State.separator))
machine.to_C.s2() # exit C↦3↦a, exit C↦3, enter C↦2
machine.reset() # exit C↦2; reset C has been overwritten by C↦3
self.assertEqual(machine.state, 'C')
machine.reset() # exit C, enter A
self.assertEqual(machine.state, 'A')
class TestWithGraphTransitions(TestTransitions):
def setUp(self):
State.separator = state_separator
states = ['A', 'B', {'name': 'C', 'children': ['1', '2', {'name': '3', 'children': ['a', 'b', 'c']}]},
'D', 'E', 'F']
machine_cls = MachineFactory.get_predefined(graph=True, nested=True)
self.stuff = Stuff(states, machine_cls)
| mit | 7,386,253,963,974,170,000 | 37.721408 | 110 | 0.554756 | false |
erdewit/ib_insync | ib_insync/decoder.py | 1 | 38479 | """Deserialize and dispatch messages."""
import dataclasses
import logging
from datetime import datetime, timezone
from .contract import (
ComboLeg, Contract, ContractDescription, ContractDetails,
DeltaNeutralContract)
from .objects import (
BarData, CommissionReport, DepthMktDataDescription, Execution, FamilyCode,
HistogramData, HistoricalTick, HistoricalTickBidAsk, HistoricalTickLast,
NewsProvider, PriceIncrement, SmartComponent, SoftDollarTier, TagValue,
TickAttribBidAsk, TickAttribLast)
from .order import Order, OrderComboLeg, OrderCondition, OrderState
from .util import UNSET_DOUBLE, parseIBDatetime
from .wrapper import Wrapper
__all__ = ['Decoder']
class Decoder:
"""Decode IB messages and invoke corresponding wrapper methods."""
def __init__(self, wrapper: Wrapper, serverVersion: int):
self.wrapper = wrapper
self.serverVersion = serverVersion
self.logger = logging.getLogger('ib_insync.Decoder')
self.handlers = {
1: self.priceSizeTick,
2: self.wrap(
'tickSize', [int, int, int]),
3: self.wrap(
'orderStatus', [
int, str, float, float, float, int, int,
float, int, str, float], skip=1),
4: self.wrap(
'error', [int, int, str]),
5: self.openOrder,
6: self.wrap(
'updateAccountValue', [str, str, str, str]),
7: self.updatePortfolio,
8: self.wrap(
'updateAccountTime', [str]),
9: self.wrap(
'nextValidId', [int]),
10: self.contractDetails,
11: self.execDetails,
12: self.wrap(
'updateMktDepth', [int, int, int, int, float, int]),
13: self.wrap(
'updateMktDepthL2',
[int, int, str, int, int, float, int, bool]),
14: self.wrap(
'updateNewsBulletin', [int, int, str, str]),
15: self.wrap(
'managedAccounts', [str]),
16: self.wrap(
'receiveFA', [int, str]),
17: self.historicalData,
18: self.bondContractDetails,
19: self.wrap(
'scannerParameters', [str]),
20: self.scannerData,
21: self.tickOptionComputation,
45: self.wrap(
'tickGeneric', [int, int, float]),
46: self.wrap(
'tickString', [int, int, str]),
47: self.wrap(
'tickEFP',
[int, int, float, str, float, int, str, float, float]),
49: self.wrap(
'currentTime', [int]),
50: self.wrap(
'realtimeBar',
[int, int, float, float, float, float, int, float, int]),
51: self.wrap(
'fundamentalData', [int, str]),
52: self.wrap(
'contractDetailsEnd', [int]),
53: self.wrap(
'openOrderEnd', []),
54: self.wrap(
'accountDownloadEnd', [str]),
55: self.wrap(
'execDetailsEnd', [int]),
56: self.deltaNeutralValidation,
57: self.wrap(
'tickSnapshotEnd', [int]),
58: self.wrap(
'marketDataType', [int, int]),
59: self.commissionReport,
61: self.position,
62: self.wrap(
'positionEnd', []),
63: self.wrap(
'accountSummary', [int, str, str, str, str]),
64: self.wrap(
'accountSummaryEnd', [int]),
65: self.wrap(
'verifyMessageAPI', [str]),
66: self.wrap(
'verifyCompleted', [bool, str]),
67: self.wrap(
'displayGroupList', [int, str]),
68: self.wrap(
'displayGroupUpdated', [int, str]),
69: self.wrap(
'verifyAndAuthMessageAPI', [str, str]),
70: self.wrap(
'verifyAndAuthCompleted', [bool, str]),
71: self.positionMulti,
72: self.wrap(
'positionMultiEnd', [int]),
73: self.wrap(
'accountUpdateMulti', [int, str, str, str, str, str]),
74: self.wrap(
'accountUpdateMultiEnd', [int]),
75: self.securityDefinitionOptionParameter,
76: self.wrap(
'securityDefinitionOptionParameterEnd', [int], skip=1),
77: self.softDollarTiers,
78: self.familyCodes,
79: self.symbolSamples,
80: self.mktDepthExchanges,
81: self.wrap(
'tickReqParams', [int, float, str, int], skip=1),
82: self.smartComponents,
83: self.wrap(
'newsArticle', [int, int, str], skip=1),
84: self.wrap(
'tickNews', [int, int, str, str, str, str], skip=1),
85: self.newsProviders,
86: self.wrap(
'historicalNews', [int, str, str, str, str], skip=1),
87: self.wrap(
'historicalNewsEnd', [int, bool], skip=1),
88: self.wrap(
'headTimestamp', [int, str], skip=1),
89: self.histogramData,
90: self.historicalDataUpdate,
91: self.wrap(
'rerouteMktDataReq', [int, int, str], skip=1),
92: self.wrap(
'rerouteMktDepthReq', [int, int, str], skip=1),
93: self.marketRule,
94: self.wrap(
'pnl', [int, float, float, float], skip=1),
95: self.wrap(
'pnlSingle', [int, int, float, float, float, float], skip=1),
96: self.historicalTicks,
97: self.historicalTicksBidAsk,
98: self.historicalTicksLast,
99: self.tickByTick,
100: self.wrap(
'orderBound', [int, int, int], skip=1),
101: self.completedOrder,
102: self.wrap(
'completedOrdersEnd', [], skip=1),
}
def wrap(self, methodName, types, skip=2):
"""
Create a message handler that invokes a wrapper method
with the in-order message fields as parameters, skipping over
the first ``skip`` fields, and parsed according to the ``types`` list.
"""
def handler(fields):
method = getattr(self.wrapper, methodName, None)
if method:
try:
args = [
field if typ is str else
int(field or 0) if typ is int else
float(field or 0) if typ is float else
bool(int(field or 0))
for (typ, field) in zip(types, fields[skip:])]
method(*args)
except Exception:
self.logger.exception(f'Error for {methodName}:')
return handler
def interpret(self, fields):
"""Decode fields and invoke corresponding wrapper method."""
try:
msgId = int(fields[0])
handler = self.handlers[msgId]
handler(fields)
except Exception:
self.logger.exception(f'Error handling fields: {fields}')
def parse(self, obj):
"""Parse the object's properties according to its default types."""
for field in dataclasses.fields(obj):
typ = type(field.default)
if typ is str:
continue
v = getattr(obj, field.name)
if typ is int:
setattr(obj, field.name, int(v) if v else field.default)
elif typ is float:
setattr(obj, field.name, float(v) if v else field.default)
elif typ is bool:
setattr(obj, field.name, bool(int(v)) if v else field.default)
def priceSizeTick(self, fields):
_, _, reqId, tickType, price, size, _ = fields
if price:
self.wrapper.priceSizeTick(
int(reqId), int(tickType), float(price), int(size))
def updatePortfolio(self, fields):
c = Contract()
(
_, _,
c.conId,
c.symbol,
c.secType,
c.lastTradeDateOrContractMonth,
c.strike,
c.right,
c.multiplier,
c.primaryExchange,
c.currency,
c.localSymbol,
c.tradingClass,
position,
marketPrice,
marketValue,
averageCost,
unrealizedPNL,
realizedPNL,
accountName) = fields
self.parse(c)
self.wrapper.updatePortfolio(
c, float(position), float(marketPrice),
float(marketValue), float(averageCost), float(unrealizedPNL),
float(realizedPNL), accountName)
def contractDetails(self, fields):
cd = ContractDetails()
cd.contract = c = Contract()
(
_, _,
reqId,
c.symbol,
c.secType,
lastTimes,
c.strike,
c.right,
c.exchange,
c.currency,
c.localSymbol,
cd.marketName,
c.tradingClass,
c.conId,
cd.minTick,
cd.mdSizeMultiplier,
c.multiplier,
cd.orderTypes,
cd.validExchanges,
cd.priceMagnifier,
cd.underConId,
cd.longName,
c.primaryExchange,
cd.contractMonth,
cd.industry,
cd.category,
cd.subcategory,
cd.timeZoneId,
cd.tradingHours,
cd.liquidHours,
cd.evRule,
cd.evMultiplier,
numSecIds,
*fields) = fields
numSecIds = int(numSecIds)
if numSecIds > 0:
cd.secIdList = []
for _ in range(numSecIds):
tag, value, *fields = fields
cd.secIdList += [TagValue(tag, value)]
(
cd.aggGroup,
cd.underSymbol,
cd.underSecType,
cd.marketRuleIds,
cd.realExpirationDate,
*fields) = fields
if self.serverVersion >= 152:
(cd.stockType, ) = fields
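        # lastTimes holds the last trade date and, optionally, the last trade
        # time separated by whitespace.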
times = lastTimes.split()
if len(times) > 0:
c.lastTradeDateOrContractMonth = times[0]
if len(times) > 1:
cd.lastTradeTime = times[1]
self.parse(cd)
self.parse(c)
self.wrapper.contractDetails(int(reqId), cd)
def bondContractDetails(self, fields):
cd = ContractDetails()
cd.contract = c = Contract()
(
_, _,
reqId,
c.symbol,
c.secType,
cd.cusip,
cd.coupon,
lastTimes,
cd.issueDate,
cd.ratings,
cd.bondType,
cd.couponType,
cd.convertible,
cd.callable,
cd.putable,
cd.descAppend,
c.exchange,
c.currency,
cd.marketName,
c.tradingClass,
c.conId,
cd.minTick,
cd.mdSizeMultiplier,
cd.orderTypes,
cd.validExchanges,
cd.nextOptionDate,
cd.nextOptionType,
cd.nextOptionPartial,
cd.notes,
cd.longName,
cd.evRule,
cd.evMultiplier,
numSecIds,
*fields) = fields
numSecIds = int(numSecIds)
if numSecIds > 0:
cd.secIdList = []
for _ in range(numSecIds):
tag, value, *fields = fields
cd.secIdList += [TagValue(tag, value)]
cd.aggGroup, cd.marketRuleIds = fields
times = lastTimes.split()
if len(times) > 0:
cd.maturity = times[0]
if len(times) > 1:
cd.lastTradeTime = times[1]
if len(times) > 2:
cd.timeZoneId = times[2]
self.parse(cd)
self.parse(c)
self.wrapper.bondContractDetails(int(reqId), cd)
def execDetails(self, fields):
c = Contract()
ex = Execution()
(
_,
reqId,
ex.orderId,
c.conId,
c.symbol,
c.secType,
c.lastTradeDateOrContractMonth,
c.strike,
c.right,
c.multiplier,
c.exchange,
c.currency,
c.localSymbol,
c.tradingClass,
ex.execId,
timeStr,
ex.acctNumber,
ex.exchange,
ex.side,
ex.shares,
ex.price,
ex.permId,
ex.clientId,
ex.liquidation,
ex.cumQty,
ex.avgPrice,
ex.orderRef,
ex.evRule,
ex.evMultiplier,
ex.modelCode,
ex.lastLiquidity) = fields
self.parse(c)
self.parse(ex)
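        # Execution times arrive in the TWS timezone (when known); convert
        # them to UTC before handing them to the wrapper.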
time = parseIBDatetime(timeStr)
tz = self.wrapper.ib.TimezoneTWS
if tz:
time = tz.localize(time)
ex.time = time.astimezone(timezone.utc)
self.wrapper.execDetails(int(reqId), c, ex)
def historicalData(self, fields):
_, reqId, startDateStr, endDateStr, numBars, *fields = fields
get = iter(fields).__next__
for _ in range(int(numBars)):
bar = BarData(
date=get(),
open=float(get()),
high=float(get()),
low=float(get()),
close=float(get()),
volume=int(get()),
average=float(get()),
barCount=int(get()))
self.wrapper.historicalData(int(reqId), bar)
self.wrapper.historicalDataEnd(int(reqId), startDateStr, endDateStr)
def historicalDataUpdate(self, fields):
_, reqId, *fields = fields
get = iter(fields).__next__
bar = BarData(
barCount=int(get() or 0),
date=get(),
open=float(get() or 0),
close=float(get() or 0),
high=float(get() or 0),
low=float(get() or 0),
average=float(get() or 0),
volume=int(get() or 0))
self.wrapper.historicalDataUpdate(int(reqId), bar)
def scannerData(self, fields):
_, _, reqId, n, *fields = fields
for _ in range(int(n)):
cd = ContractDetails()
cd.contract = c = Contract()
(
rank,
c.conId,
c.symbol,
c.secType,
c.lastTradeDateOrContractMonth,
c.strike,
c.right,
c.exchange,
c.currency,
c.localSymbol,
cd.marketName,
c.tradingClass,
distance,
benchmark,
projection,
legsStr,
*fields) = fields
self.parse(cd)
self.parse(c)
self.wrapper.scannerData(
int(reqId), int(rank), cd,
distance, benchmark, projection, legsStr)
self.wrapper.scannerDataEnd(int(reqId))
def tickOptionComputation(self, fields):
_, _, reqId, tickTypeInt, impliedVol, delta, optPrice, \
pvDividend, gamma, vega, theta, undPrice = fields
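        # IB sends sentinel values for fields that are not computed
        # (-1 for prices/volatility, -2 for the greeks); map them to None.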
self.wrapper.tickOptionComputation(
int(reqId), int(tickTypeInt),
float(impliedVol) if impliedVol != '-1' else None,
float(delta) if delta != '-2' else None,
float(optPrice) if optPrice != '-1' else None,
float(pvDividend) if pvDividend != '-1' else None,
float(gamma) if gamma != '-2' else None,
float(vega) if vega != '-2' else None,
float(theta) if theta != '-2' else None,
float(undPrice) if undPrice != '-1' else None)
def deltaNeutralValidation(self, fields):
_, _, reqId, conId, delta, price = fields
self.wrapper.deltaNeutralValidation(
int(reqId), DeltaNeutralContract(
int(conId), float(delta or 0), float(price or 0)))
def commissionReport(self, fields):
_, _, execId, commission, currency, realizedPNL, \
yield_, yieldRedemptionDate = fields
self.wrapper.commissionReport(
CommissionReport(
execId, float(commission or 0), currency,
float(realizedPNL or 0), float(yield_ or 0),
int(yieldRedemptionDate or 0)))
def position(self, fields):
c = Contract()
(
_, _,
account,
c.conId,
c.symbol,
c.secType,
c.lastTradeDateOrContractMonth,
c.strike,
c.right,
c.multiplier,
c.exchange,
c.currency,
c.localSymbol,
c.tradingClass,
position,
avgCost) = fields
self.parse(c)
self.wrapper.position(
account, c, float(position or 0), float(avgCost or 0))
def positionMulti(self, fields):
c = Contract()
(
_, _,
reqId,
account,
c.conId,
c.symbol,
c.secType,
c.lastTradeDateOrContractMonth,
c.strike,
c.right,
c.multiplier,
c.exchange,
c.currency,
c.localSymbol,
c.tradingClass,
position,
avgCost,
modelCode) = fields
self.parse(c)
self.wrapper.positionMulti(
int(reqId), account, modelCode, c,
float(position or 0), float(avgCost or 0))
def securityDefinitionOptionParameter(self, fields):
_, reqId, exchange, underlyingConId, tradingClass, multiplier, \
n, *fields = fields
n = int(n)
expirations = fields[:n]
strikes = [float(field) for field in fields[n + 1:]]
self.wrapper.securityDefinitionOptionParameter(
int(reqId), exchange, underlyingConId, tradingClass,
multiplier, expirations, strikes)
def softDollarTiers(self, fields):
_, reqId, n, *fields = fields
get = iter(fields).__next__
tiers = [
SoftDollarTier(
name=get(),
val=get(),
displayName=get())
for _ in range(int(n))]
self.wrapper.softDollarTiers(int(reqId), tiers)
def familyCodes(self, fields):
_, n, *fields = fields
get = iter(fields).__next__
familyCodes = [
FamilyCode(
accountID=get(),
familyCodeStr=get())
for _ in range(int(n))]
self.wrapper.familyCodes(familyCodes)
def symbolSamples(self, fields):
_, reqId, n, *fields = fields
cds = []
for _ in range(int(n)):
cd = ContractDescription()
cd.contract = c = Contract()
c.conId, c.symbol, c.secType, c.primaryExchange, c.currency, \
m, *fields = fields
c.conId = int(c.conId)
m = int(m)
cd.derivativeSecTypes = fields[:m]
fields = fields[m:]
cds.append(cd)
self.wrapper.symbolSamples(int(reqId), cds)
def smartComponents(self, fields):
_, reqId, n, *fields = fields
get = iter(fields).__next__
components = [
SmartComponent(
bitNumber=int(get()),
exchange=get(),
exchangeLetter=get())
for _ in range(int(n))]
self.wrapper.smartComponents(int(reqId), components)
def mktDepthExchanges(self, fields):
_, n, *fields = fields
get = iter(fields).__next__
descriptions = [
DepthMktDataDescription(
exchange=get(),
secType=get(),
listingExch=get(),
serviceDataType=get(),
aggGroup=int(get()))
for _ in range(int(n))]
self.wrapper.mktDepthExchanges(descriptions)
def newsProviders(self, fields):
_, n, *fields = fields
get = iter(fields).__next__
providers = [
NewsProvider(
code=get(),
name=get())
for _ in range(int(n))]
self.wrapper.newsProviders(providers)
def histogramData(self, fields):
_, reqId, n, *fields = fields
get = iter(fields).__next__
histogram = [
HistogramData(
price=float(get()),
count=int(get()))
for _ in range(int(n))]
self.wrapper.histogramData(int(reqId), histogram)
def marketRule(self, fields):
_, marketRuleId, n, *fields = fields
get = iter(fields).__next__
increments = [
PriceIncrement(
lowEdge=float(get()),
increment=float(get()))
for _ in range(int(n))]
self.wrapper.marketRule(int(marketRuleId), increments)
def historicalTicks(self, fields):
_, reqId, n, *fields = fields
get = iter(fields).__next__
ticks = []
for _ in range(int(n)):
time = int(get())
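            # the next field is not used; read and discard it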
get()
price = float(get())
size = int(get())
dt = datetime.fromtimestamp(time, timezone.utc)
ticks.append(
HistoricalTick(dt, price, size))
done = bool(int(get()))
self.wrapper.historicalTicks(int(reqId), ticks, done)
def historicalTicksBidAsk(self, fields):
_, reqId, n, *fields = fields
get = iter(fields).__next__
ticks = []
for _ in range(int(n)):
time = int(get())
mask = int(get())
attrib = TickAttribBidAsk(
askPastHigh=bool(mask & 1),
bidPastLow=bool(mask & 2))
priceBid = float(get())
priceAsk = float(get())
sizeBid = int(get())
sizeAsk = int(get())
dt = datetime.fromtimestamp(time, timezone.utc)
ticks.append(
HistoricalTickBidAsk(
dt, attrib, priceBid, priceAsk, sizeBid, sizeAsk))
done = bool(int(get()))
self.wrapper.historicalTicksBidAsk(int(reqId), ticks, done)
def historicalTicksLast(self, fields):
_, reqId, n, *fields = fields
get = iter(fields).__next__
ticks = []
for _ in range(int(n)):
time = int(get())
mask = int(get())
attrib = TickAttribLast(
pastLimit=bool(mask & 1),
unreported=bool(mask & 2))
price = float(get())
size = int(get())
exchange = get()
specialConditions = get()
dt = datetime.fromtimestamp(time, timezone.utc)
ticks.append(
HistoricalTickLast(
dt, attrib, price, size, exchange, specialConditions))
done = bool(int(get()))
self.wrapper.historicalTicksLast(int(reqId), ticks, done)
def tickByTick(self, fields):
_, reqId, tickType, time, *fields = fields
reqId = int(reqId)
tickType = int(tickType)
time = int(time)
if tickType in (1, 2):
price, size, mask, exchange, specialConditions = fields
mask = int(mask)
attrib = TickAttribLast(
pastLimit=bool(mask & 1),
unreported=bool(mask & 2))
self.wrapper.tickByTickAllLast(
reqId, tickType, time, float(price), int(size),
attrib, exchange, specialConditions)
elif tickType == 3:
bidPrice, askPrice, bidSize, askSize, mask = fields
mask = int(mask)
attrib = TickAttribBidAsk(
bidPastLow=bool(mask & 1),
askPastHigh=bool(mask & 2))
self.wrapper.tickByTickBidAsk(
reqId, time, float(bidPrice), float(askPrice),
int(bidSize), int(askSize), attrib)
elif tickType == 4:
midPoint, = fields
self.wrapper.tickByTickMidPoint(reqId, time, float(midPoint))
def openOrder(self, fields):
o = Order()
c = Contract()
st = OrderState()
if self.serverVersion < 145:
fields.pop(0)
(
_,
o.orderId,
c.conId,
c.symbol,
c.secType,
c.lastTradeDateOrContractMonth,
c.strike,
c.right,
c.multiplier,
c.exchange,
c.currency,
c.localSymbol,
c.tradingClass,
o.action,
o.totalQuantity,
o.orderType,
o.lmtPrice,
o.auxPrice,
o.tif,
o.ocaGroup,
o.account,
o.openClose,
o.origin,
o.orderRef,
o.clientId,
o.permId,
o.outsideRth,
o.hidden,
o.discretionaryAmt,
o.goodAfterTime,
_,
o.faGroup,
o.faMethod,
o.faPercentage,
o.faProfile,
o.modelCode,
o.goodTillDate,
o.rule80A,
o.percentOffset,
o.settlingFirm,
o.shortSaleSlot,
o.designatedLocation,
o.exemptCode,
o.auctionStrategy,
o.startingPrice,
o.stockRefPrice,
o.delta,
o.stockRangeLower,
o.stockRangeUpper,
o.displaySize,
o.blockOrder,
o.sweepToFill,
o.allOrNone,
o.minQty,
o.ocaType,
o.eTradeOnly,
o.firmQuoteOnly,
o.nbboPriceCap,
o.parentId,
o.triggerMethod,
o.volatility,
o.volatilityType,
o.deltaNeutralOrderType,
o.deltaNeutralAuxPrice,
*fields) = fields
if o.deltaNeutralOrderType:
(
o.deltaNeutralConId,
o.deltaNeutralSettlingFirm,
o.deltaNeutralClearingAccount,
o.deltaNeutralClearingIntent,
o.deltaNeutralOpenClose,
o.deltaNeutralShortSale,
o.deltaNeutralShortSaleSlot,
o.deltaNeutralDesignatedLocation,
*fields) = fields
(
o.continuousUpdate,
o.referencePriceType,
o.trailStopPrice,
o.trailingPercent,
o.basisPoints,
o.basisPointsType,
c.comboLegsDescrip,
*fields) = fields
numLegs = int(fields.pop(0))
c.comboLegs = []
for _ in range(numLegs):
leg = ComboLeg()
(
leg.conId,
leg.ratio,
leg.action,
leg.exchange,
leg.openClose,
leg.shortSaleSlot,
leg.designatedLocation,
leg.exemptCode,
*fields) = fields
self.parse(leg)
c.comboLegs.append(leg)
numOrderLegs = int(fields.pop(0))
o.orderComboLegs = []
for _ in range(numOrderLegs):
leg = OrderComboLeg()
leg.price = fields.pop(0)
self.parse(leg)
o.orderComboLegs.append(leg)
numParams = int(fields.pop(0))
if numParams > 0:
o.smartComboRoutingParams = []
for _ in range(numParams):
tag, value, *fields = fields
o.smartComboRoutingParams.append(
TagValue(tag, value))
(
o.scaleInitLevelSize,
o.scaleSubsLevelSize,
increment,
*fields) = fields
o.scalePriceIncrement = float(increment or UNSET_DOUBLE)
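        # The remaining scale-order fields are only present when a real
        # (set and positive) price increment was transmitted.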
if 0 < o.scalePriceIncrement < UNSET_DOUBLE:
(
o.scalePriceAdjustValue,
o.scalePriceAdjustInterval,
o.scaleProfitOffset,
o.scaleAutoReset,
o.scaleInitPosition,
o.scaleInitFillQty,
o.scaleRandomPercent,
*fields) = fields
o.hedgeType = fields.pop(0)
if o.hedgeType:
o.hedgeParam = fields.pop(0)
(
o.optOutSmartRouting,
o.clearingAccount,
o.clearingIntent,
o.notHeld,
dncPresent,
*fields) = fields
if int(dncPresent):
conId, delta, price, *fields = fields
c.deltaNeutralContract = DeltaNeutralContract(
int(conId or 0), float(delta or 0), float(price or 0))
o.algoStrategy = fields.pop(0)
if o.algoStrategy:
numParams = int(fields.pop(0))
if numParams > 0:
o.algoParams = []
for _ in range(numParams):
tag, value, *fields = fields
o.algoParams.append(
TagValue(tag, value))
(
o.solicited,
o.whatIf,
st.status,
*fields) = fields
if self.serverVersion >= 142:
(
st.initMarginBefore,
st.maintMarginBefore,
st.equityWithLoanBefore,
st.initMarginChange,
st.maintMarginChange,
st.equityWithLoanChange,
*fields) = fields
(
st.initMarginAfter,
st.maintMarginAfter,
st.equityWithLoanAfter,
st.commission,
st.minCommission,
st.maxCommission,
st.commissionCurrency,
st.warningText,
o.randomizeSize,
o.randomizePrice,
*fields) = fields
if o.orderType == 'PEG BENCH':
(
o.referenceContractId,
o.isPeggedChangeAmountDecrease,
o.peggedChangeAmount,
o.referenceChangeAmount,
o.referenceExchangeId,
*fields) = fields
numConditions = int(fields.pop(0))
if numConditions > 0:
for _ in range(numConditions):
condType = int(fields.pop(0))
condCls = OrderCondition.createClass(condType)
n = len(dataclasses.fields(condCls)) - 1
cond = condCls(condType, *fields[:n])
self.parse(cond)
o.conditions.append(cond)
fields = fields[n:]
(
o.conditionsIgnoreRth,
o.conditionsCancelOrder,
*fields) = fields
(
o.adjustedOrderType,
o.triggerPrice,
o.trailStopPrice,
o.lmtPriceOffset,
o.adjustedStopPrice,
o.adjustedStopLimitPrice,
o.adjustedTrailingAmount,
o.adjustableTrailingUnit,
o.softDollarTier.name,
o.softDollarTier.val,
o.softDollarTier.displayName,
o.cashQty,
*fields) = fields
if self.serverVersion >= 141:
o.dontUseAutoPriceForHedge = fields.pop(0)
if self.serverVersion >= 145:
o.isOmsContainer = fields.pop(0)
if self.serverVersion >= 148:
o.discretionaryUpToLimitPrice = fields.pop(0)
if self.serverVersion >= 151:
o.usePriceMgmtAlgo = fields.pop(0)
self.parse(c)
self.parse(o)
self.parse(st)
self.wrapper.openOrder(o.orderId, c, o, st)
def completedOrder(self, fields):
o = Order()
c = Contract()
st = OrderState()
(
_,
c.conId,
c.symbol,
c.secType,
c.lastTradeDateOrContractMonth,
c.strike,
c.right,
c.multiplier,
c.exchange,
c.currency,
c.localSymbol,
c.tradingClass,
o.action,
o.totalQuantity,
o.orderType,
o.lmtPrice,
o.auxPrice,
o.tif,
o.ocaGroup,
o.account,
o.openClose,
o.origin,
o.orderRef,
o.permId,
o.outsideRth,
o.hidden,
o.discretionaryAmt,
o.goodAfterTime,
o.faGroup,
o.faMethod,
o.faPercentage,
o.faProfile,
o.modelCode,
o.goodTillDate,
o.rule80A,
o.percentOffset,
o.settlingFirm,
o.shortSaleSlot,
o.designatedLocation,
o.exemptCode,
o.startingPrice,
o.stockRefPrice,
o.delta,
o.stockRangeLower,
o.stockRangeUpper,
o.displaySize,
o.sweepToFill,
o.allOrNone,
o.minQty,
o.ocaType,
o.triggerMethod,
o.volatility,
o.volatilityType,
o.deltaNeutralOrderType,
o.deltaNeutralAuxPrice,
*fields) = fields
if o.deltaNeutralOrderType:
(
o.deltaNeutralConId,
o.deltaNeutralShortSale,
o.deltaNeutralShortSaleSlot,
o.deltaNeutralDesignatedLocation,
*fields) = fields
(
o.continuousUpdate,
o.referencePriceType,
o.trailStopPrice,
o.trailingPercent,
c.comboLegsDescrip,
*fields) = fields
numLegs = int(fields.pop(0))
c.comboLegs = []
for _ in range(numLegs):
leg = ComboLeg()
(
leg.conId,
leg.ratio,
leg.action,
leg.exchange,
leg.openClose,
leg.shortSaleSlot,
leg.designatedLocation,
leg.exemptCode,
*fields) = fields
self.parse(leg)
c.comboLegs.append(leg)
numOrderLegs = int(fields.pop(0))
o.orderComboLegs = []
for _ in range(numOrderLegs):
leg = OrderComboLeg()
leg.price = fields.pop(0)
self.parse(leg)
o.orderComboLegs.append(leg)
numParams = int(fields.pop(0))
if numParams > 0:
o.smartComboRoutingParams = []
for _ in range(numParams):
tag, value, *fields = fields
o.smartComboRoutingParams.append(
TagValue(tag, value))
(
o.scaleInitLevelSize,
o.scaleSubsLevelSize,
increment,
*fields) = fields
o.scalePriceIncrement = float(increment or UNSET_DOUBLE)
if 0 < o.scalePriceIncrement < UNSET_DOUBLE:
(
o.scalePriceAdjustValue,
o.scalePriceAdjustInterval,
o.scaleProfitOffset,
o.scaleAutoReset,
o.scaleInitPosition,
o.scaleInitFillQty,
o.scaleRandomPercent,
*fields) = fields
o.hedgeType = fields.pop(0)
if o.hedgeType:
o.hedgeParam = fields.pop(0)
(
o.clearingAccount,
o.clearingIntent,
o.notHeld,
dncPresent,
*fields) = fields
if int(dncPresent):
conId, delta, price, *fields = fields
c.deltaNeutralContract = DeltaNeutralContract(
int(conId or 0), float(delta or 0), float(price or 0))
o.algoStrategy = fields.pop(0)
if o.algoStrategy:
numParams = int(fields.pop(0))
if numParams > 0:
o.algoParams = []
for _ in range(numParams):
tag, value, *fields = fields
o.algoParams.append(
TagValue(tag, value))
(
o.solicited,
st.status,
o.randomizeSize,
o.randomizePrice,
*fields) = fields
if o.orderType == 'PEG BENCH':
(
o.referenceContractId,
o.isPeggedChangeAmountDecrease,
o.peggedChangeAmount,
o.referenceChangeAmount,
o.referenceExchangeId,
*fields) = fields
numConditions = int(fields.pop(0))
if numConditions > 0:
for _ in range(numConditions):
condType = int(fields.pop(0))
condCls = OrderCondition.createClass(condType)
n = len(dataclasses.fields(condCls)) - 1
cond = condCls(condType, *fields[:n])
self.parse(cond)
o.conditions.append(cond)
fields = fields[n:]
(
o.conditionsIgnoreRth,
o.conditionsCancelOrder,
*fields) = fields
(
o.trailStopPrice,
o.lmtPriceOffset,
o.cashQty,
*fields) = fields
if self.serverVersion >= 141:
o.dontUseAutoPriceForHedge = fields.pop(0)
if self.serverVersion >= 145:
o.isOmsContainer = fields.pop(0)
(
o.autoCancelDate,
o.filledQuantity,
o.refFuturesConId,
o.autoCancelParent,
o.shareholder,
o.imbalanceOnly,
o.routeMarketableToBbo,
o.parentPermId,
st.completedTime,
st.completedStatus) = fields
self.parse(c)
self.parse(o)
self.parse(st)
self.wrapper.completedOrder(c, o, st)
| bsd-2-clause | -9,177,776,638,024,483,000 | 30.031452 | 78 | 0.487071 | false |
iwm911/plaso | plaso/parsers/winreg_plugins/ccleaner_test.py | 1 | 2650 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the CCleaner Windows Registry plugin."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import winreg as winreg_formatter
from plaso.parsers.winreg_plugins import ccleaner
from plaso.parsers.winreg_plugins import test_lib
from plaso.winreg import winregistry
__author__ = 'Marc Seguin ([email protected])'
class CCleanerRegistryPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the CCleaner Windows Registry plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = ccleaner.CCleanerPlugin()
def testProcess(self):
"""Tests the Process function."""
test_file = self._GetTestFilePath(['NTUSER-CCLEANER.DAT'])
key_path = u'\\Software\\Piriform\\CCleaner'
winreg_key = self._GetKeyFromFile(test_file, key_path)
event_generator = self._ParseKeyWithPlugin(self._plugin, winreg_key)
event_objects = self._GetEventObjects(event_generator)
self.assertEquals(len(event_objects), 17)
event_object = event_objects[0]
self.assertEquals(event_object.timestamp, 1373709794000000)
regvalue_identifier = u'UpdateKey'
expected_value = u'07/13/2013 10:03:14 AM'
self._TestRegvalue(event_object, regvalue_identifier, expected_value)
expected_string = u'[{0:s}] {1:s}: {2:s}'.format(
key_path, regvalue_identifier, expected_value)
self._TestGetMessageStrings(event_object, expected_string, expected_string)
event_object = event_objects[2]
self.assertEquals(event_object.timestamp, 0)
regvalue_identifier = u'(App)Delete Index.dat files'
expected_value = u'True'
self._TestRegvalue(event_object, regvalue_identifier, expected_value)
expected_string = u'[{0:s}] {1:s}: {2:s}'.format(
key_path, regvalue_identifier, expected_value)
self._TestGetMessageStrings(event_object, expected_string, expected_string)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,662,207,639,177,871,000 | 34.333333 | 79 | 0.728302 | false |
wtolson/gnsq | tests/test_consumer.py | 1 | 6256 | from __future__ import division, with_statement
import os
import pytest
import gevent
from gnsq import NsqdHTTPClient, Consumer, states
from gnsq.errors import NSQSocketError
from integration_server import LookupdIntegrationServer, NsqdIntegrationServer
SLOW_TIMEOUT = int(os.environ.get('SLOW_TIMEOUT', '10'), 10)
def test_basic():
with pytest.raises(ValueError):
Consumer('test', 'test')
with pytest.raises(TypeError):
Consumer(
topic='test',
channel='test',
nsqd_tcp_addresses=None,
lookupd_http_addresses='http://localhost:4161/',
)
with pytest.raises(TypeError):
Consumer(
topic='test',
channel='test',
nsqd_tcp_addresses='localhost:4150',
lookupd_http_addresses=None,
)
def message_handler(consumer, message):
pass
consumer = Consumer(
topic='test',
channel='test',
name='test',
nsqd_tcp_addresses='localhost:4150',
lookupd_http_addresses='http://localhost:4161/',
message_handler=message_handler
)
assert consumer.name == 'test'
assert len(consumer.on_message.receivers) == 1
assert isinstance(consumer.nsqd_tcp_addresses, set)
assert len(consumer.nsqd_tcp_addresses) == 1
assert isinstance(consumer.lookupds, list)
assert len(consumer.lookupds) == 1
@pytest.mark.slow
@pytest.mark.timeout(SLOW_TIMEOUT)
def test_messages():
with NsqdIntegrationServer() as server:
class Accounting(object):
count = 0
total = 500
error = None
conn = NsqdHTTPClient(server.address, server.http_port)
for _ in range(Accounting.total):
conn.publish('test', b'danger zone!')
consumer = Consumer(
topic='test',
channel='test',
nsqd_tcp_addresses=[server.tcp_address],
max_in_flight=100,
)
@consumer.on_exception.connect
def error_handler(consumer, message, error):
if isinstance(error, NSQSocketError):
return
Accounting.error = error
consumer.close()
@consumer.on_message.connect
def handler(consumer, message):
assert message.body == b'danger zone!'
Accounting.count += 1
if Accounting.count == Accounting.total:
consumer.close()
consumer.start()
if Accounting.error:
raise Accounting.error
assert Accounting.count == Accounting.total
@pytest.mark.slow
@pytest.mark.timeout(SLOW_TIMEOUT)
def test_lookupd():
with LookupdIntegrationServer() as lookupd_server:
server1 = NsqdIntegrationServer(lookupd=lookupd_server.tcp_address)
server2 = NsqdIntegrationServer(lookupd=lookupd_server.tcp_address)
with server1, server2:
class Accounting(object):
count = 0
total = 500
concurrency = 0
error = None
for server in (server1, server2):
conn = NsqdHTTPClient(server.address, server.http_port)
for _ in range(Accounting.total // 2):
conn.publish('test', b'danger zone!')
consumer = Consumer(
topic='test',
channel='test',
lookupd_http_addresses=lookupd_server.http_address,
max_in_flight=32,
)
@consumer.on_exception.connect
def error_handler(consumer, message, error):
if isinstance(error, NSQSocketError):
return
Accounting.error = error
consumer.close()
@consumer.on_message.connect
def handler(consumer, message):
assert message.body == b'danger zone!'
Accounting.count += 1
if Accounting.count == Accounting.total:
consumer.close()
gevent.sleep(0.1)
consumer.start()
if Accounting.error:
raise Accounting.error
assert Accounting.count == Accounting.total
@pytest.mark.slow
@pytest.mark.timeout(SLOW_TIMEOUT)
def test_backoff():
with NsqdIntegrationServer() as server:
conn = NsqdHTTPClient(server.address, server.http_port)
for _ in range(500):
conn.publish('test', 'danger zone!')
consumer = Consumer(
topic='test',
channel='test',
nsqd_tcp_addresses=[server.tcp_address],
max_in_flight=100,
message_handler=lambda consumer, message: None
)
consumer.start(block=False)
consumer._redistributed_ready_event.wait()
conn = next(iter(consumer._connections))
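        # Register two failures so the connection needs two successful
        # (throttled) messages before it can return to the RUNNING state.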
consumer._message_backoffs[conn].failure()
consumer._message_backoffs[conn].failure()
consumer._start_backoff(conn)
consumer._redistribute_ready_state()
assert consumer._connections[conn] == states.BACKOFF
assert consumer.total_ready_count == 0
consumer._start_throttled(conn)
consumer._redistribute_ready_state()
consumer._redistribute_ready_state()
assert consumer._connections[conn] == states.THROTTLED
assert consumer.total_ready_count == 1
consumer._message_backoffs[conn].success()
consumer._complete_backoff(conn)
consumer._redistribute_ready_state()
assert consumer._connections[conn] == states.BACKOFF
assert consumer.total_ready_count == 0
consumer._start_throttled(conn)
consumer._redistribute_ready_state()
assert consumer._connections[conn] == states.THROTTLED
assert consumer.total_ready_count == 1
consumer._message_backoffs[conn].success()
consumer._complete_backoff(conn)
consumer._redistribute_ready_state()
assert consumer._connections[conn] == states.RUNNING
assert consumer.total_ready_count == 100
def test_no_handlers():
consumer = Consumer('test', 'test', 'localhost:4150')
with pytest.raises(RuntimeError):
consumer.start(block=False)
| bsd-3-clause | -7,006,606,219,564,931,000 | 28.233645 | 78 | 0.597347 | false |
Bionetbook/bionetbook | bnbapp/config/updateVerbAttrib.py | 2 | 2390 | # updateVerbAttrib
# Author Oren Schaedel
# Date: 9/12/2012
# Version: 1.0
# this script updates the attributes and data structures that each verb receives.
# The script makes 2 dictionaries, a verb-attribute and an attribute-datatype
# it calls the verbs from the verb_attributes.txt file in the bionetbook/docs folder
# It calls the data types from attribute_datatype.txt
# The data in these files was manually imported from the google docs spreadsheet 'Attribtue Template' (spelling mistake intended)
# The script opens up each verb.py file, DELETES the old attributes and RE-WRITES the new ones.
# Verb-attribute list is imported from here:
newFile = open('verb_attributes.txt','r')
# import file and put into a list
lines=[]
for line in newFile:
lines.append(line)
tot = len(lines)
# create the verbs dict
verbs={}
for line in lines:
tmp=line.strip('\n').split('\t')
if tmp[0] not in verbs:
verbs[tmp[0]]=[]
verbs[tmp[0]].append(tmp[1])
newFile.close()
print len(verbs), len(verbs.values())
# import the attribute-data type list
datatypes = open('attribute_datatype.txt','r')
# don't use readlines(), it strips the '\t' and hurts sorting the lines.
rows = []
for line in datatypes:
rows.append(line)
tot = len(rows)
attributes={}
for i in range(tot):
if rows[i][0]!='\t':
tmp = rows[i].strip('\n').split('\t')
# check for new attribute
if tmp[0] not in attributes:
attributes[tmp[0]]=[]
attributes[tmp[0]].append(tmp[1] + ' = ' + tmp[2])
datatypes.close()
print len(attributes)
#Add the lines to each verb file
for verb in verbs:
try:
current_verb = '../bionetbook/verbs/forms/' + verb +'.py'
current_verb_file = open(current_verb,'r')
lines = current_verb_file.readlines() # read all lines to capture the top 9.
current_verb_file.close()
except IOError:
continue
current_verb_file = open(current_verb,'w')
current_verb_file.writelines(lines[0:8]) # write only the top 9 rows, all attributes will be deleted
for atts in verbs[verb]: # run through all attributes
if atts in attributes: # concurrence between verbs list and attributes list
datatypes = attributes[atts] # list out all datatypes for each attribute
for line in datatypes: # paste each attribute = datatype in a new row.
current_verb_file.write('\n %s' % line) # the four spaces are necessary, not a tab.
current_verb_file.close()
| mit | -692,785,902,141,880,300 | 25.853933 | 129 | 0.705439 | false |
4doemaster/enigma2 | lib/python/Components/ConfigList.py | 2 | 7757 | from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from config import KEY_LEFT, KEY_RIGHT, KEY_HOME, KEY_END, KEY_0, KEY_DELETE, KEY_BACKSPACE, KEY_OK, KEY_TOGGLEOW, KEY_ASCII, KEY_TIMEOUT, KEY_NUMBERS, ConfigElement, ConfigText, ConfigPassword
from Components.ActionMap import NumberActionMap, ActionMap
from enigma import eListbox, eListboxPythonConfigContent, eRCInput, eTimer
from Screens.MessageBox import MessageBox
class ConfigList(HTMLComponent, GUIComponent, object):
def __init__(self, list, session = None):
GUIComponent.__init__(self)
self.l = eListboxPythonConfigContent()
self.l.setSeperation(200)
self.timer = eTimer()
self.list = list
self.onSelectionChanged = [ ]
self.current = None
self.session = session
def execBegin(self):
rcinput = eRCInput.getInstance()
rcinput.setKeyboardMode(rcinput.kmAscii)
self.timer.callback.append(self.timeout)
def execEnd(self):
rcinput = eRCInput.getInstance()
rcinput.setKeyboardMode(rcinput.kmNone)
self.timer.callback.remove(self.timeout)
def toggle(self):
selection = self.getCurrent()
selection[1].toggle()
self.invalidateCurrent()
def handleKey(self, key):
selection = self.getCurrent()
if selection and selection[1].enabled:
selection[1].handleKey(key)
self.invalidateCurrent()
if key in KEY_NUMBERS:
self.timer.start(1000, 1)
def getCurrent(self):
return self.l.getCurrentSelection()
def getCurrentIndex(self):
return self.l.getCurrentSelectionIndex()
def setCurrentIndex(self, index):
if self.instance is not None:
self.instance.moveSelectionTo(index)
def invalidateCurrent(self):
self.l.invalidateEntry(self.l.getCurrentSelectionIndex())
def invalidate(self, entry):
# when the entry to invalidate does not exist, just ignore the request.
# this eases up conditional setup screens a lot.
if entry in self.__list:
self.l.invalidateEntry(self.__list.index(entry))
GUI_WIDGET = eListbox
def selectionChanged(self):
if isinstance(self.current,tuple) and len(self.current) == 2:
self.current[1].onDeselect(self.session)
self.current = self.getCurrent()
if isinstance(self.current,tuple) and len(self.current) == 2:
self.current[1].onSelect(self.session)
else:
return
for x in self.onSelectionChanged:
x()
def postWidgetCreate(self, instance):
instance.selectionChanged.get().append(self.selectionChanged)
instance.setContent(self.l)
def preWidgetRemove(self, instance):
if isinstance(self.current,tuple) and len(self.current) == 2:
self.current[1].onDeselect(self.session)
instance.selectionChanged.get().remove(self.selectionChanged)
instance.setContent(None)
def setList(self, l):
self.timer.stop()
self.__list = l
self.l.setList(self.__list)
if l is not None:
for x in l:
assert len(x) < 2 or isinstance(x[1], ConfigElement), "entry in ConfigList " + str(x[1]) + " must be a ConfigElement"
def getList(self):
return self.__list
list = property(getList, setList)
def timeout(self):
self.handleKey(KEY_TIMEOUT)
def isChanged(self):
is_changed = False
for x in self.list:
is_changed |= x[1].isChanged()
return is_changed
class ConfigListScreen:
def __init__(self, list, session = None, on_change = None):
self["config_actions"] = NumberActionMap(["SetupActions", "InputAsciiActions", "KeyboardInputActions"],
{
"gotAsciiCode": self.keyGotAscii,
"ok": self.keyOK,
"left": self.keyLeft,
"right": self.keyRight,
"home": self.keyHome,
"end": self.keyEnd,
"deleteForward": self.keyDelete,
"deleteBackward": self.keyBackspace,
"toggleOverwrite": self.keyToggleOW,
"pageUp": self.keyPageUp,
"pageDown": self.keyPageDown,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1) # to prevent left/right overriding the listbox
self["VirtualKB"] = ActionMap(["VirtualKeyboardActions"],
{
"showVirtualKeyboard": self.KeyText,
}, -2)
self["VirtualKB"].setEnabled(False)
self["config"] = ConfigList(list, session = session)
if on_change is not None:
self.__changed = on_change
else:
self.__changed = lambda: None
if not self.handleInputHelpers in self["config"].onSelectionChanged:
self["config"].onSelectionChanged.append(self.handleInputHelpers)
def handleInputHelpers(self):
if self["config"].getCurrent() is not None:
if isinstance(self["config"].getCurrent()[1], ConfigText) or isinstance(self["config"].getCurrent()[1], ConfigPassword):
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(True)
self["VKeyIcon"].boolean = True
if self.has_key("HelpWindow"):
if self["config"].getCurrent()[1].help_window.instance is not None:
helpwindowpos = self["HelpWindow"].getPosition()
from enigma import ePoint
self["config"].getCurrent()[1].help_window.instance.move(ePoint(helpwindowpos[0],helpwindowpos[1]))
else:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
else:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
def KeyText(self):
from Screens.VirtualKeyBoard import VirtualKeyBoard
self.session.openWithCallback(self.VirtualKeyBoardCallback, VirtualKeyBoard, title = self["config"].getCurrent()[0], text = self["config"].getCurrent()[1].getValue())
def VirtualKeyBoardCallback(self, callback = None):
if callback is not None and len(callback):
self["config"].getCurrent()[1].setValue(callback)
self["config"].invalidate(self["config"].getCurrent())
def keyOK(self):
self["config"].handleKey(KEY_OK)
def keyLeft(self):
self["config"].handleKey(KEY_LEFT)
self.__changed()
def keyRight(self):
self["config"].handleKey(KEY_RIGHT)
self.__changed()
def keyHome(self):
self["config"].handleKey(KEY_HOME)
self.__changed()
def keyEnd(self):
self["config"].handleKey(KEY_END)
self.__changed()
def keyDelete(self):
self["config"].handleKey(KEY_DELETE)
self.__changed()
def keyBackspace(self):
self["config"].handleKey(KEY_BACKSPACE)
self.__changed()
def keyToggleOW(self):
self["config"].handleKey(KEY_TOGGLEOW)
self.__changed()
def keyGotAscii(self):
self["config"].handleKey(KEY_ASCII)
self.__changed()
def keyNumberGlobal(self, number):
self["config"].handleKey(KEY_0 + number)
self.__changed()
def keyPageDown(self):
if self["config"].getCurrentIndex() + 10 <= (len(self["config"].getList()) - 1):
self["config"].setCurrentIndex(self["config"].getCurrentIndex() + 10)
else:
self["config"].setCurrentIndex((len(self["config"].getList()) - 1))
def keyPageUp(self):
if self["config"].getCurrentIndex() - 10 > 0:
self["config"].setCurrentIndex(self["config"].getCurrentIndex() - 10)
else:
self["config"].setCurrentIndex(0)
def saveAll(self):
for x in self["config"].list:
x[1].save()
# keySave and keyCancel are just provided in case you need them.
# you have to call them by yourself.
def keySave(self):
self.saveAll()
self.close()
def cancelConfirm(self, result):
if not result:
return
for x in self["config"].list:
x[1].cancel()
self.close()
def closeMenuList(self, recursive = False):
if self["config"].isChanged():
self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"))
else:
self.close(recursive)
def keyCancel(self):
self.closeMenuList()
def closeRecursive(self):
self.closeMenuList(True)
| gpl-2.0 | -7,962,493,640,466,334,000 | 28.949807 | 193 | 0.709295 | false |
bjzhang/xen_arm | tools/python/xen/xend/XendBootloader.py | 35 | 7321 | #
# XendBootloader.py - Framework to run a boot loader for picking the kernel
#
# Copyright 2005-2006 Red Hat, Inc.
# Jeremy Katz <[email protected]>
#
# This software may be freely redistributed under the terms of the GNU
# general public license.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import os, select, errno, stat, signal, tty
import random
import shlex
from xen.xend import sxp
from xen.util import mkdir, oshelp
from XendLogging import log
from XendError import VmError
import pty, termios, fcntl
from xen.lowlevel import ptsname
def bootloader(blexec, disk, dom, quiet = False, blargs = '', kernel = '',
ramdisk = '', kernel_args = ''):
"""Run the boot loader executable on the given disk and return a
config image.
@param blexec Binary to use as the boot loader
@param disk Disk to run the boot loader on.
@param dom DomainInfo representing the domain being booted.
@param quiet Run in non-interactive mode, just booting the default.
@param blargs Arguments to pass to the bootloader."""
if not os.access(blexec, os.X_OK):
msg = "Bootloader isn't executable"
log.error(msg)
raise VmError(msg)
if not os.access(disk, os.R_OK):
msg = "Disk isn't accessible"
log.error(msg)
raise VmError(msg)
if os.uname()[0] == "NetBSD" and disk.startswith('/dev/'):
disk = "/r".join(disk.rsplit("/",1))
mkdir.parents("/var/run/xend/boot/", stat.S_IRWXU)
while True:
fifo = "/var/run/xend/boot/xenbl.%s" %(random.randint(0, 32000),)
try:
os.mkfifo(fifo, 0600)
except OSError, e:
if (e.errno != errno.EEXIST):
raise
break
# We need to present the bootloader's tty as a pty slave that xenconsole
# can access. Since the bootloader itself needs a pty slave,
# we end up with a connection like this:
#
# xenconsole -- (slave pty1 master) <-> (master pty2 slave) -- bootloader
#
# where we copy characters between the two master fds, as well as
# listening on the bootloader's fifo for the results.
(m1, s1) = pty.openpty()
# On Solaris, the pty master side will get cranky if we try
# to write to it while there is no slave. To work around this,
# keep the slave descriptor open until we're done. Set it
# to raw terminal parameters, otherwise it will echo back
# characters, which will confuse the I/O loop below.
# Furthermore, a raw master pty device has no terminal
# semantics on Solaris, so don't try to set any attributes
# for it.
if os.uname()[0] != 'SunOS' and os.uname()[0] != 'NetBSD':
tty.setraw(m1)
os.close(s1)
else:
tty.setraw(s1)
fcntl.fcntl(m1, fcntl.F_SETFL, os.O_NDELAY)
slavename = ptsname.ptsname(m1)
dom.storeDom("console/tty", slavename)
# Release the domain lock here, because we definitely don't want
# a stuck bootloader to deny service to other xend clients.
from xen.xend import XendDomain
domains = XendDomain.instance()
domains.domains_lock.release()
(child, m2) = pty.fork()
if (not child):
args = [ blexec ]
if kernel:
args.append("--kernel=%s" % kernel)
if ramdisk:
args.append("--ramdisk=%s" % ramdisk)
if kernel_args:
args.append("--args=%s" % kernel_args)
if quiet:
args.append("-q")
args.append("--output=%s" % fifo)
if blargs:
args.extend(shlex.split(blargs))
args.append(disk)
try:
log.debug("Launching bootloader as %s." % str(args))
env = os.environ.copy()
env['TERM'] = 'vt100'
oshelp.close_fds()
os.execvpe(args[0], args, env)
except OSError, e:
print e
pass
os._exit(1)
# record that this domain is bootloading
dom.bootloader_pid = child
# On Solaris, the master pty side does not have terminal semantics,
# so don't try to set any attributes, as it will fail.
if os.uname()[0] != 'SunOS':
tty.setraw(m2);
fcntl.fcntl(m2, fcntl.F_SETFL, os.O_NDELAY);
while True:
try:
r = os.open(fifo, os.O_RDONLY)
except OSError, e:
if e.errno == errno.EINTR:
continue
break
fcntl.fcntl(r, fcntl.F_SETFL, os.O_NDELAY);
ret = ""
inbuf=""; outbuf="";
# filedescriptors:
# r - input from the bootloader (bootstring output)
# m1 - input/output from/to xenconsole
# m2 - input/output from/to pty that controls the bootloader
# The filedescriptors are NDELAY, so it's ok to try to read
# bigger chunks than may be available, to keep e.g. curses
# screen redraws in the bootloader efficient. m1 is the side that
# gets xenconsole input, which will be keystrokes, so a small number
# is sufficient. m2 is pygrub output, which will be curses screen
# updates, so a larger number (1024) is appropriate there.
#
# For writeable descriptors, only include them in the set for select
# if there is actual data to write, otherwise this would loop too fast,
# eating up CPU time.
while True:
wsel = []
if len(outbuf) != 0:
wsel = wsel + [m1]
if len(inbuf) != 0:
wsel = wsel + [m2]
sel = select.select([r, m1, m2], wsel, [])
try:
if m1 in sel[0]:
s = os.read(m1, 16)
inbuf += s
if m2 in sel[1]:
n = os.write(m2, inbuf)
inbuf = inbuf[n:]
except OSError, e:
if e.errno == errno.EIO:
pass
try:
if m2 in sel[0]:
s = os.read(m2, 1024)
outbuf += s
if m1 in sel[1]:
n = os.write(m1, outbuf)
outbuf = outbuf[n:]
except OSError, e:
if e.errno == errno.EIO:
pass
if r in sel[0]:
s = os.read(r, 128)
ret = ret + s
if len(s) == 0:
break
del inbuf
del outbuf
os.waitpid(child, 0)
os.close(r)
os.close(m2)
os.close(m1)
if os.uname()[0] == 'SunOS' or os.uname()[0] == 'NetBSD':
os.close(s1)
os.unlink(fifo)
# Re-acquire the lock to cover the changes we're about to make
# when we return to domain creation.
domains.domains_lock.acquire()
if dom.bootloader_pid is None:
msg = "Domain was died while the bootloader was running."
log.error(msg)
raise VmError, msg
dom.bootloader_pid = None
if len(ret) == 0:
msg = "Boot loader didn't return any data!"
log.error(msg)
raise VmError, msg
pin = sxp.Parser()
pin.input(ret)
pin.input_eof()
blcfg = pin.val
return blcfg
def bootloader_tidy(dom):
if hasattr(dom, "bootloader_pid") and dom.bootloader_pid is not None:
pid = dom.bootloader_pid
dom.bootloader_pid = None
os.kill(pid, signal.SIGKILL)
| gpl-2.0 | -4,302,239,368,442,828,000 | 30.830435 | 77 | 0.586395 | false |
nopple/ctf | dosfun4u/pwn.py | 1 | 3301 | #!/usr/bin/env python
import socket, subprocess, sys
from struct import pack, unpack
global scenes
global officers
scenes = {}
officers = {}
remote = len(sys.argv) > 1
PORT = 8888
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if remote:
HOST = "dosfun4u_5d712652e1d06a362f7fc6d12d66755b.2014.shallweplayaga.me"
else:
HOST = '127.0.0.1'
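# Every command carries a trailing 16-bit additive checksum of its payload.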
def chksum(data):
ret = 0
for d in data:
ret += ord(d)
return ret & 0xffff
def add_officer(officer_id, status=0, x=0, y=0):
global officers
print 'update' if officers.has_key(officer_id) and officers[officer_id] else 'add', 'officer', hex(officer_id)
officers[officer_id] = True
payload = pack('H', 0x7d0)
payload += pack('H', officer_id)
payload += pack('H', status)
payload += pack('H', x)
payload += pack('H', y)
payload += pack('H', 0x0)
return payload
def remove_officer(officer_id):
global officers
print 'remove officer', hex(officer_id), 'should work' if officers.has_key(officer_id) and officers[officer_id] else 'should fail'
officers[officer_id] = False
payload = pack('H', 0xbb8)
payload += pack('H', officer_id)
return payload
def add_scene(scene_id, data2, data3, inline_data='', x=0, y=0):
global scenes
print 'update' if scenes.has_key(scene_id) and scenes[scene_id] else 'add', 'scene', hex(scene_id)
scenes[scene_id] = True
size1 = len(inline_data)/2
size2 = len(data2)
size3 = len(data3)
payload = pack('H', 0xfa0)
payload += pack('H', scene_id)
payload += pack('H', x)
payload += pack('H', y)
payload += pack('B', size1)
payload += pack('B', size2)
payload += pack('H', size3)
payload += pack('H', 0)
payload += inline_data[:size1*2]
payload += data2
payload += data3
return payload
def recv_all(s, size):
ret = []
received = 0
while size > received:
c = s.recv(size-received)
if c == '':
raise Exception('Connection closed')
ret.append(c)
received += len(c)
return ''.join(ret)
def recv_until(s, pattern):
ret = ''
while True:
c = s.recv(1)
if c == '':
raise Exception("Connection closed")
ret += c
if ret.find(pattern) != -1:
break
return ret
s.connect((HOST, PORT))
if remote:
print s.recv(4096)
buf = s.recv(4096)
print buf
data = buf.split(' ')[0]
print 'challenge = {}'.format(data)
print 'hashcatting...'
p = subprocess.Popen(['./hashcat', data], stdout=subprocess.PIPE);
result = p.communicate()[0].strip('\n\r\t ')
print 'response = {}'.format(result)
s.send(result)
def send_cmd(s,payload,recvLen=0):
payload += pack('H', chksum(payload))
s.send(payload)
return recv_all(s, recvLen)
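# Exploit flow (see the prints below): free an officer block and reuse it,
# write shellcode to 008f:0000 via a scene record, redirect an officer
# pointer at a fake officer on the stack, then overwrite the saved return
# address so execution lands in the shellcode.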
shellcode = open('shellcode', 'rb').read()
print 'Getting block into free-list'
send_cmd(s,add_officer(1),5)
send_cmd(s,remove_officer(1),5)
print 'Adding officer to reuse block from free-list'
send_cmd(s,add_officer(0xc),5)
print 'Writing shellcode to 008f:0000'
send_cmd(s,add_scene(1, pack("<HHHHHH", 0xc, 0, 0x4688, 0x8f, 0, 0), shellcode),5)
print 'Modifying officer structure to include pointer to fake officer on stack'
send_cmd(s,add_scene(2, pack("<HHHHHH", 1, 0, 0, 0, 0x47aa, 0x011f), "lolololol"),5)
print 'Writing return to shellcode on stack'
send_cmd(s,add_officer(0x945, 0x1d26, 0x10, 0x97),5)
print 'Receiving response...'
print 'Key 1:', recv_until(s,'\n').replace('\x00', '')[:-1]
print 'Key 2:', recv_until(s,'\n')[:-1]
| mit | 8,916,484,444,888,102,000 | 25.620968 | 131 | 0.668585 | false |
chainer/chainercv | examples/deeplab/demo.py | 3 | 2484 | import argparse
import matplotlib.pyplot as plt
import chainer
from chainercv.datasets import ade20k_semantic_segmentation_label_colors
from chainercv.datasets import ade20k_semantic_segmentation_label_names
from chainercv.datasets import cityscapes_semantic_segmentation_label_colors
from chainercv.datasets import cityscapes_semantic_segmentation_label_names
from chainercv.datasets import voc_semantic_segmentation_label_colors
from chainercv.datasets import voc_semantic_segmentation_label_names
from chainercv.links import DeepLabV3plusXception65
from chainercv import utils
from chainercv.visualizations import vis_image
from chainercv.visualizations import vis_semantic_segmentation
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model')
parser.add_argument('--min-input-size', type=int, default=None)
parser.add_argument(
'--dataset', choices=('cityscapes', 'ade20k', 'voc'),
default='cityscapes')
parser.add_argument('image')
args = parser.parse_args()
if args.dataset == 'cityscapes':
if args.pretrained_model is None:
args.pretrained_model = 'cityscapes'
label_names = cityscapes_semantic_segmentation_label_names
colors = cityscapes_semantic_segmentation_label_colors
elif args.dataset == 'ade20k':
if args.pretrained_model is None:
args.pretrained_model = 'ade20k'
label_names = ade20k_semantic_segmentation_label_names
colors = ade20k_semantic_segmentation_label_colors
elif args.dataset == 'voc':
if args.pretrained_model is None:
args.pretrained_model = 'voc'
label_names = voc_semantic_segmentation_label_names
colors = voc_semantic_segmentation_label_colors
model = DeepLabV3plusXception65(
pretrained_model=args.pretrained_model,
min_input_size=args.min_input_size)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
img = utils.read_image(args.image, color=True)
labels = model.predict([img])
label = labels[0]
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
vis_image(img, ax=ax1)
ax2 = fig.add_subplot(1, 2, 2)
# Do not overlay the label image on the color image
vis_semantic_segmentation(
None, label, label_names, colors, ax=ax2)
plt.show()
if __name__ == '__main__':
main()
| mit | -4,225,053,425,650,061,000 | 35 | 76 | 0.702093 | false |
Xeralux/tensorflow | tensorflow/python/keras/_impl/keras/datasets/fashion_mnist.py | 4 | 2141 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fashion-MNIST dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import numpy as np
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.datasets.fashion_mnist.load_data')
def load_data():
"""Loads the Fashion-MNIST dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
dirname = os.path.join('datasets', 'fashion-mnist')
base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
files = [
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
]
paths = []
for fname in files:
paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))
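# Label files carry an 8-byte header before the uint8 labels; image files carry a 16-byte header before 28x28 uint8 images.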
with gzip.open(paths[0], 'rb') as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], 'rb') as imgpath:
x_train = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
with gzip.open(paths[2], 'rb') as lbpath:
y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[3], 'rb') as imgpath:
x_test = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)
return (x_train, y_train), (x_test, y_test)
| apache-2.0 | 3,402,314,261,342,004,000 | 33.532258 | 80 | 0.672583 | false |
sammyshj/stem | test/unit/tutorial_examples.py | 1 | 13989 | """
Tests for the examples given in stem's tutorial.
"""
import itertools
import os
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import stem.response
import stem.descriptor.remote
import stem.prereq
import test.runner
from stem import str_type
from stem.control import Controller
from stem.descriptor.remote import DIRECTORY_AUTHORITIES
from test import mocking
from test.unit import exec_documentation_example
from test.mocking import (
get_relay_server_descriptor,
get_router_status_entry_v3,
ROUTER_STATUS_ENTRY_V3_HEADER,
get_network_status_document_v3,
)
try:
# added in python 3.3
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
OPEN_FUNCTION = open # make a reference so mocking open() won't mess with us
CIRC_CONTENT = '650 CIRC %d %s \
%s \
PURPOSE=%s'
PATH_CONTENT = '$%s=%s,$%s=%s,$%s=%s'
LIST_CIRCUITS_OUTPUT = str_type("""\
Circuit 4 (GENERAL)
|- B1FA7D51B8B6F0CB585D944F450E7C06EDE7E44C (ByTORAndTheSnowDog, 173.209.180.61)
|- 0DD9935C5E939CFA1E07B8DDA6D91C1A2A9D9338 (afo02, 87.238.194.176)
+- DB3B1CFBD3E4D97B84B548ADD5B9A31451EEC4CC (edwardsnowden3, 109.163.234.10)
Circuit 6 (GENERAL)
|- B1FA7D51B8B6F0CB585D944F450E7C06EDE7E44C (ByTORAndTheSnowDog, 173.209.180.61)
|- EC01CB4766BADC1611678555CE793F2A7EB2D723 (sprockets, 46.165.197.96)
+- 9EA317EECA56BDF30CAEB208A253FB456EDAB1A0 (bolobolo1, 96.47.226.20)
Circuit 10 (GENERAL)
|- B1FA7D51B8B6F0CB585D944F450E7C06EDE7E44C (ByTORAndTheSnowDog, 173.209.180.61)
|- 00C2C2A16AEDB51D5E5FB7D6168FC66B343D822F (ph3x, 86.59.119.83)
+- 65242C91BFF30F165DA4D132C81A9EBA94B71D62 (torexit16, 176.67.169.171)
""")
EXIT_USED_OUTPUT = str_type("""\
Tracking requests for tor exits. Press 'enter' to end.
Exit relay for our connection to 64.15.112.44:80
address: 31.172.30.2:443
fingerprint: A59E1E7C7EAEE083D756EE1FF6EC31CA3D8651D7
nickname: chaoscomputerclub19
locale: unknown
""")
OUTDATED_RELAYS_OUTPUT = str_type("""\
Checking for outdated relays...
0.1.0 Sambuddha Basu
2 outdated relays found, 1 had contact information
""")
COMPARE_FLAGS_OUTPUT = """\
maatuska has the Running flag but moria1 doesn't: E2BB13AA2F6960CD93ABE5257A825687F3973C62
moria1 has the Running flag but maatuska doesn't: 546C54E2A89D88E0794D04AECBF1AC8AC9DA81DE
maatuska has the Running flag but moria1 doesn't: 92FCB6748A40E6088E22FBAB943AB2DD743EA818
maatuska has the Running flag but moria1 doesn't: 6871F682350BA931838C0EC1E4A23044DAE06A73
moria1 has the Running flag but maatuska doesn't: DCAEC3D069DC39AAE43D13C8AF31B5645E05ED61
"""
VOTES_BY_BANDWIDTH_AUTHORITIES_OUTPUT = """\
Getting gabelmoo's vote from http://131.188.40.189:80/tor/status-vote/current/authority:
5935 measured entries and 1332 unmeasured
Getting moria1's vote from http://128.31.0.39:9131/tor/status-vote/current/authority:
6647 measured entries and 625 unmeasured
Getting maatuska's vote from http://171.25.193.9:443/tor/status-vote/current/authority:
6313 measured entries and 1112 unmeasured
"""
PERSISTING_A_CONSENSUS_OUTPUT = """\
A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB: caerSidi
"""
def _get_event(content):
controller_event = mocking.get_message(content)
stem.response.convert('EVENT', controller_event)
return controller_event
def _get_circ_event(id, status, hop1, hop2, hop3, purpose):
path = PATH_CONTENT % (hop1[0], hop1[1], hop2[0], hop2[1], hop3[0], hop3[1])
content = CIRC_CONTENT % (id, status, path, purpose)
return _get_event(content)
def _get_router_status(address = None, port = None, nickname = None, fingerprint_base64 = None, s_line = None):
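# Build a router status entry from the default 'r' line, patching in any overridden address, port, nickname or fingerprint.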
r_line = ROUTER_STATUS_ENTRY_V3_HEADER[0][1]
if address:
r_line = r_line.replace('71.35.150.29', address)
if port:
r_line = r_line.replace('9001', port)
if nickname:
r_line = r_line.replace('caerSidi', nickname)
if fingerprint_base64:
r_line = r_line.replace('p1aag7VwarGxqctS7/fS0y5FU+s', fingerprint_base64)
if s_line:
return get_router_status_entry_v3({'r': r_line, 's': s_line})
else:
return get_router_status_entry_v3({'r': r_line})
class TestTutorialExamples(unittest.TestCase):
def assert_equal_unordered(self, expected, actual):
if stem.prereq.is_python_3():
self.assertCountEqual(expected.splitlines(), actual.splitlines())
else:
self.assertItemsEqual(expected.splitlines(), actual.splitlines())
@patch('sys.stdout', new_callable = StringIO)
@patch('stem.control.Controller.from_port', spec = Controller)
def test_list_circuits(self, from_port_mock, stdout_mock):
path_1 = ('B1FA7D51B8B6F0CB585D944F450E7C06EDE7E44C', 'ByTORAndTheSnowDog')
path_2 = ('0DD9935C5E939CFA1E07B8DDA6D91C1A2A9D9338', 'afo02')
path_3 = ('DB3B1CFBD3E4D97B84B548ADD5B9A31451EEC4CC', 'edwardsnowden3')
path_4 = ('EC01CB4766BADC1611678555CE793F2A7EB2D723', 'sprockets')
path_5 = ('9EA317EECA56BDF30CAEB208A253FB456EDAB1A0', 'bolobolo1')
path_6 = ('00C2C2A16AEDB51D5E5FB7D6168FC66B343D822F', 'ph3x')
path_7 = ('65242C91BFF30F165DA4D132C81A9EBA94B71D62', 'torexit16')
circuit_4 = _get_circ_event(4, 'BUILT', path_1, path_2, path_3, 'GENERAL')
circuit_6 = _get_circ_event(6, 'BUILT', path_1, path_4, path_5, 'GENERAL')
circuit_10 = _get_circ_event(10, 'BUILT', path_1, path_6, path_7, 'GENERAL')
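# Wire the mocked controller: get_circuits() returns the three circuits above and get_network_status() maps each hop fingerprint to the address expected in the output.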
controller = from_port_mock().__enter__()
controller.get_circuits.return_value = [circuit_4, circuit_6, circuit_10]
controller.get_network_status.side_effect = lambda fingerprint, *args: {
path_1[0]: _get_router_status('173.209.180.61'),
path_2[0]: _get_router_status('87.238.194.176'),
path_3[0]: _get_router_status('109.163.234.10'),
path_4[0]: _get_router_status('46.165.197.96'),
path_5[0]: _get_router_status('96.47.226.20'),
path_6[0]: _get_router_status('86.59.119.83'),
path_7[0]: _get_router_status('176.67.169.171')
}[fingerprint]
exec_documentation_example('list_circuits.py')
self.assert_equal_unordered(LIST_CIRCUITS_OUTPUT, stdout_mock.getvalue())
@patch('sys.stdout', new_callable = StringIO)
@patch('stem.control.Controller.from_port', spec = Controller)
def test_exit_used(self, from_port_mock, stdout_mock):
def tutorial_example(mock_event):
import functools
from stem import StreamStatus
from stem.control import EventType, Controller
def main():
print("Tracking requests for tor exits. Press 'enter' to end.\n")
with Controller.from_port() as controller:
controller.authenticate()
stream_listener = functools.partial(stream_event, controller)
controller.add_event_listener(stream_listener, EventType.STREAM)
stream_event(controller, mock_event) # simulate an event during the raw_input()
def stream_event(controller, event):
if event.status == StreamStatus.SUCCEEDED and event.circ_id:
circ = controller.get_circuit(event.circ_id)
exit_fingerprint = circ.path[-1][0]
exit_relay = controller.get_network_status(exit_fingerprint)
print('Exit relay for our connection to %s' % (event.target))
print(' address: %s:%i' % (exit_relay.address, exit_relay.or_port))
print(' fingerprint: %s' % exit_relay.fingerprint)
print(' nickname: %s' % exit_relay.nickname)
print(' locale: %s\n' % controller.get_info('ip-to-country/%s' % exit_relay.address, 'unknown'))
main()
path_1 = ('9EA317EECA56BDF30CAEB208A253FB456EDAB1A0', 'bolobolo1')
path_2 = ('00C2C2A16AEDB51D5E5FB7D6168FC66B343D822F', 'ph3x')
path_3 = ('A59E1E7C7EAEE083D756EE1FF6EC31CA3D8651D7', 'chaoscomputerclub19')
circuit = _get_circ_event(1, 'BUILT', path_1, path_2, path_3, 'GENERAL')
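# STREAM event: stream id 15 SUCCEEDED on circuit 3, destination 64.15.112.44:80.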
event_content = '650 STREAM 15 SUCCEEDED 3 64.15.112.44:80'
event = _get_event(event_content)
controller = from_port_mock().__enter__()
controller.get_circuit.return_value = circuit
controller.get_network_status.return_value = _get_router_status('31.172.30.2', '443', path_3[1], 'pZ4efH6u4IPXVu4f9uwxyj2GUdc=')
controller.get_info.return_value = 'unknown'
tutorial_example(event)
self.assert_equal_unordered(EXIT_USED_OUTPUT, stdout_mock.getvalue())
@patch('sys.stdout', new_callable = StringIO)
@patch('stem.descriptor.remote.DescriptorDownloader')
def test_outdated_relays(self, downloader_mock, stdout_mock):
downloader_mock().get_server_descriptors.return_value = [
get_relay_server_descriptor({'platform': 'node-Tor 0.2.3.0 on Linux x86_64'}),
get_relay_server_descriptor({'platform': 'node-Tor 0.1.0 on Linux x86_64'}),
get_relay_server_descriptor({'opt': 'contact Random Person [email protected]', 'platform': 'node-Tor 0.2.3.0 on Linux x86_64'}),
get_relay_server_descriptor({'opt': 'contact Sambuddha Basu', 'platform': 'node-Tor 0.1.0 on Linux x86_64'}),
]
exec_documentation_example('outdated_relays.py')
self.assert_equal_unordered(OUTDATED_RELAYS_OUTPUT, stdout_mock.getvalue())
@patch('sys.stdout', new_callable = StringIO)
@patch('stem.descriptor.remote.Query')
@patch('stem.descriptor.remote.get_authorities')
def test_compare_flags(self, get_authorities_mock, query_mock, stdout_mock):
if stem.prereq._is_python_26():
# example imports OrderedDict from collections which doesn't work under
# python 2.6
test.runner.skip(self, "(example doesn't support python 2.6)")
return
get_authorities_mock().items.return_value = [('moria1', DIRECTORY_AUTHORITIES['moria1']), ('maatuska', DIRECTORY_AUTHORITIES['maatuska'])]
fingerprint = [
('92FCB6748A40E6088E22FBAB943AB2DD743EA818', 'kvy2dIpA5giOIvurlDqy3XQ+qBg='),
('6871F682350BA931838C0EC1E4A23044DAE06A73', 'aHH2gjULqTGDjA7B5KIwRNrganM='),
('E2BB13AA2F6960CD93ABE5257A825687F3973C62', '4rsTqi9pYM2Tq+UleoJWh/OXPGI='),
('546C54E2A89D88E0794D04AECBF1AC8AC9DA81DE', 'VGxU4qidiOB5TQSuy/Gsisnagd4='),
('DCAEC3D069DC39AAE43D13C8AF31B5645E05ED61', '3K7D0GncOarkPRPIrzG1ZF4F7WE='),
]
entry = [
# entries for moria1
_get_router_status(fingerprint_base64 = fingerprint[0][1], s_line = ' '),
_get_router_status(fingerprint_base64 = fingerprint[1][1], s_line = ' '),
_get_router_status(fingerprint_base64 = fingerprint[2][1], s_line = ' '),
_get_router_status(fingerprint_base64 = fingerprint[3][1]),
_get_router_status(fingerprint_base64 = fingerprint[4][1]),
# entries for maatuska
_get_router_status(fingerprint_base64 = fingerprint[0][1]),
_get_router_status(fingerprint_base64 = fingerprint[1][1]),
_get_router_status(fingerprint_base64 = fingerprint[2][1]),
_get_router_status(fingerprint_base64 = fingerprint[3][1], s_line = ' '),
_get_router_status(fingerprint_base64 = fingerprint[4][1], s_line = ' '),
]
query_mock().run.side_effect = [
[get_network_status_document_v3(routers = (entry[0], entry[1], entry[2], entry[3], entry[4]))],
[get_network_status_document_v3(routers = (entry[5], entry[6], entry[7], entry[8], entry[9]))],
]
exec_documentation_example('compare_flags.py')
self.assert_equal_unordered(COMPARE_FLAGS_OUTPUT, stdout_mock.getvalue())
@patch('sys.stdout', new_callable = StringIO)
@patch('stem.descriptor.remote.get_authorities')
@patch('stem.descriptor.remote.DescriptorDownloader.query')
def test_votes_by_bandwidth_authorities(self, query_mock, get_authorities_mock, stdout_mock):
directory_values = [
DIRECTORY_AUTHORITIES['gabelmoo'],
DIRECTORY_AUTHORITIES['moria1'],
DIRECTORY_AUTHORITIES['maatuska'],
]
directory_values[0].address = '131.188.40.189'
get_authorities_mock().values.return_value = directory_values
entry_with_measurement = get_router_status_entry_v3({'w': 'Bandwidth=1 Measured=1'})
entry_without_measurement = get_router_status_entry_v3()
query1 = Mock()
query1.download_url = 'http://131.188.40.189:80/tor/status-vote/current/authority'
query1.run.return_value = [entry_with_measurement] * 5935 + [entry_without_measurement] * 1332
query2 = Mock()
query2.download_url = 'http://128.31.0.39:9131/tor/status-vote/current/authority'
query2.run.return_value = [entry_with_measurement] * 6647 + [entry_without_measurement] * 625
query3 = Mock()
query3.download_url = 'http://171.25.193.9:443/tor/status-vote/current/authority'
query3.run.return_value = [entry_with_measurement] * 6313 + [entry_without_measurement] * 1112
query_mock.side_effect = [query1, query2, query3]
exec_documentation_example('votes_by_bandwidth_authorities.py')
self.assert_equal_unordered(VOTES_BY_BANDWIDTH_AUTHORITIES_OUTPUT, stdout_mock.getvalue())
@patch('sys.stdout', new_callable = StringIO)
@patch('stem.descriptor.parse_file')
@patch('%s.open' % __name__, create = True)
@patch('stem.descriptor.remote.Query')
def test_persisting_a_consensus(self, query_mock, open_mock, parse_file_mock, stdout_mock):
def tutorial_example_2():
from stem.descriptor import DocumentHandler, parse_file
consensus = next(parse_file(
'/tmp/descriptor_dump',
descriptor_type = 'network-status-consensus-3 1.0',
document_handler = DocumentHandler.DOCUMENT,
))
for fingerprint, relay in consensus.routers.items():
print('%s: %s' % (fingerprint, relay.nickname))
network_status = get_network_status_document_v3(routers = (get_router_status_entry_v3(),))
query_mock().run.return_value = [network_status]
parse_file_mock.return_value = itertools.cycle([network_status])
exec_documentation_example('persisting_a_consensus.py')
exec_documentation_example('persisting_a_consensus_with_parse_file.py')
self.assertEqual(PERSISTING_A_CONSENSUS_OUTPUT, stdout_mock.getvalue())
if os.path.exists('/tmp/descriptor_dump'):
os.remove('/tmp/descriptor_dump')
| lgpl-3.0 | 3,071,368,320,061,240,300 | 38.968571 | 142 | 0.70777 | false |
azogue/esiosdata | tests/test_pvpc_plots.py | 1 | 2211 | # -*- coding: utf-8 -*-
"""
Test cases for PVPC data
"""
import os
from unittest import TestCase
class TestsPVPCPlots(TestCase):
"""Tests para el almacén local de datos de PVPC."""
def test_plots_matplotlib(self):
"""Test de plots de datos de PVPC."""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from esiosdata import PVPC
from esiosdata.pvpcplot import (pvpcplot_ev_scatter, pvpcplot_grid_hora, pvpcplot_tarifas_hora,
pvpcplot_fill_tarifa, FIGSIZE, TARIFAS)
dest_path = os.path.dirname(os.path.abspath(__file__))
params_savefig = dict(dpi=150, orientation='landscape', transparent=True, pad_inches=0.01)
pvpc = PVPC(update=True, verbose=True)
df_pvpc = pvpc.data['data']
pvpc_mean_daily, pvpc_mean_monthly = pvpc.get_resample_data()
# DAILY AND MONTHLY EVOLUTION PLOTS:
pvpcplot_ev_scatter(pvpc_mean_daily, pvpc_mean_monthly, tarifa='VHC', superposic_anual=False)
pvpcplot_ev_scatter(pvpc_mean_daily, pvpc_mean_monthly, tarifa='GEN')
fig, ax = plt.subplots(1, 1, figsize=FIGSIZE)
pvpcplot_ev_scatter(pvpc_mean_daily, pvpc_mean_monthly, tarifa='NOC', ax=ax, plot=False)
fig.savefig(os.path.join(dest_path, 'test_pvpc_monthly.png'), **params_savefig)
fig, ax = plt.subplots(figsize=FIGSIZE)
for k in TARIFAS:
pvpcplot_ev_scatter(pvpc_mean_daily, pvpc_mean_monthly, tarifa=k, superposic_anual=False, ax=ax, plot=False)
fig.savefig(os.path.join(dest_path, 'test_pvpc_scatter.png'), **params_savefig)
# DAILY (OR HOURLY-INTERVAL) PLOTS:
df_day = df_pvpc.loc['2016-02-23']
pvpcplot_grid_hora(df_day)
pvpcplot_grid_hora(df_pvpc.loc['2016-02-10':'2016-02-23'])
pvpcplot_tarifas_hora(df_pvpc.loc['2016-02-10':'2016-02-23'])
# Fill tarifa
fig, ax = plt.subplots(figsize=FIGSIZE)
pvpcplot_fill_tarifa(df_pvpc.loc['2017-01-25'], ax=ax, show=False)
fig.savefig(os.path.join(dest_path, 'test_pvpc_tarifa.png'), **params_savefig)
pvpcplot_fill_tarifa(df_pvpc.loc['2017-01-25'])
| mit | 5,550,065,073,658,788,000 | 39.181818 | 120 | 0.638009 | false |
jiwanlimbu/aura | keystone/tests/unit/utils.py | 4 | 3895 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Useful utilities for tests."""
import functools
import os
import time
import uuid
import six
from testtools import testcase
TZ = None
def timezone(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
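# Temporarily apply the module-level TZ for the wrapped call, restoring the original environment afterwards.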
tz_original = os.environ.get('TZ')
try:
if TZ:
os.environ['TZ'] = TZ
time.tzset()
return func(*args, **kwargs)
finally:
if TZ:
if tz_original:
os.environ['TZ'] = tz_original
else:
if 'TZ' in os.environ:
del os.environ['TZ']
time.tzset()
return wrapper
def new_uuid():
"""Return a string UUID."""
return uuid.uuid4().hex
def wip(message, expected_exception=Exception, bug=None):
"""Mark a test as work in progress.
Based on code by Nat Pryce:
https://gist.github.com/npryce/997195#file-wip-py
The test will always be run. If the test fails then a TestSkipped
exception is raised. If the test passes an AssertionError exception
is raised so that the developer knows they made the test pass. This
is a reminder to remove the decorator.
:param message: a string message to help clarify why the test is
marked as a work in progress
:param expected_exception: an exception class that will be checked for
when @wip verifies an exception is raised. The
test will fail if a different exception is
raised. Default is "any" exception is valid
:param bug: (optional) a string for tracking the bug and what bug should
cause the @wip decorator to be removed from the testcase
Usage:
>>> @wip('Expected Error', expected_exception=Exception, bug="#000000")
>>> def test():
>>> pass
"""
if bug:
bugstr = " (BugID " + bug + ")"
else:
bugstr = ""
def _wip(f):
@six.wraps(f)
def run_test(*args, **kwargs):
__e = None
try:
f(*args, **kwargs)
except Exception as __e:
if (expected_exception != Exception and
not isinstance(__e, expected_exception)):
raise AssertionError(
'Work In Progress Test Failed%(bugstr)s with '
'unexpected exception. Expected "%(expected)s" '
'got "%(exception)s": %(message)s ' %
{'message': message, 'bugstr': bugstr,
'expected': expected_exception.__class__.__name__,
'exception': __e.__class__.__name__})
# NOTE(notmorgan): We got the expected exception, so we can safely
# skip this test.
raise testcase.TestSkipped(
'Work In Progress Test Failed as '
'expected%(bugstr)s: %(message)s' %
{'message': message, 'bugstr': bugstr})
raise AssertionError('Work In Progress Test Passed%(bugstr)s: '
'%(message)s' % {'message': message,
'bugstr': bugstr})
return run_test
return _wip
| apache-2.0 | 1,347,204,433,941,536,000 | 33.776786 | 78 | 0.550963 | false |
Fleurer/flask-oauthlib | example/google.py | 11 | 1905 | """
google example
~~~~~~~~~~~~~~
This example is contributed by Bruno Rocha
GitHub: https://github.com/rochacbruno
"""
from flask import Flask, redirect, url_for, session, request, jsonify
from flask_oauthlib.client import OAuth
app = Flask(__name__)
app.config['GOOGLE_ID'] = "cloud.google.com/console and get your ID"
app.config['GOOGLE_SECRET'] = "cloud.google.com/console and get the secret"
app.debug = True
app.secret_key = 'development'
oauth = OAuth(app)
google = oauth.remote_app(
'google',
consumer_key=app.config.get('GOOGLE_ID'),
consumer_secret=app.config.get('GOOGLE_SECRET'),
request_token_params={
'scope': 'https://www.googleapis.com/auth/userinfo.email'
},
base_url='https://www.googleapis.com/oauth2/v1/',
request_token_url=None,
access_token_method='POST',
access_token_url='https://accounts.google.com/o/oauth2/token',
authorize_url='https://accounts.google.com/o/oauth2/auth',
)
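# Relative paths such as google.get('userinfo') are resolved against base_url above.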
@app.route('/')
def index():
if 'google_token' in session:
me = google.get('userinfo')
return jsonify({"data": me.data})
return redirect(url_for('login'))
@app.route('/login')
def login():
return google.authorize(callback=url_for('authorized', _external=True))
@app.route('/logout')
def logout():
session.pop('google_token', None)
return redirect(url_for('index'))
@app.route('/login/authorized')
def authorized():
resp = google.authorized_response()
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
session['google_token'] = (resp['access_token'], '')
me = google.get('userinfo')
return jsonify({"data": me.data})
@google.tokengetter
def get_google_oauth_token():
return session.get('google_token')
if __name__ == '__main__':
app.run()
| bsd-3-clause | -5,106,320,304,414,557,000 | 25.09589 | 75 | 0.645669 | false |
matt-oak/euler | 35_circular_primes.py | 2 | 1061 | #Project Euler Problem #35
#Circular Primes
#Check if the number is prime
def is_prime(x):
prime = True
for i in range(2, x/2 + 1):
if x % i == 0:
prime = False
break
else:
continue
return prime
#Return rotation of list 'x'
def rotate(x, n):
return x[n:] + x[:n]
#Iterate through all odd numbers < 1,000,000
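#Start the count at 1 so the prime 2 (skipped by the odd-only loop below) is included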
circ_primes = 1
for i in range(3, 1000000, 2):
if i < 10:
if is_prime(i):
circ_primes += 1
continue
#If the number contains an even digit, disregard it (some rotation would be even, so it cannot be a circular prime)
if "8" in str(i) or "6" in str(i) or "4" in str(i) or "2" in str(i) or "0" in str(i):
continue
chars = list(str(i))
num_rotations = len(chars)
circ_check = 0
#Iterate through all rotations of the string and see if it is prime
for j in range(1, num_rotations + 1):
rotation = rotate(chars, j)
string = ''.join(rotation)
num = int(string)
if is_prime(num):
circ_check += 1
else:
break
#If all rotations are prime, add 1 to circ_prime accumulator
if circ_check == num_rotations:
print ''.join(chars)
circ_primes += 1
print circ_primes
| mit | 1,540,910,615,525,741,800 | 19.803922 | 86 | 0.648445 | false |
lynxis/pkpgcounter | pkpgpdls/hbp.py | 1 | 2373 | # -*- coding: utf-8 -*-
#
# pkpgcounter: a generic Page Description Language parser
#
# (c) 2003-2009 Jerome Alet <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
#
"""This modules implements a page counter for Brother HBP documents."""
import sys
import os
import mmap
from struct import unpack
from . import pdlparser
class Parser(pdlparser.PDLParser):
"""A parser for HBP documents."""
format = "Brother HBP"
def isValid(self):
"""Returns True if data is HBP, else False."""
if self.firstblock.find(b"@PJL ENTER LANGUAGE = HBP\n") != -1:
return True
else:
return False
def getJobSize(self):
"""Counts pages in a HBP document.
Algorithm by Jerome Alet.
The documentation used for this was:
http://sf.net/projects/hbp-for-brother/
IMPORTANT: this may not work since @F should be sufficient,
but the documentation really is unclear and I don't know
how to skip raster data blocks for now.
"""
infileno = self.infile.fileno()
minfile = mmap.mmap(infileno, os.fstat(infileno)[6], prot=mmap.PROT_READ, flags=mmap.MAP_SHARED)
pagecount = 0
formfeed = b"@G\x00\x00\x01\xff@F"
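# each occurrence of this @G/@F byte sequence is counted as one form feed (page)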
fflen = len(formfeed)
pos = 0
try:
try:
while True:
if (minfile[pos:pos+1] == b"@") \
and (minfile[pos:pos+fflen] == formfeed):
pagecount += 1
pos += fflen
else:
pos += 1
except IndexError: # EOF ?
pass
finally:
minfile.close() # reached EOF
return pagecount
| gpl-3.0 | -8,211,449,209,254,833,000 | 31.067568 | 104 | 0.603877 | false |
jkstrick/samba | python/samba/dbchecker.py | 15 | 65845 | # Samba4 AD database checker
#
# Copyright (C) Andrew Tridgell 2011
# Copyright (C) Matthieu Patou <[email protected]> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import ldb
import samba
import time
from base64 import b64decode
from samba import dsdb
from samba import common
from samba.dcerpc import misc
from samba.ndr import ndr_unpack, ndr_pack
from samba.dcerpc import drsblobs
from samba.common import dsdb_Dn
from samba.dcerpc import security
from samba.descriptor import get_wellknown_sds, get_diff_sds
from samba.auth import system_session, admin_session
class dbcheck(object):
"""check a SAM database for errors"""
def __init__(self, samdb, samdb_schema=None, verbose=False, fix=False,
yes=False, quiet=False, in_transaction=False,
reset_well_known_acls=False):
self.samdb = samdb
self.dict_oid_name = None
self.samdb_schema = (samdb_schema or samdb)
self.verbose = verbose
self.fix = fix
self.yes = yes
self.quiet = quiet
self.remove_all_unknown_attributes = False
self.remove_all_empty_attributes = False
self.fix_all_normalisation = False
self.fix_all_DN_GUIDs = False
self.fix_all_binary_dn = False
self.remove_all_deleted_DN_links = False
self.fix_all_target_mismatch = False
self.fix_all_metadata = False
self.fix_time_metadata = False
self.fix_all_missing_backlinks = False
self.fix_all_orphaned_backlinks = False
self.fix_rmd_flags = False
self.fix_ntsecuritydescriptor = False
self.fix_ntsecuritydescriptor_owner_group = False
self.seize_fsmo_role = False
self.move_to_lost_and_found = False
self.fix_instancetype = False
self.fix_replmetadata_zero_invocationid = False
self.fix_deleted_deleted_objects = False
self.fix_dn = False
self.fix_base64_userparameters = False
self.fix_utf8_userparameters = False
self.fix_doubled_userparameters = False
self.reset_well_known_acls = reset_well_known_acls
self.reset_all_well_known_acls = False
self.in_transaction = in_transaction
self.infrastructure_dn = ldb.Dn(samdb, "CN=Infrastructure," + samdb.domain_dn())
self.naming_dn = ldb.Dn(samdb, "CN=Partitions,%s" % samdb.get_config_basedn())
self.schema_dn = samdb.get_schema_basedn()
self.rid_dn = ldb.Dn(samdb, "CN=RID Manager$,CN=System," + samdb.domain_dn())
self.ntds_dsa = ldb.Dn(samdb, samdb.get_dsServiceName())
self.class_schemaIDGUID = {}
self.wellknown_sds = get_wellknown_sds(self.samdb)
self.fix_all_missing_objectclass = False
self.name_map = {}
try:
res = samdb.search(base="CN=DnsAdmins,CN=Users,%s" % samdb.domain_dn(), scope=ldb.SCOPE_BASE,
attrs=["objectSid"])
dnsadmins_sid = ndr_unpack(security.dom_sid, res[0]["objectSid"][0])
self.name_map['DnsAdmins'] = str(dnsadmins_sid)
except ldb.LdbError, (enum, estr):
if enum != ldb.ERR_NO_SUCH_OBJECT:
raise
pass
self.system_session_info = system_session()
self.admin_session_info = admin_session(None, samdb.get_domain_sid())
res = self.samdb.search(base=self.ntds_dsa, scope=ldb.SCOPE_BASE, attrs=['msDS-hasMasterNCs', 'hasMasterNCs'])
if "msDS-hasMasterNCs" in res[0]:
self.write_ncs = res[0]["msDS-hasMasterNCs"]
else:
# If the Forest Level is less than 2003 then there is no
# msDS-hasMasterNCs, so we fall back to hasMasterNCs
# no need to merge as all the NCs that are in hasMasterNCs must
# also be in msDS-hasMasterNCs (but not the opposite)
if "hasMasterNCs" in res[0]:
self.write_ncs = res[0]["hasMasterNCs"]
else:
self.write_ncs = None
res = self.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=['namingContexts'])
try:
ncs = res[0]["namingContexts"]
self.deleted_objects_containers = []
for nc in ncs:
try:
dn = self.samdb.get_wellknown_dn(ldb.Dn(self.samdb, nc),
dsdb.DS_GUID_DELETED_OBJECTS_CONTAINER)
self.deleted_objects_containers.append(dn)
except KeyError:
pass
except KeyError:
pass
except IndexError:
pass
def check_database(self, DN=None, scope=ldb.SCOPE_SUBTREE, controls=[], attrs=['*']):
'''perform a database check, returning the number of errors found'''
res = self.samdb.search(base=DN, scope=scope, attrs=['dn'], controls=controls)
self.report('Checking %u objects' % len(res))
error_count = 0
for object in res:
error_count += self.check_object(object.dn, attrs=attrs)
if DN is None:
error_count += self.check_rootdse()
if error_count != 0 and not self.fix:
self.report("Please use --fix to fix these errors")
self.report('Checked %u objects (%u errors)' % (len(res), error_count))
return error_count
def report(self, msg):
'''print a message unless quiet is set'''
if not self.quiet:
print(msg)
def confirm(self, msg, allow_all=False, forced=False):
'''confirm a change'''
if not self.fix:
return False
if self.quiet:
return self.yes
if self.yes:
forced = True
return common.confirm(msg, forced=forced, allow_all=allow_all)
################################################################
# a local confirm function with support for 'all'
def confirm_all(self, msg, all_attr):
'''confirm a change with support for "all" '''
if not self.fix:
return False
if self.quiet:
return self.yes
if getattr(self, all_attr) == 'NONE':
return False
if getattr(self, all_attr) == 'ALL':
forced = True
else:
forced = self.yes
c = common.confirm(msg, forced=forced, allow_all=True)
if c == 'ALL':
setattr(self, all_attr, 'ALL')
return True
if c == 'NONE':
setattr(self, all_attr, 'NONE')
return False
return c
def do_delete(self, dn, controls, msg):
'''delete dn with optional verbose output'''
if self.verbose:
self.report("delete DN %s" % dn)
try:
controls = controls + ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK]
self.samdb.delete(dn, controls=controls)
except Exception, err:
self.report("%s : %s" % (msg, err))
return False
return True
def do_modify(self, m, controls, msg, validate=True):
'''perform a modify with optional verbose output'''
if self.verbose:
self.report(self.samdb.write_ldif(m, ldb.CHANGETYPE_MODIFY))
try:
controls = controls + ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK]
self.samdb.modify(m, controls=controls, validate=validate)
except Exception, err:
self.report("%s : %s" % (msg, err))
return False
return True
def do_rename(self, from_dn, to_rdn, to_base, controls, msg):
'''perform a modify with optional verbose output'''
if self.verbose:
self.report("""dn: %s
changeType: modrdn
newrdn: %s
deleteOldRdn: 1
newSuperior: %s""" % (str(from_dn), str(to_rdn), str(to_base)))
try:
to_dn = to_rdn + to_base
controls = controls + ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK]
self.samdb.rename(from_dn, to_dn, controls=controls)
except Exception, err:
self.report("%s : %s" % (msg, err))
return False
return True
def err_empty_attribute(self, dn, attrname):
'''fix empty attributes'''
self.report("ERROR: Empty attribute %s in %s" % (attrname, dn))
if not self.confirm_all('Remove empty attribute %s from %s?' % (attrname, dn), 'remove_all_empty_attributes'):
self.report("Not fixing empty attribute %s" % attrname)
return
m = ldb.Message()
m.dn = dn
m[attrname] = ldb.MessageElement('', ldb.FLAG_MOD_DELETE, attrname)
if self.do_modify(m, ["relax:0", "show_recycled:1"],
"Failed to remove empty attribute %s" % attrname, validate=False):
self.report("Removed empty attribute %s" % attrname)
def err_normalise_mismatch(self, dn, attrname, values):
'''fix attribute normalisation errors'''
self.report("ERROR: Normalisation error for attribute %s in %s" % (attrname, dn))
mod_list = []
for val in values:
normalised = self.samdb.dsdb_normalise_attributes(
self.samdb_schema, attrname, [val])
if len(normalised) != 1:
self.report("Unable to normalise value '%s'" % val)
mod_list.append((val, ''))
elif (normalised[0] != val):
self.report("value '%s' should be '%s'" % (val, normalised[0]))
mod_list.append((val, normalised[0]))
if not self.confirm_all('Fix normalisation for %s from %s?' % (attrname, dn), 'fix_all_normalisation'):
self.report("Not fixing attribute %s" % attrname)
return
m = ldb.Message()
m.dn = dn
for i in range(0, len(mod_list)):
(val, nval) = mod_list[i]
m['value_%u' % i] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
if nval != '':
m['normv_%u' % i] = ldb.MessageElement(nval, ldb.FLAG_MOD_ADD,
attrname)
if self.do_modify(m, ["relax:0", "show_recycled:1"],
"Failed to normalise attribute %s" % attrname,
validate=False):
self.report("Normalised attribute %s" % attrname)
def err_normalise_mismatch_replace(self, dn, attrname, values):
'''fix attribute normalisation errors'''
normalised = self.samdb.dsdb_normalise_attributes(self.samdb_schema, attrname, values)
self.report("ERROR: Normalisation error for attribute '%s' in '%s'" % (attrname, dn))
self.report("Values/Order of values do/does not match: %s/%s!" % (values, list(normalised)))
if list(normalised) == values:
return
if not self.confirm_all("Fix normalisation for '%s' from '%s'?" % (attrname, dn), 'fix_all_normalisation'):
self.report("Not fixing attribute '%s'" % attrname)
return
m = ldb.Message()
m.dn = dn
m[attrname] = ldb.MessageElement(normalised, ldb.FLAG_MOD_REPLACE, attrname)
if self.do_modify(m, ["relax:0", "show_recycled:1"],
"Failed to normalise attribute %s" % attrname,
validate=False):
self.report("Normalised attribute %s" % attrname)
def is_deleted_objects_dn(self, dsdb_dn):
'''see if a dsdb_Dn is the special Deleted Objects DN'''
return dsdb_dn.prefix == "B:32:%s:" % dsdb.DS_GUID_DELETED_OBJECTS_CONTAINER
def err_missing_objectclass(self, dn):
"""handle object without objectclass"""
self.report("ERROR: missing objectclass in object %s. If you have another working DC, please run 'samba-tool drs replicate --full-sync --local <destinationDC> <sourceDC> %s'" % (dn, self.samdb.get_nc_root(dn)))
if not self.confirm_all("If you cannot re-sync from another DC, do you wish to delete object '%s'?" % dn, 'fix_all_missing_objectclass'):
self.report("Not deleting object with missing objectclass '%s'" % dn)
return
if self.do_delete(dn, ["relax:0"],
"Failed to remove DN %s" % dn):
self.report("Removed DN %s" % dn)
def err_deleted_dn(self, dn, attrname, val, dsdb_dn, correct_dn):
"""handle a DN pointing to a deleted object"""
self.report("ERROR: target DN is deleted for %s in object %s - %s" % (attrname, dn, val))
self.report("Target GUID points at deleted DN %s" % correct_dn)
if not self.confirm_all('Remove DN link?', 'remove_all_deleted_DN_links'):
self.report("Not removing")
return
m = ldb.Message()
m.dn = dn
m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
if self.do_modify(m, ["show_recycled:1", "local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK],
"Failed to remove deleted DN attribute %s" % attrname):
self.report("Removed deleted DN on attribute %s" % attrname)
def err_missing_dn_GUID(self, dn, attrname, val, dsdb_dn):
"""handle a missing target DN (both GUID and DN string form are missing)"""
# check if it's a backlink
linkID = self.samdb_schema.get_linkId_from_lDAPDisplayName(attrname)
if (linkID & 1 == 0) and str(dsdb_dn).find('\\0ADEL') == -1:
self.report("Not removing dangling forward link")
return
self.err_deleted_dn(dn, attrname, val, dsdb_dn, dsdb_dn)
def err_incorrect_dn_GUID(self, dn, attrname, val, dsdb_dn, errstr):
"""handle a missing GUID extended DN component"""
self.report("ERROR: %s component for %s in object %s - %s" % (errstr, attrname, dn, val))
controls=["extended_dn:1:1", "show_recycled:1"]
try:
res = self.samdb.search(base=str(dsdb_dn.dn), scope=ldb.SCOPE_BASE,
attrs=[], controls=controls)
except ldb.LdbError, (enum, estr):
self.report("unable to find object for DN %s - (%s)" % (dsdb_dn.dn, estr))
self.err_missing_dn_GUID(dn, attrname, val, dsdb_dn)
return
if len(res) == 0:
self.report("unable to find object for DN %s" % dsdb_dn.dn)
self.err_missing_dn_GUID(dn, attrname, val, dsdb_dn)
return
dsdb_dn.dn = res[0].dn
if not self.confirm_all('Change DN to %s?' % str(dsdb_dn), 'fix_all_DN_GUIDs'):
self.report("Not fixing %s" % errstr)
return
m = ldb.Message()
m.dn = dn
m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
m['new_value'] = ldb.MessageElement(str(dsdb_dn), ldb.FLAG_MOD_ADD, attrname)
if self.do_modify(m, ["show_recycled:1"],
"Failed to fix %s on attribute %s" % (errstr, attrname)):
self.report("Fixed %s on attribute %s" % (errstr, attrname))
def err_incorrect_binary_dn(self, dn, attrname, val, dsdb_dn, errstr):
"""handle an incorrect binary DN component"""
self.report("ERROR: %s binary component for %s in object %s - %s" % (errstr, attrname, dn, val))
controls=["extended_dn:1:1", "show_recycled:1"]
if not self.confirm_all('Change DN to %s?' % str(dsdb_dn), 'fix_all_binary_dn'):
self.report("Not fixing %s" % errstr)
return
m = ldb.Message()
m.dn = dn
m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
m['new_value'] = ldb.MessageElement(str(dsdb_dn), ldb.FLAG_MOD_ADD, attrname)
if self.do_modify(m, ["show_recycled:1"],
"Failed to fix %s on attribute %s" % (errstr, attrname)):
self.report("Fixed %s on attribute %s" % (errstr, attrname))
def err_dn_target_mismatch(self, dn, attrname, val, dsdb_dn, correct_dn, errstr):
"""handle a DN string being incorrect"""
self.report("ERROR: incorrect DN string component for %s in object %s - %s" % (attrname, dn, val))
dsdb_dn.dn = correct_dn
if not self.confirm_all('Change DN to %s?' % str(dsdb_dn), 'fix_all_target_mismatch'):
self.report("Not fixing %s" % errstr)
return
m = ldb.Message()
m.dn = dn
m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
m['new_value'] = ldb.MessageElement(str(dsdb_dn), ldb.FLAG_MOD_ADD, attrname)
if self.do_modify(m, ["show_recycled:1"],
"Failed to fix incorrect DN string on attribute %s" % attrname):
self.report("Fixed incorrect DN string on attribute %s" % (attrname))
def err_unknown_attribute(self, obj, attrname):
'''handle an unknown attribute error'''
self.report("ERROR: unknown attribute '%s' in %s" % (attrname, obj.dn))
if not self.confirm_all('Remove unknown attribute %s' % attrname, 'remove_all_unknown_attributes'):
self.report("Not removing %s" % attrname)
return
m = ldb.Message()
m.dn = obj.dn
m['old_value'] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE, attrname)
if self.do_modify(m, ["relax:0", "show_recycled:1"],
"Failed to remove unknown attribute %s" % attrname):
self.report("Removed unknown attribute %s" % (attrname))
def err_missing_backlink(self, obj, attrname, val, backlink_name, target_dn):
'''handle a missing backlink value'''
self.report("ERROR: missing backlink attribute '%s' in %s for link %s in %s" % (backlink_name, target_dn, attrname, obj.dn))
if not self.confirm_all('Fix missing backlink %s' % backlink_name, 'fix_all_missing_backlinks'):
self.report("Not fixing missing backlink %s" % backlink_name)
return
m = ldb.Message()
m.dn = obj.dn
m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
m['new_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_ADD, attrname)
if self.do_modify(m, ["show_recycled:1"],
"Failed to fix missing backlink %s" % backlink_name):
self.report("Fixed missing backlink %s" % (backlink_name))
def err_incorrect_rmd_flags(self, obj, attrname, revealed_dn):
'''handle an incorrect RMD_FLAGS value'''
rmd_flags = int(revealed_dn.dn.get_extended_component("RMD_FLAGS"))
self.report("ERROR: incorrect RMD_FLAGS value %u for attribute '%s' in %s for link %s" % (rmd_flags, attrname, obj.dn, revealed_dn.dn.extended_str()))
if not self.confirm_all('Fix incorrect RMD_FLAGS %u' % rmd_flags, 'fix_rmd_flags'):
self.report("Not fixing incorrect RMD_FLAGS %u" % rmd_flags)
return
m = ldb.Message()
m.dn = obj.dn
m['old_value'] = ldb.MessageElement(str(revealed_dn), ldb.FLAG_MOD_DELETE, attrname)
if self.do_modify(m, ["show_recycled:1", "reveal_internals:0", "show_deleted:0"],
"Failed to fix incorrect RMD_FLAGS %u" % rmd_flags):
self.report("Fixed incorrect RMD_FLAGS %u" % (rmd_flags))
def err_orphaned_backlink(self, obj, attrname, val, link_name, target_dn):
'''handle an orphaned backlink value'''
self.report("ERROR: orphaned backlink attribute '%s' in %s for link %s in %s" % (attrname, obj.dn, link_name, target_dn))
if not self.confirm_all('Remove orphaned backlink %s' % link_name, 'fix_all_orphaned_backlinks'):
self.report("Not removing orphaned backlink %s" % link_name)
return
m = ldb.Message()
m.dn = obj.dn
m['value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
if self.do_modify(m, ["show_recycled:1", "relax:0"],
"Failed to fix orphaned backlink %s" % link_name):
self.report("Fixed orphaned backlink %s" % (link_name))
def err_no_fsmoRoleOwner(self, obj):
'''handle a missing fSMORoleOwner'''
self.report("ERROR: fSMORoleOwner not found for role %s" % (obj.dn))
res = self.samdb.search("",
scope=ldb.SCOPE_BASE, attrs=["dsServiceName"])
assert len(res) == 1
serviceName = res[0]["dsServiceName"][0]
if not self.confirm_all('Seize role %s onto current DC by adding fSMORoleOwner=%s' % (obj.dn, serviceName), 'seize_fsmo_role'):
self.report("Not seizing role %s onto current DC by adding fSMORoleOwner=%s" % (obj.dn, serviceName))
return
m = ldb.Message()
m.dn = obj.dn
m['value'] = ldb.MessageElement(serviceName, ldb.FLAG_MOD_ADD, 'fSMORoleOwner')
if self.do_modify(m, [],
"Failed to sieze role %s onto current DC by adding fSMORoleOwner=%s" % (obj.dn, serviceName)):
self.report("Siezed role %s onto current DC by adding fSMORoleOwner=%s" % (obj.dn, serviceName))
def err_missing_parent(self, obj):
'''handle a missing parent'''
self.report("ERROR: parent object not found for %s" % (obj.dn))
if not self.confirm_all('Move object %s into LostAndFound?' % (obj.dn), 'move_to_lost_and_found'):
self.report('Not moving object %s into LostAndFound' % (obj.dn))
return
keep_transaction = True
self.samdb.transaction_start()
try:
nc_root = self.samdb.get_nc_root(obj.dn)
lost_and_found = self.samdb.get_wellknown_dn(nc_root, dsdb.DS_GUID_LOSTANDFOUND_CONTAINER)
new_dn = ldb.Dn(self.samdb, str(obj.dn))
new_dn.remove_base_components(len(new_dn) - 1)
if self.do_rename(obj.dn, new_dn, lost_and_found, ["show_deleted:0", "relax:0"],
"Failed to rename object %s into lostAndFound at %s" % (obj.dn, new_dn + lost_and_found)):
self.report("Renamed object %s into lostAndFound at %s" % (obj.dn, new_dn + lost_and_found))
m = ldb.Message()
m.dn = obj.dn
m['lastKnownParent'] = ldb.MessageElement(str(obj.dn.parent()), ldb.FLAG_MOD_REPLACE, 'lastKnownParent')
if self.do_modify(m, [],
"Failed to set lastKnownParent on lostAndFound object at %s" % (new_dn + lost_and_found)):
self.report("Set lastKnownParent on lostAndFound object at %s" % (new_dn + lost_and_found))
keep_transaction = True
except:
self.samdb.transaction_cancel()
raise
if keep_transaction:
self.samdb.transaction_commit()
else:
self.samdb.transaction_cancel()
def err_wrong_dn(self, obj, new_dn, rdn_attr, rdn_val, name_val):
'''handle a wrong dn'''
new_rdn = ldb.Dn(self.samdb, str(new_dn))
new_rdn.remove_base_components(len(new_rdn) - 1)
new_parent = new_dn.parent()
attributes = ""
if rdn_val != name_val:
attributes += "%s=%r " % (rdn_attr, rdn_val)
attributes += "name=%r" % (name_val)
self.report("ERROR: wrong dn[%s] %s new_dn[%s]" % (obj.dn, attributes, new_dn))
if not self.confirm_all("Rename %s to %s?" % (obj.dn, new_dn), 'fix_dn'):
self.report("Not renaming %s to %s" % (obj.dn, new_dn))
return
if self.do_rename(obj.dn, new_rdn, new_parent, ["show_recycled:1", "relax:0"],
"Failed to rename object %s into %s" % (obj.dn, new_dn)):
self.report("Renamed %s into %s" % (obj.dn, new_dn))
def err_wrong_instancetype(self, obj, calculated_instancetype):
'''handle a wrong instanceType'''
self.report("ERROR: wrong instanceType %s on %s, should be %d" % (obj["instanceType"], obj.dn, calculated_instancetype))
if not self.confirm_all('Change instanceType from %s to %d on %s?' % (obj["instanceType"], calculated_instancetype, obj.dn), 'fix_instancetype'):
self.report('Not changing instanceType from %s to %d on %s' % (obj["instanceType"], calculated_instancetype, obj.dn))
return
m = ldb.Message()
m.dn = obj.dn
m['value'] = ldb.MessageElement(str(calculated_instancetype), ldb.FLAG_MOD_REPLACE, 'instanceType')
if self.do_modify(m, ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK_MODIFY_RO_REPLICA],
"Failed to correct missing instanceType on %s by setting instanceType=%d" % (obj.dn, calculated_instancetype)):
self.report("Corrected instancetype on %s by setting instanceType=%d" % (obj.dn, calculated_instancetype))
def err_short_userParameters(self, obj, attrname, value):
# This is a truncated userParameters due to a pre 4.1 replication bug
self.report("ERROR: incorrect userParameters value on object %s. If you have another working DC that does not give this warning, please run 'samba-tool drs replicate --full-sync --local <destinationDC> <sourceDC> %s'" % (obj.dn, self.samdb.get_nc_root(obj.dn)))
def err_base64_userParameters(self, obj, attrname, value):
'''handle a wrong userParameters'''
self.report("ERROR: wrongly formatted userParameters %s on %s, should not be base64-encoded" % (value, obj.dn))
if not self.confirm_all('Convert userParameters from base64 encoding on %s?' % (obj.dn), 'fix_base64_userparameters'):
self.report('Not changing userParameters from base64 encoding on %s' % (obj.dn))
return
m = ldb.Message()
m.dn = obj.dn
m['value'] = ldb.MessageElement(b64decode(obj[attrname][0]), ldb.FLAG_MOD_REPLACE, 'userParameters')
if self.do_modify(m, [],
"Failed to correct base64-encoded userParameters on %s by converting from base64" % (obj.dn)):
self.report("Corrected base64-encoded userParameters on %s by converting from base64" % (obj.dn))
def err_utf8_userParameters(self, obj, attrname, value):
'''handle a wrong userParameters'''
self.report("ERROR: wrongly formatted userParameters on %s, should not be psudo-UTF8 encoded" % (obj.dn))
if not self.confirm_all('Convert userParameters from UTF8 encoding on %s?' % (obj.dn), 'fix_utf8_userparameters'):
self.report('Not changing userParameters from UTF8 encoding on %s' % (obj.dn))
return
m = ldb.Message()
m.dn = obj.dn
m['value'] = ldb.MessageElement(obj[attrname][0].decode('utf8').encode('utf-16-le'),
ldb.FLAG_MOD_REPLACE, 'userParameters')
if self.do_modify(m, [],
"Failed to correct psudo-UTF8 encoded userParameters on %s by converting from UTF8" % (obj.dn)):
self.report("Corrected psudo-UTF8 encoded userParameters on %s by converting from UTF8" % (obj.dn))
def err_doubled_userParameters(self, obj, attrname, value):
'''handle a wrong userParameters'''
self.report("ERROR: wrongly formatted userParameters on %s, should not be double UTF16 encoded" % (obj.dn))
if not self.confirm_all('Convert userParameters from doubled UTF-16 encoding on %s?' % (obj.dn), 'fix_doubled_userparameters'):
self.report('Not changing userParameters from doubled UTF-16 encoding on %s' % (obj.dn))
return
m = ldb.Message()
m.dn = obj.dn
m['value'] = ldb.MessageElement(obj[attrname][0].decode('utf-16-le').decode('utf-16-le').encode('utf-16-le'),
ldb.FLAG_MOD_REPLACE, 'userParameters')
if self.do_modify(m, [],
"Failed to correct doubled-UTF16 encoded userParameters on %s by converting" % (obj.dn)):
self.report("Corrected doubled-UTF16 encoded userParameters on %s by converting" % (obj.dn))
def err_odd_userParameters(self, obj, attrname):
# This is a truncated userParameters due to a pre 4.1 replication bug
self.report("ERROR: incorrect userParameters value on object %s (odd length). If you have another working DC that does not give this warning, please run 'samba-tool drs replicate --full-sync --local <destinationDC> <sourceDC> %s'" % (obj.dn, self.samdb.get_nc_root(obj.dn)))
def find_revealed_link(self, dn, attrname, guid):
'''return a revealed link in an object'''
res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=[attrname],
controls=["show_deleted:0", "extended_dn:0", "reveal_internals:0"])
syntax_oid = self.samdb_schema.get_syntax_oid_from_lDAPDisplayName(attrname)
for val in res[0][attrname]:
dsdb_dn = dsdb_Dn(self.samdb, val, syntax_oid)
guid2 = dsdb_dn.dn.get_extended_component("GUID")
if guid == guid2:
return dsdb_dn
return None
def check_dn(self, obj, attrname, syntax_oid):
'''check a DN attribute for correctness'''
error_count = 0
for val in obj[attrname]:
dsdb_dn = dsdb_Dn(self.samdb, val, syntax_oid)
# all DNs should have a GUID component
guid = dsdb_dn.dn.get_extended_component("GUID")
if guid is None:
error_count += 1
self.err_incorrect_dn_GUID(obj.dn, attrname, val, dsdb_dn,
"missing GUID")
continue
guidstr = str(misc.GUID(guid))
attrs = ['isDeleted']
if (str(attrname).lower() == 'msds-hasinstantiatedncs') and (obj.dn == self.ntds_dsa):
fixing_msDS_HasInstantiatedNCs = True
attrs.append("instanceType")
else:
fixing_msDS_HasInstantiatedNCs = False
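# an even linkID is a forward link and an odd one a backlink; look up the paired attribute so the reverse link can be verified below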
linkID = self.samdb_schema.get_linkId_from_lDAPDisplayName(attrname)
reverse_link_name = self.samdb_schema.get_backlink_from_lDAPDisplayName(attrname)
if reverse_link_name is not None:
attrs.append(reverse_link_name)
# check its the right GUID
try:
res = self.samdb.search(base="<GUID=%s>" % guidstr, scope=ldb.SCOPE_BASE,
attrs=attrs, controls=["extended_dn:1:1", "show_recycled:1"])
except ldb.LdbError, (enum, estr):
error_count += 1
self.err_incorrect_dn_GUID(obj.dn, attrname, val, dsdb_dn, "incorrect GUID")
continue
if fixing_msDS_HasInstantiatedNCs:
dsdb_dn.prefix = "B:8:%08X:" % int(res[0]['instanceType'][0])
dsdb_dn.binary = "%08X" % int(res[0]['instanceType'][0])
if str(dsdb_dn) != val:
error_count +=1
self.err_incorrect_binary_dn(obj.dn, attrname, val, dsdb_dn, "incorrect instanceType part of Binary DN")
continue
# now we have two cases - the source object might or might not be deleted
is_deleted = 'isDeleted' in obj and obj['isDeleted'][0].upper() == 'TRUE'
target_is_deleted = 'isDeleted' in res[0] and res[0]['isDeleted'][0].upper() == 'TRUE'
# the target DN is not allowed to be deleted, unless the target DN is the
# special Deleted Objects container
if target_is_deleted and not is_deleted and not self.is_deleted_objects_dn(dsdb_dn):
error_count += 1
self.err_deleted_dn(obj.dn, attrname, val, dsdb_dn, res[0].dn)
continue
# check the DN matches in string form
if res[0].dn.extended_str() != dsdb_dn.dn.extended_str():
error_count += 1
self.err_dn_target_mismatch(obj.dn, attrname, val, dsdb_dn,
res[0].dn, "incorrect string version of DN")
continue
if is_deleted and not target_is_deleted and reverse_link_name is not None:
revealed_dn = self.find_revealed_link(obj.dn, attrname, guid)
rmd_flags = revealed_dn.dn.get_extended_component("RMD_FLAGS")
if rmd_flags is not None and (int(rmd_flags) & 1) == 0:
# the RMD_FLAGS for this link should be 1, as the target is deleted
self.err_incorrect_rmd_flags(obj, attrname, revealed_dn)
continue
# check the reverse_link is correct if there should be one
if reverse_link_name is not None:
match_count = 0
if reverse_link_name in res[0]:
for v in res[0][reverse_link_name]:
if v == obj.dn.extended_str():
match_count += 1
if match_count != 1:
error_count += 1
if linkID & 1:
self.err_orphaned_backlink(obj, attrname, val, reverse_link_name, dsdb_dn.dn)
else:
self.err_missing_backlink(obj, attrname, val, reverse_link_name, dsdb_dn.dn)
continue
return error_count
def get_originating_time(self, val, attid):
'''Read metadata properties and return the originating time for
a given attributeId.
:return: the originating time or 0 if not found
'''
repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, str(val))
obj = repl.ctr
for o in repl.ctr.array:
if o.attid == attid:
return o.originating_change_time
return 0
def process_metadata(self, val):
'''Read metadata properties and list attributes in it'''
list_att = []
repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, str(val))
obj = repl.ctr
for o in repl.ctr.array:
att = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid)
list_att.append(att.lower())
return list_att
def fix_metadata(self, dn, attr):
'''re-write replPropertyMetaData elements for a single attribute for a
object. This is used to fix missing replPropertyMetaData elements'''
res = self.samdb.search(base = dn, scope=ldb.SCOPE_BASE, attrs = [attr],
controls = ["search_options:1:2", "show_recycled:1"])
msg = res[0]
nmsg = ldb.Message()
nmsg.dn = dn
nmsg[attr] = ldb.MessageElement(msg[attr], ldb.FLAG_MOD_REPLACE, attr)
if self.do_modify(nmsg, ["relax:0", "provision:0", "show_recycled:1"],
"Failed to fix metadata for attribute %s" % attr):
self.report("Fixed metadata for attribute %s" % attr)
def ace_get_effective_inherited_type(self, ace):
if ace.flags & security.SEC_ACE_FLAG_INHERIT_ONLY:
return None
check = False
if ace.type == security.SEC_ACE_TYPE_ACCESS_ALLOWED_OBJECT:
check = True
elif ace.type == security.SEC_ACE_TYPE_ACCESS_DENIED_OBJECT:
check = True
elif ace.type == security.SEC_ACE_TYPE_SYSTEM_AUDIT_OBJECT:
check = True
elif ace.type == security.SEC_ACE_TYPE_SYSTEM_ALARM_OBJECT:
check = True
if not check:
return None
if not ace.object.flags & security.SEC_ACE_INHERITED_OBJECT_TYPE_PRESENT:
return None
return str(ace.object.inherited_type)
def lookup_class_schemaIDGUID(self, cls):
if cls in self.class_schemaIDGUID:
return self.class_schemaIDGUID[cls]
flt = "(&(ldapDisplayName=%s)(objectClass=classSchema))" % cls
res = self.samdb.search(base=self.schema_dn,
expression=flt,
attrs=["schemaIDGUID"])
t = str(ndr_unpack(misc.GUID, res[0]["schemaIDGUID"][0]))
self.class_schemaIDGUID[cls] = t
return t
def process_sd(self, dn, obj):
sd_attr = "nTSecurityDescriptor"
sd_val = obj[sd_attr]
sd = ndr_unpack(security.descriptor, str(sd_val))
is_deleted = 'isDeleted' in obj and obj['isDeleted'][0].upper() == 'TRUE'
if is_deleted:
# we don't fix deleted objects
return (sd, None)
sd_clean = security.descriptor()
sd_clean.owner_sid = sd.owner_sid
sd_clean.group_sid = sd.group_sid
sd_clean.type = sd.type
sd_clean.revision = sd.revision
broken = False
last_inherited_type = None
aces = []
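# keep explicit (non-inherited) ACEs as-is; for inherited ACEs, track which schema class they claim to inherit from, since more than one class means the inherited ACEs are inconsistent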
if sd.sacl is not None:
aces = sd.sacl.aces
for i in range(0, len(aces)):
ace = aces[i]
if not ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE:
sd_clean.sacl_add(ace)
continue
t = self.ace_get_effective_inherited_type(ace)
if t is None:
continue
if last_inherited_type is not None:
if t != last_inherited_type:
# if it inherited from more than
# one type it's very likely to be broken
#
# If not the recalculation will calculate
# the same result.
broken = True
continue
last_inherited_type = t
aces = []
if sd.dacl is not None:
aces = sd.dacl.aces
for i in range(0, len(aces)):
ace = aces[i]
if not ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE:
sd_clean.dacl_add(ace)
continue
t = self.ace_get_effective_inherited_type(ace)
if t is None:
continue
if last_inherited_type is not None:
if t != last_inherited_type:
# if it inherited from more than
# one type it's very likely to be broken
#
# If not the recalculation will calculate
# the same result.
broken = True
continue
last_inherited_type = t
if broken:
return (sd_clean, sd)
if last_inherited_type is None:
# ok
return (sd, None)
cls = None
try:
cls = obj["objectClass"][-1]
except KeyError, e:
pass
if cls is None:
res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
attrs=["isDeleted", "objectClass"],
controls=["show_recycled:1"])
o = res[0]
is_deleted = 'isDeleted' in o and o['isDeleted'][0].upper() == 'TRUE'
if is_deleted:
# we don't fix deleted objects
return (sd, None)
cls = o["objectClass"][-1]
t = self.lookup_class_schemaIDGUID(cls)
if t != last_inherited_type:
# broken
return (sd_clean, sd)
# ok
return (sd, None)
def err_wrong_sd(self, dn, sd, sd_broken):
'''re-write the SD due to incorrect inherited ACEs'''
sd_attr = "nTSecurityDescriptor"
sd_val = ndr_pack(sd)
sd_flags = security.SECINFO_DACL | security.SECINFO_SACL
if not self.confirm_all('Fix %s on %s?' % (sd_attr, dn), 'fix_ntsecuritydescriptor'):
self.report('Not fixing %s on %s\n' % (sd_attr, dn))
return
nmsg = ldb.Message()
nmsg.dn = dn
nmsg[sd_attr] = ldb.MessageElement(sd_val, ldb.FLAG_MOD_REPLACE, sd_attr)
if self.do_modify(nmsg, ["sd_flags:1:%d" % sd_flags],
"Failed to fix attribute %s" % sd_attr):
self.report("Fixed attribute '%s' of '%s'\n" % (sd_attr, dn))
def err_wrong_default_sd(self, dn, sd, sd_old, diff):
'''re-write the SD due to not matching the default (optional mode for fixing an incorrect provision)'''
sd_attr = "nTSecurityDescriptor"
sd_val = ndr_pack(sd)
sd_old_val = ndr_pack(sd_old)
sd_flags = security.SECINFO_DACL | security.SECINFO_SACL
if sd.owner_sid is not None:
sd_flags |= security.SECINFO_OWNER
if sd.group_sid is not None:
sd_flags |= security.SECINFO_GROUP
if not self.confirm_all('Reset %s on %s back to provision default?\n%s' % (sd_attr, dn, diff), 'reset_all_well_known_acls'):
self.report('Not resetting %s on %s\n' % (sd_attr, dn))
return
m = ldb.Message()
m.dn = dn
m[sd_attr] = ldb.MessageElement(sd_val, ldb.FLAG_MOD_REPLACE, sd_attr)
if self.do_modify(m, ["sd_flags:1:%d" % sd_flags],
"Failed to reset attribute %s" % sd_attr):
self.report("Fixed attribute '%s' of '%s'\n" % (sd_attr, dn))
def err_missing_sd_owner(self, dn, sd):
'''re-write the SD due to a missing owner or group'''
sd_attr = "nTSecurityDescriptor"
sd_val = ndr_pack(sd)
sd_flags = security.SECINFO_OWNER | security.SECINFO_GROUP
if not self.confirm_all('Fix missing owner or group in %s on %s?' % (sd_attr, dn), 'fix_ntsecuritydescriptor_owner_group'):
self.report('Not fixing missing owner or group %s on %s\n' % (sd_attr, dn))
return
nmsg = ldb.Message()
nmsg.dn = dn
nmsg[sd_attr] = ldb.MessageElement(sd_val, ldb.FLAG_MOD_REPLACE, sd_attr)
# By setting the session_info to admin_session_info and
# setting the security.SECINFO_OWNER | security.SECINFO_GROUP
# flags we cause the descriptor module to set the correct
# owner and group on the SD, replacing the None/NULL values
# for owner_sid and group_sid currently present.
#
# The admin_session_info matches that used in provision, and
# is the best guess we can make for an existing object that
# hasn't had something specifically set.
#
# This is important for the dns related naming contexts.
self.samdb.set_session_info(self.admin_session_info)
if self.do_modify(nmsg, ["sd_flags:1:%d" % sd_flags],
"Failed to fix metadata for attribute %s" % sd_attr):
self.report("Fixed attribute '%s' of '%s'\n" % (sd_attr, dn))
self.samdb.set_session_info(self.system_session_info)
def has_replmetadata_zero_invocationid(self, dn, repl_meta_data):
repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
str(repl_meta_data))
ctr = repl.ctr
found = False
for o in ctr.array:
# Search for a zero invocationID
if o.originating_invocation_id != misc.GUID("00000000-0000-0000-0000-000000000000"):
continue
found = True
self.report('''ERROR: on replPropertyMetaData of %s, the originating_invocation_id on attribute 0x%08x,
version %d changed at %s is 00000000-0000-0000-0000-000000000000,
but should be non-zero. Proposed fix is to set to our invocationID (%s).'''
% (dn, o.attid, o.version,
time.ctime(samba.nttime2unix(o.originating_change_time)),
self.samdb.get_invocation_id()))
return found
def err_replmetadata_zero_invocationid(self, dn, attr, repl_meta_data):
repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
str(repl_meta_data))
ctr = repl.ctr
now = samba.unix2nttime(int(time.time()))
found = False
for o in ctr.array:
# Search for a zero invocationID
if o.originating_invocation_id != misc.GUID("00000000-0000-0000-0000-000000000000"):
continue
found = True
seq = self.samdb.sequence_number(ldb.SEQ_NEXT)
o.version = o.version + 1
o.originating_change_time = now
o.originating_invocation_id = misc.GUID(self.samdb.get_invocation_id())
o.originating_usn = seq
o.local_usn = seq
if found:
replBlob = ndr_pack(repl)
msg = ldb.Message()
msg.dn = dn
if not self.confirm_all('Fix %s on %s by setting originating_invocation_id on some elements to our invocationID %s?'
% (attr, dn, self.samdb.get_invocation_id()), 'fix_replmetadata_zero_invocationid'):
self.report('Not fixing %s on %s\n' % (attr, dn))
return
nmsg = ldb.Message()
nmsg.dn = dn
nmsg[attr] = ldb.MessageElement(replBlob, ldb.FLAG_MOD_REPLACE, attr)
if self.do_modify(nmsg, ["local_oid:1.3.6.1.4.1.7165.4.3.14:0"],
"Failed to fix attribute %s" % attr):
self.report("Fixed attribute '%s' of '%s'\n" % (attr, dn))
def is_deleted_deleted_objects(self, obj):
faulty = False
if "description" not in obj:
self.report("ERROR: description not present on Deleted Objects container %s" % obj.dn)
faulty = True
if "showInAdvancedViewOnly" not in obj:
self.report("ERROR: showInAdvancedViewOnly not present on Deleted Objects container %s" % obj.dn)
faulty = True
if "objectCategory" not in obj:
self.report("ERROR: objectCategory not present on Deleted Objects container %s" % obj.dn)
faulty = True
if "isCriticalSystemObject" not in obj:
self.report("ERROR: isCriticalSystemObject not present on Deleted Objects container %s" % obj.dn)
faulty = True
if "isRecycled" in obj:
self.report("ERROR: isRecycled present on Deleted Objects container %s" % obj.dn)
faulty = True
return faulty
def err_deleted_deleted_objects(self, obj):
nmsg = ldb.Message()
nmsg.dn = dn = obj.dn
if "description" not in obj:
nmsg["description"] = ldb.MessageElement("Container for deleted objects", ldb.FLAG_MOD_REPLACE, "description")
if "showInAdvancedViewOnly" not in obj:
nmsg["showInAdvancedViewOnly"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "showInAdvancedViewOnly")
if "objectCategory" not in obj:
nmsg["objectCategory"] = ldb.MessageElement("CN=Container,%s" % self.schema_dn, ldb.FLAG_MOD_REPLACE, "objectCategory")
if "isCriticalSystemObject" not in obj:
nmsg["isCriticalSystemObject"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "isCriticalSystemObject")
if "isRecycled" in obj:
nmsg["isRecycled"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_DELETE, "isRecycled")
if not self.confirm_all('Fix Deleted Objects container %s by restoring default attributes?'
% (dn), 'fix_deleted_deleted_objects'):
self.report('Not fixing missing/incorrect attributes on %s\n' % (dn))
return
if self.do_modify(nmsg, ["relax:0"],
"Failed to fix Deleted Objects container %s" % dn):
self.report("Fixed Deleted Objects container '%s'\n" % (dn))
def is_fsmo_role(self, dn):
if dn == self.samdb.domain_dn:
return True
if dn == self.infrastructure_dn:
return True
if dn == self.naming_dn:
return True
if dn == self.schema_dn:
return True
if dn == self.rid_dn:
return True
return False
def calculate_instancetype(self, dn):
instancetype = 0
nc_root = self.samdb.get_nc_root(dn)
if dn == nc_root:
instancetype |= dsdb.INSTANCE_TYPE_IS_NC_HEAD
try:
self.samdb.search(base=dn.parent(), scope=ldb.SCOPE_BASE, attrs=[], controls=["show_recycled:1"])
except ldb.LdbError, (enum, estr):
if enum != ldb.ERR_NO_SUCH_OBJECT:
raise
else:
instancetype |= dsdb.INSTANCE_TYPE_NC_ABOVE
if self.write_ncs is not None and str(nc_root) in self.write_ncs:
instancetype |= dsdb.INSTANCE_TYPE_WRITE
return instancetype
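# Editor's note (illustrative, not in the original): given the logic above, the
# head object of a naming context that this DC holds as writable, and whose
# parent object exists in another instantiated partition, ends up with
# INSTANCE_TYPE_IS_NC_HEAD | INSTANCE_TYPE_NC_ABOVE | INSTANCE_TYPE_WRITE,
# while an ordinary object inside a writable partition gets only
# INSTANCE_TYPE_WRITE.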
def get_wellknown_sd(self, dn):
for [sd_dn, descriptor_fn] in self.wellknown_sds:
if dn == sd_dn:
domain_sid = security.dom_sid(self.samdb.get_domain_sid())
return ndr_unpack(security.descriptor,
descriptor_fn(domain_sid,
name_map=self.name_map))
raise KeyError
def check_object(self, dn, attrs=['*']):
'''check one object'''
if self.verbose:
self.report("Checking object %s" % dn)
if "dn" in map(str.lower, attrs):
attrs.append("name")
if "distinguishedname" in map(str.lower, attrs):
attrs.append("name")
if str(dn.get_rdn_name()).lower() in map(str.lower, attrs):
attrs.append("name")
if 'name' in map(str.lower, attrs):
attrs.append(dn.get_rdn_name())
attrs.append("isDeleted")
attrs.append("systemFlags")
if '*' in attrs:
attrs.append("replPropertyMetaData")
try:
sd_flags = 0
sd_flags |= security.SECINFO_OWNER
sd_flags |= security.SECINFO_GROUP
sd_flags |= security.SECINFO_DACL
sd_flags |= security.SECINFO_SACL
res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
controls=[
"extended_dn:1:1",
"show_recycled:1",
"show_deleted:1",
"sd_flags:1:%d" % sd_flags,
],
attrs=attrs)
except ldb.LdbError, (enum, estr):
if enum == ldb.ERR_NO_SUCH_OBJECT:
if self.in_transaction:
self.report("ERROR: Object %s disappeared during check" % dn)
return 1
return 0
raise
if len(res) != 1:
self.report("ERROR: Object %s failed to load during check" % dn)
return 1
obj = res[0]
error_count = 0
list_attrs_from_md = []
list_attrs_seen = []
got_repl_property_meta_data = False
got_objectclass = False
nc_dn = self.samdb.get_nc_root(obj.dn)
try:
deleted_objects_dn = self.samdb.get_wellknown_dn(nc_dn,
samba.dsdb.DS_GUID_DELETED_OBJECTS_CONTAINER)
except KeyError, e:
deleted_objects_dn = ldb.Dn(self.samdb, "CN=Deleted Objects,%s" % nc_dn)
object_rdn_attr = None
object_rdn_val = None
name_val = None
isDeleted = False
systemFlags = 0
for attrname in obj:
if attrname == 'dn':
continue
if str(attrname).lower() == 'objectclass':
got_objectclass = True
if str(attrname).lower() == "name":
if len(obj[attrname]) != 1:
error_count += 1
self.report("ERROR: Not fixing num_values(%d) for '%s' on '%s'" %
(len(obj[attrname]), attrname, str(obj.dn)))
else:
name_val = obj[attrname][0]
if str(attrname).lower() == str(obj.dn.get_rdn_name()).lower():
object_rdn_attr = attrname
if len(obj[attrname]) != 1:
error_count += 1
self.report("ERROR: Not fixing num_values(%d) for '%s' on '%s'" %
(len(obj[attrname]), attrname, str(obj.dn)))
else:
object_rdn_val = obj[attrname][0]
if str(attrname).lower() == 'isdeleted':
if obj[attrname][0] != "FALSE":
isDeleted = True
if str(attrname).lower() == 'systemflags':
systemFlags = int(obj[attrname][0])
if str(attrname).lower() == 'replpropertymetadata':
if self.has_replmetadata_zero_invocationid(dn, obj[attrname]):
error_count += 1
self.err_replmetadata_zero_invocationid(dn, attrname, obj[attrname])
# We don't continue, as we may also have other fixes for this attribute
# based on what other attributes we see.
list_attrs_from_md = self.process_metadata(obj[attrname])
got_repl_property_meta_data = True
continue
if str(attrname).lower() == 'ntsecuritydescriptor':
(sd, sd_broken) = self.process_sd(dn, obj)
if sd_broken is not None:
self.err_wrong_sd(dn, sd, sd_broken)
error_count += 1
continue
if sd.owner_sid is None or sd.group_sid is None:
self.err_missing_sd_owner(dn, sd)
error_count += 1
continue
if self.reset_well_known_acls:
try:
well_known_sd = self.get_wellknown_sd(dn)
except KeyError:
continue
current_sd = ndr_unpack(security.descriptor,
str(obj[attrname][0]))
diff = get_diff_sds(well_known_sd, current_sd, security.dom_sid(self.samdb.get_domain_sid()))
if diff != "":
self.err_wrong_default_sd(dn, well_known_sd, current_sd, diff)
error_count += 1
continue
continue
if str(attrname).lower() == 'objectclass':
normalised = self.samdb.dsdb_normalise_attributes(self.samdb_schema, attrname, list(obj[attrname]))
if list(normalised) != list(obj[attrname]):
self.err_normalise_mismatch_replace(dn, attrname, list(obj[attrname]))
error_count += 1
continue
if str(attrname).lower() == 'userparameters':
if len(obj[attrname][0]) == 1 and obj[attrname][0][0] == '\x20':
error_count += 1
self.err_short_userParameters(obj, attrname, obj[attrname])
continue
elif obj[attrname][0][:16] == '\x20\x00\x20\x00\x20\x00\x20\x00\x20\x00\x20\x00\x20\x00\x20\x00':
# This is the correct, normal prefix
continue
elif obj[attrname][0][:20] == 'IAAgACAAIAAgACAAIAAg':
# this is the typical prefix from a windows migration
error_count += 1
self.err_base64_userParameters(obj, attrname, obj[attrname])
continue
elif obj[attrname][0][1] != '\x00' and obj[attrname][0][3] != '\x00' and obj[attrname][0][5] != '\x00' and obj[attrname][0][7] != '\x00' and obj[attrname][0][9] != '\x00':
# This is a prefix that is not in UTF-16 format for the space or munged dialback prefix
error_count += 1
self.err_utf8_userParameters(obj, attrname, obj[attrname])
continue
elif len(obj[attrname][0]) % 2 != 0:
# This is a value whose length is not even
error_count += 1
self.err_odd_userParameters(obj, attrname, obj[attrname])
continue
elif obj[attrname][0][1] == '\x00' and obj[attrname][0][2] == '\x00' and obj[attrname][0][3] == '\x00' and obj[attrname][0][4] != '\x00' and obj[attrname][0][5] == '\x00':
# This is a prefix that would happen if a SAMR-written value was replicated from a Samba 4.1 server to a working server
error_count += 1
self.err_doubled_userParameters(obj, attrname, obj[attrname])
continue
# check for empty attributes
for val in obj[attrname]:
if val == '':
self.err_empty_attribute(dn, attrname)
error_count += 1
continue
# get the syntax oid for the attribute, so we can have
# special handling for some specific attribute types
try:
syntax_oid = self.samdb_schema.get_syntax_oid_from_lDAPDisplayName(attrname)
except Exception, msg:
self.err_unknown_attribute(obj, attrname)
error_count += 1
continue
flag = self.samdb_schema.get_systemFlags_from_lDAPDisplayName(attrname)
if (not flag & dsdb.DS_FLAG_ATTR_NOT_REPLICATED
and not flag & dsdb.DS_FLAG_ATTR_IS_CONSTRUCTED
and not self.samdb_schema.get_linkId_from_lDAPDisplayName(attrname)):
list_attrs_seen.append(str(attrname).lower())
if syntax_oid in [ dsdb.DSDB_SYNTAX_BINARY_DN, dsdb.DSDB_SYNTAX_OR_NAME,
dsdb.DSDB_SYNTAX_STRING_DN, ldb.SYNTAX_DN ]:
# it's some form of DN, do specialised checking on those
error_count += self.check_dn(obj, attrname, syntax_oid)
# check for incorrectly normalised attributes
for val in obj[attrname]:
normalised = self.samdb.dsdb_normalise_attributes(self.samdb_schema, attrname, [val])
if len(normalised) != 1 or normalised[0] != val:
self.err_normalise_mismatch(dn, attrname, obj[attrname])
error_count += 1
break
if str(attrname).lower() == "instancetype":
calculated_instancetype = self.calculate_instancetype(dn)
if len(obj["instanceType"]) != 1 or obj["instanceType"][0] != str(calculated_instancetype):
error_count += 1
self.err_wrong_instancetype(obj, calculated_instancetype)
if not got_objectclass and ("*" in attrs or "objectclass" in map(str.lower, attrs)):
error_count += 1
self.err_missing_objectclass(dn)
if ("*" in attrs or "name" in map(str.lower, attrs)):
if name_val is None:
error_count += 1
self.report("ERROR: Not fixing missing 'name' on '%s'" % (str(obj.dn)))
if object_rdn_attr is None:
error_count += 1
self.report("ERROR: Not fixing missing '%s' on '%s'" % (obj.dn.get_rdn_name(), str(obj.dn)))
if name_val is not None:
parent_dn = None
if isDeleted:
if not (systemFlags & samba.dsdb.SYSTEM_FLAG_DISALLOW_MOVE_ON_DELETE):
parent_dn = deleted_objects_dn
if parent_dn is None:
parent_dn = obj.dn.parent()
expected_dn = ldb.Dn(self.samdb, "RDN=RDN,%s" % (parent_dn))
expected_dn.set_component(0, obj.dn.get_rdn_name(), name_val)
if obj.dn == deleted_objects_dn:
expected_dn = obj.dn
if expected_dn != obj.dn:
error_count += 1
self.err_wrong_dn(obj, expected_dn, object_rdn_attr, object_rdn_val, name_val)
elif obj.dn.get_rdn_value() != object_rdn_val:
error_count += 1
self.report("ERROR: Not fixing %s=%r on '%s'" % (object_rdn_attr, object_rdn_val, str(obj.dn)))
show_dn = True
if got_repl_property_meta_data:
if obj.dn == deleted_objects_dn:
isDeletedAttId = 131120
# It's 29/12/9999 at 23:59:59 UTC as specified in MS-ADTS 7.1.1.4.2 Deleted Objects Container
expectedTimeDo = 2650466015990000000
originating = self.get_originating_time(obj["replPropertyMetaData"], isDeletedAttId)
if originating != expectedTimeDo:
if self.confirm_all("Fix isDeleted originating_change_time on '%s'" % str(dn), 'fix_time_metadata'):
nmsg = ldb.Message()
nmsg.dn = dn
nmsg["isDeleted"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "isDeleted")
error_count += 1
self.samdb.modify(nmsg, controls=["provision:0"])
else:
self.report("Not fixing isDeleted originating_change_time on '%s'" % str(dn))
for att in list_attrs_seen:
if not att in list_attrs_from_md:
if show_dn:
self.report("On object %s" % dn)
show_dn = False
error_count += 1
self.report("ERROR: Attribute %s not present in replication metadata" % att)
if not self.confirm_all("Fix missing replPropertyMetaData element '%s'" % att, 'fix_all_metadata'):
self.report("Not fixing missing replPropertyMetaData element '%s'" % att)
continue
self.fix_metadata(dn, att)
if self.is_fsmo_role(dn):
if "fSMORoleOwner" not in obj and ("*" in attrs or "fsmoroleowner" in map(str.lower, attrs)):
self.err_no_fsmoRoleOwner(obj)
error_count += 1
try:
if dn != self.samdb.get_root_basedn():
res = self.samdb.search(base=dn.parent(), scope=ldb.SCOPE_BASE,
controls=["show_recycled:1", "show_deleted:1"])
except ldb.LdbError, (enum, estr):
if enum == ldb.ERR_NO_SUCH_OBJECT:
self.err_missing_parent(obj)
error_count += 1
else:
raise
if dn in self.deleted_objects_containers and '*' in attrs:
if self.is_deleted_deleted_objects(obj):
self.err_deleted_deleted_objects(obj)
error_count += 1
return error_count
################################################################
# check special @ROOTDSE attributes
def check_rootdse(self):
'''check the @ROOTDSE special object'''
dn = ldb.Dn(self.samdb, '@ROOTDSE')
if self.verbose:
self.report("Checking object %s" % dn)
res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE)
if len(res) != 1:
self.report("Object %s disappeared during check" % dn)
return 1
obj = res[0]
error_count = 0
# check that the dsServiceName is in GUID form
if not 'dsServiceName' in obj:
self.report('ERROR: dsServiceName missing in @ROOTDSE')
return error_count+1
if not obj['dsServiceName'][0].startswith('<GUID='):
self.report('ERROR: dsServiceName not in GUID form in @ROOTDSE')
error_count += 1
if not self.confirm('Change dsServiceName to GUID form?'):
return error_count
res = self.samdb.search(base=ldb.Dn(self.samdb, obj['dsServiceName'][0]),
scope=ldb.SCOPE_BASE, attrs=['objectGUID'])
guid_str = str(ndr_unpack(misc.GUID, res[0]['objectGUID'][0]))
m = ldb.Message()
m.dn = dn
m['dsServiceName'] = ldb.MessageElement("<GUID=%s>" % guid_str,
ldb.FLAG_MOD_REPLACE, 'dsServiceName')
if self.do_modify(m, [], "Failed to change dsServiceName to GUID form", validate=False):
self.report("Changed dsServiceName to GUID form")
return error_count
###############################################
# re-index the database
def reindex_database(self):
'''re-index the whole database'''
m = ldb.Message()
m.dn = ldb.Dn(self.samdb, "@ATTRIBUTES")
m['add'] = ldb.MessageElement('NONE', ldb.FLAG_MOD_ADD, 'force_reindex')
m['delete'] = ldb.MessageElement('NONE', ldb.FLAG_MOD_DELETE, 'force_reindex')
return self.do_modify(m, [], 're-indexed database', validate=False)
###############################################
# reset @MODULES
def reset_modules(self):
'''reset @MODULES to that needed for current sam.ldb (to read a very old database)'''
m = ldb.Message()
m.dn = ldb.Dn(self.samdb, "@MODULES")
m['@LIST'] = ldb.MessageElement('samba_dsdb', ldb.FLAG_MOD_REPLACE, '@LIST')
return self.do_modify(m, [], 'reset @MODULES on database', validate=False)
| gpl-3.0 | 9,082,774,087,875,641,000 | 44.853064 | 283 | 0.55778 | false |
CUFCTL/DLBD | face-detection-code/object_detection/core/standard_fields.py | 3 | 9329 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains classes specifying naming conventions used for object detection.
Specifies:
InputDataFields: standard fields used by reader/preprocessor/batcher.
DetectionResultFields: standard fields returned by object detector.
BoxListFields: standard field used by BoxList
TfExampleFields: standard fields for tf-example data format (go/tf-example).
"""
class InputDataFields(object):
"""Names for the input tensors.
Holds the standard data field names to use for identifying input tensors. This
should be used by the decoder to identify keys for the returned tensor_dict
containing input tensors. And it should be used by the model to identify the
tensors it needs.
Attributes:
image: image.
original_image: image in the original input size.
key: unique key corresponding to image.
source_id: source of the original image.
filename: original filename of the dataset (without common path).
groundtruth_image_classes: image-level class labels.
groundtruth_boxes: coordinates of the ground truth boxes in the image.
groundtruth_classes: box-level class labels.
groundtruth_label_types: box-level label types (e.g. explicit negative).
groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead]
is the groundtruth a single object or a crowd.
groundtruth_area: area of a groundtruth segment.
groundtruth_difficult: is a `difficult` object
groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the
same class, forming a connected group, where instances are heavily
occluding each other.
proposal_boxes: coordinates of object proposal boxes.
proposal_objectness: objectness score of each proposal.
groundtruth_instance_masks: ground truth instance masks.
groundtruth_instance_boundaries: ground truth instance boundaries.
groundtruth_instance_classes: instance mask-level class labels.
groundtruth_keypoints: ground truth keypoints.
groundtruth_keypoint_visibilities: ground truth keypoint visibilities.
groundtruth_label_scores: groundtruth label scores.
groundtruth_weights: groundtruth weight factor for bounding boxes.
num_groundtruth_boxes: number of groundtruth boxes.
true_image_shape: true shapes of images in the resized images, as resized
images can be padded with zeros.
"""
image = 'image'
original_image = 'original_image'
key = 'key'
source_id = 'source_id'
filename = 'filename'
groundtruth_image_classes = 'groundtruth_image_classes'
groundtruth_boxes = 'groundtruth_boxes'
groundtruth_classes = 'groundtruth_classes'
groundtruth_label_types = 'groundtruth_label_types'
groundtruth_is_crowd = 'groundtruth_is_crowd'
groundtruth_area = 'groundtruth_area'
groundtruth_difficult = 'groundtruth_difficult'
groundtruth_group_of = 'groundtruth_group_of'
proposal_boxes = 'proposal_boxes'
proposal_objectness = 'proposal_objectness'
groundtruth_instance_masks = 'groundtruth_instance_masks'
groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
groundtruth_instance_classes = 'groundtruth_instance_classes'
groundtruth_keypoints = 'groundtruth_keypoints'
groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
groundtruth_label_scores = 'groundtruth_label_scores'
groundtruth_weights = 'groundtruth_weights'
num_groundtruth_boxes = 'num_groundtruth_boxes'
true_image_shape = 'true_image_shape'
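# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A decoder typically returns a tensor_dict keyed by these constants; the
# values below are plain Python placeholders standing in for actual tensors.
_example_input_dict = {
    InputDataFields.image: 'decoded image tensor',
    InputDataFields.groundtruth_boxes: [[0.1, 0.2, 0.5, 0.6]],
    InputDataFields.groundtruth_classes: [16, 8],
}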
class DetectionResultFields(object):
"""Naming conventions for storing the output of the detector.
Attributes:
source_id: source of the original image.
key: unique key corresponding to image.
detection_boxes: coordinates of the detection boxes in the image.
detection_scores: detection scores for the detection boxes in the image.
detection_classes: detection-level class labels.
detection_masks: contains a segmentation mask for each detection box.
detection_boundaries: contains an object boundary for each detection box.
detection_keypoints: contains detection keypoints for each detection box.
num_detections: number of detections in the batch.
"""
source_id = 'source_id'
key = 'key'
detection_boxes = 'detection_boxes'
detection_scores = 'detection_scores'
detection_classes = 'detection_classes'
detection_masks = 'detection_masks'
detection_boundaries = 'detection_boundaries'
detection_keypoints = 'detection_keypoints'
num_detections = 'num_detections'
class BoxListFields(object):
"""Naming conventions for BoxLists.
Attributes:
boxes: bounding box coordinates.
classes: classes per bounding box.
scores: scores per bounding box.
weights: sample weights per bounding box.
objectness: objectness score per bounding box.
masks: masks per bounding box.
boundaries: boundaries per bounding box.
keypoints: keypoints per bounding box.
keypoint_heatmaps: keypoint heatmaps per bounding box.
"""
boxes = 'boxes'
classes = 'classes'
scores = 'scores'
weights = 'weights'
objectness = 'objectness'
masks = 'masks'
boundaries = 'boundaries'
keypoints = 'keypoints'
keypoint_heatmaps = 'keypoint_heatmaps'
class TfExampleFields(object):
"""TF-example proto feature names for object detection.
Holds the standard feature names to load from an Example proto for object
detection.
Attributes:
image_encoded: JPEG encoded string
image_format: image format, e.g. "JPEG"
filename: filename
channels: number of channels of image
colorspace: colorspace, e.g. "RGB"
height: height of image in pixels, e.g. 462
width: width of image in pixels, e.g. 581
source_id: original source of the image
object_class_text: labels in text format, e.g. ["person", "cat"]
object_class_label: labels in numbers, e.g. [16, 8]
object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30
object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40
object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50
object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70
object_view: viewpoint of object, e.g. ["frontal", "left"]
object_truncated: is object truncated, e.g. [true, false]
object_occluded: is object occluded, e.g. [true, false]
object_difficult: is object difficult, e.g. [true, false]
object_group_of: is object a single object or a group of objects
object_depiction: is object a depiction
object_is_crowd: [DEPRECATED, use object_group_of instead]
is the object a single object or a crowd
object_segment_area: the area of the segment.
object_weight: a weight factor for the object's bounding box.
instance_masks: instance segmentation masks.
instance_boundaries: instance boundaries.
instance_classes: Classes for each instance segmentation mask.
detection_class_label: class label in numbers.
detection_bbox_ymin: ymin coordinates of a detection box.
detection_bbox_xmin: xmin coordinates of a detection box.
detection_bbox_ymax: ymax coordinates of a detection box.
detection_bbox_xmax: xmax coordinates of a detection box.
detection_score: detection score for the class label and box.
"""
image_encoded = 'image/encoded'
image_format = 'image/format' # format is reserved keyword
filename = 'image/filename'
channels = 'image/channels'
colorspace = 'image/colorspace'
height = 'image/height'
width = 'image/width'
source_id = 'image/source_id'
object_class_text = 'image/object/class/text'
object_class_label = 'image/object/class/label'
object_bbox_ymin = 'image/object/bbox/ymin'
object_bbox_xmin = 'image/object/bbox/xmin'
object_bbox_ymax = 'image/object/bbox/ymax'
object_bbox_xmax = 'image/object/bbox/xmax'
object_view = 'image/object/view'
object_truncated = 'image/object/truncated'
object_occluded = 'image/object/occluded'
object_difficult = 'image/object/difficult'
object_group_of = 'image/object/group_of'
object_depiction = 'image/object/depiction'
object_is_crowd = 'image/object/is_crowd'
object_segment_area = 'image/object/segment/area'
object_weight = 'image/object/weight'
instance_masks = 'image/segmentation/object'
instance_boundaries = 'image/boundaries/object'
instance_classes = 'image/segmentation/object/class'
detection_class_label = 'image/detection/label'
detection_bbox_ymin = 'image/detection/bbox/ymin'
detection_bbox_xmin = 'image/detection/bbox/xmin'
detection_bbox_ymax = 'image/detection/bbox/ymax'
detection_bbox_xmax = 'image/detection/bbox/xmax'
detection_score = 'image/detection/score'
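# --- Illustrative usage (editor's sketch, not part of the original file) ---
# These constants name the features expected in a tf.Example proto; a feature
# dict for one annotated image could look like the following (values are plain
# Python stand-ins for the encoded feature values).
_example_tf_example_features = {
    TfExampleFields.image_encoded: b'<jpeg bytes>',
    TfExampleFields.height: 462,
    TfExampleFields.width: 581,
    TfExampleFields.object_class_text: ['person', 'cat'],
    TfExampleFields.object_class_label: [16, 8],
    TfExampleFields.object_bbox_xmin: [0.10, 0.30],
}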
| mit | -876,625,569,367,002,400 | 42.593458 | 80 | 0.738557 | false |
hckiang/DBLite | boost_1_54_0/tools/build/v2/test/testing_support.py | 27 | 1970 | #!/usr/bin/python
# Copyright 2008 Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Tests different aspects of Boost Build's automated testing support.
import BoostBuild
################################################################################
#
# test_files_with_spaces_in_their_name()
# --------------------------------------
#
################################################################################
def test_files_with_spaces_in_their_name():
"""Regression test making sure test result files get created correctly when
testing files with spaces in their name.
"""
t = BoostBuild.Tester(use_test_config=False)
t.write("valid source.cpp", "int main() {}\n");
t.write("invalid source.cpp", "this is not valid source code");
t.write("jamroot.jam", """
import testing ;
testing.compile "valid source.cpp" ;
testing.compile-fail "invalid source.cpp" ;
""")
t.run_build_system(status=0)
t.expect_addition("bin/invalid source.test/$toolset/debug/invalid source.obj")
t.expect_addition("bin/invalid source.test/$toolset/debug/invalid source.test")
t.expect_addition("bin/valid source.test/$toolset/debug/valid source.obj")
t.expect_addition("bin/valid source.test/$toolset/debug/valid source.test")
t.expect_content("bin/valid source.test/$toolset/debug/valid source.test", \
"passed" )
t.expect_content( \
"bin/invalid source.test/$toolset/debug/invalid source.test", \
"passed" )
t.expect_content( \
"bin/invalid source.test/$toolset/debug/invalid source.obj", \
"failed as expected" )
t.cleanup()
################################################################################
#
# main()
# ------
#
################################################################################
test_files_with_spaces_in_their_name()
| gpl-3.0 | 1,231,372,635,234,573,000 | 31.295082 | 83 | 0.559391 | false |
xen0l/ansible | lib/ansible/modules/cloud/azure/azure_rm_appserviceplan_facts.py | 4 | 7323 | #!/usr/bin/python
#
# Copyright (c) 2018 Yunge Zhu, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_appserviceplan_facts
version_added: "2.7"
short_description: Get azure app service plan facts.
description:
- Get facts for a specific app service plan, all app service plans in a resource group, or all app service plans in the current subscription.
options:
name:
description:
- Only show results for a specific app service plan.
resource_group:
description:
- Limit results by resource group.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Yunge Zhu (@yungezz)"
'''
EXAMPLES = '''
- name: Get facts for app service plan by name
azure_rm_appserviceplan_facts:
resource_group: testrg
name: winwebapp1
- name: Get facts for all app service plans in a resource group
azure_rm_appserviceplan_facts:
resource_group: testrg
- name: Get facts for app service plan with tags
azure_rm_appserviceplan_facts:
tags:
- testtag
- foo:bar
'''
RETURN = '''
appserviceplans:
description: List of app service plans.
returned: always
type: complex
contains:
id:
description: Id of the app service plan.
returned: always
type: str
sample: /subscriptions/xxxx/resourceGroups/xxx/providers/Microsoft.Web/serverfarms/xxx
name:
description: Name of the app service plan.
returned: always
type: str
resource_group:
description: Resource group of the app service plan.
returned: always
type: str
location:
description: Location of the app service plan.
returned: always
type: str
kind:
description: Kind of the app service plan.
returned: always
type: str
sample: app
sku:
description: Sku of the app service plan.
returned: always
type: complex
contains:
name:
description: Name of sku.
returned: always
type: str
sample: S1
family:
description: Family of sku.
returned: always
type: str
sample: S
size:
description: Size of sku.
returned: always
type: str
sample: S1
tier:
description: Tier of sku.
returned: always
type: str
sample: Standard
capacity:
description: Capacity of sku.
returned: always
type: int
sample: 1
'''
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureMissingResourceHttpError, AzureHttpError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'AppServicePlan'
class AzureRMAppServicePlanFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_appserviceplans=[])
)
self.name = None
self.resource_group = None
self.tags = None
self.info_level = None
super(AzureRMAppServicePlanFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name:
self.results['appserviceplans'] = self.list_by_name()
elif self.resource_group:
self.results['appserviceplans'] = self.list_by_resource_group()
else:
self.results['appserviceplans'] = self.list_all()
return self.results
def list_by_name(self):
self.log('Get app service plan {0}'.format(self.name))
item = None
result = []
try:
item = self.web_client.app_service_plans.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
curated_result = self.construct_curated_plan(item)
result = [curated_result]
return result
def list_by_resource_group(self):
self.log('List app service plans in resource groups {0}'.format(self.resource_group))
try:
response = list(self.web_client.app_service_plans.list_by_resource_group(self.resource_group))
except CloudError as exc:
self.fail("Error listing app service plan in resource groups {0} - {1}".format(self.resource_group, str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
curated_output = self.construct_curated_plan(item)
results.append(curated_output)
return results
def list_all(self):
self.log('List app service plans in current subscription')
try:
response = list(self.web_client.app_service_plans.list())
except CloudError as exc:
self.fail("Error listing app service plans: {1}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
curated_output = self.construct_curated_plan(item)
results.append(curated_output)
return results
def construct_curated_plan(self, plan):
plan_info = self.serialize_obj(plan, AZURE_OBJECT_CLASS)
curated_output = dict()
curated_output['id'] = plan_info['id']
curated_output['name'] = plan_info['name']
curated_output['resource_group'] = plan_info['properties']['resourceGroup']
curated_output['location'] = plan_info['location']
curated_output['tags'] = plan_info.get('tags', None)
curated_output['is_linux'] = False
curated_output['kind'] = plan_info['kind']
curated_output['sku'] = plan_info['sku']
if plan_info['properties'].get('reserved', None):
curated_output['is_linux'] = True
return curated_output
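# Editor's note (illustrative, not in the original): the dict returned above is
# what ends up in results['appserviceplans'], e.g.
#   {'id': '/subscriptions/.../serverfarms/plan1', 'name': 'plan1',
#    'resource_group': 'testrg', 'location': 'eastus', 'tags': None,
#    'is_linux': False, 'kind': 'app', 'sku': {...}}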
def main():
AzureRMAppServicePlanFacts()
if __name__ == '__main__':
main()
| gpl-3.0 | -2,816,926,059,863,115,300 | 29.640167 | 142 | 0.563977 | false |
blueburningcoder/pybrain | examples/rl/environments/cartpole/cart_fem.py | 30 | 1688 | #!/usr/bin/env python
#########################################################################
# Reinforcement Learning with FEM on the CartPoleEnvironment
#
# Requirements: pylab (for plotting only). If not available, comment the
# last 3 lines out
#########################################################################
__author__ = "Thomas Rueckstiess, Frank Sehnke"
from pybrain.tools.example_tools import ExTools
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.environments.cartpole import CartPoleEnvironment, BalanceTask
from pybrain.rl.agents import OptimizationAgent
from pybrain.optimization import FEM
from pybrain.rl.experiments import EpisodicExperiment
batch=2 #number of samples per learning step
prnts=100 #number of learning steps after results are printed
epis=4000/batch/prnts #number of rollouts
numbExp=10 #number of experiments
et = ExTools(batch, prnts) #tool for printing and plotting
for runs in range(numbExp):
# create environment
env = CartPoleEnvironment()
# create task
task = BalanceTask(env, 200, desiredValue=None)
# create controller network
net = buildNetwork(4, 1, bias=False)
# create agent with controller and learner (and its options)
agent = OptimizationAgent(net, FEM(storeAllEvaluations = True))
et.agent = agent
# create the experiment
experiment = EpisodicExperiment(task, agent)
#Do the experiment
for updates in range(epis):
for i in range(prnts):
experiment.doEpisodes(batch)
#print "Epsilon : ", agent.learner.sigma
et.printResults((agent.learner._allEvaluations)[-50:-1], runs, updates)
et.addExps()
et.showExps()
| bsd-3-clause | -2,547,020,447,445,384,700 | 36.511111 | 79 | 0.675948 | false |
gymnasium/edx-platform | common/djangoapps/third_party_auth/tests/test_middleware.py | 14 | 1586 | """
Tests for third party auth middleware
"""
import mock
from django.contrib.messages.middleware import MessageMiddleware
from django.http import HttpResponse
from django.test.client import RequestFactory
from requests.exceptions import HTTPError
from openedx.core.djangolib.testing.utils import skip_unless_lms
from third_party_auth.middleware import ExceptionMiddleware
from third_party_auth.tests.testutil import TestCase
from student.helpers import get_next_url_for_login_page
class ThirdPartyAuthMiddlewareTestCase(TestCase):
"""Tests that ExceptionMiddleware is correctly redirected"""
@skip_unless_lms
@mock.patch('django.conf.settings.MESSAGE_STORAGE', 'django.contrib.messages.storage.cookie.CookieStorage')
def test_http_exception_redirection(self):
"""
Test that ExceptionMiddleware redirects to the login page
when PSA raises an HTTPError exception.
"""
request = RequestFactory().get("dummy_url")
next_url = get_next_url_for_login_page(request)
login_url = '/login?next=' + next_url
request.META['HTTP_REFERER'] = 'http://example.com:8000/login'
exception = HTTPError()
exception.response = HttpResponse(status=502)
# Add error message for error in auth pipeline
MessageMiddleware().process_request(request)
response = ExceptionMiddleware().process_exception(
request, exception
)
target_url = response.url
self.assertEqual(response.status_code, 302)
self.assertTrue(target_url.endswith(login_url))
| agpl-3.0 | -6,626,307,803,739,780,000 | 36.761905 | 111 | 0.722573 | false |
AdauriSilva/ifba-projeto2017.1 | main.py | 1 | 3201 | #-*-coding:utf8;-*- only needed on QPython mobile
lista=""
produtos=[['NOME DO PRODUTO: ','VALIDADE: ','ESTADO: 1-SOLIDO 2-LIQUIDO 3-GASOSO','MASSA(Kg): ','PRECO (EM REAIS POR GRAMA): ']]
medicamentos=[['NOME DO MEDICAMENTO: ','PRINCIPIO ATIVO: ','CONCENTRACAO: ','VALIDADE: ','COMPOSICAO']]
funcionarios=[['NOME: ','SALARIO (EM REAIS): ','MEDICAMENTO: ']]
relatorios=[]
segmento=[]
def menu_principal(lista,segmento):
print("***** MENU PRINCIPAL *****")
print("1 - Produtos Químicos")
print("2 - Medicamentos")
print("3 - Funcionários")
print("4 - Relatórios")
opcao=raw_input("Digite a opção desejada: ")
if opcao=="1":
lista='PRODUTO'
segmento=produtos
menu_produto(lista,segmento)
if opcao=="2":
lista='MEDICAMENTO'
segmento=medicamentos
menu_medicamento(lista,segmento)
if opcao=="3":
lista='FUNCIONARIO'
segmento=funcionarios
menu_funcionario(lista,segmento)
if opcao=="4":
lista='RELATORIO'
segmento=relatorios
menu_relatorio(lista,segmento)
menu_principal(lista,segmento)
def menu_produto(lista,segmento):
print("***** PRODUTOS *****")
menu_sec(lista,segmento)
def menu_medicamento(lista,segmento):
print("***** MEDICAMENTOS *****")
menu_sec(lista,segmento)
def menu_funcionario(lista,segmento):
print("***** FUNCIONÁRIOS *****")
menu_sec(lista,segmento)
def menu_relatorio(lista,segmento):
print("***** RELATÓRIOS *****")
print("Informa se um quantidade N de unidades comerciais de um dado medicamento pode ser ou nao")
print("fabricadaconsiderando o a disponibilidade de produtos quımicos necessarios e de funcionarios habilitados")
print("Informa os produtos quımicos e medicamentos a vencerem nos proximos 10 dias")
print("Informa qual medicamento possui o maior numero de funcionarios habilitados para sua producao,")
print("independentemente da existencia de produtos quımicos em estoque.")
menu_principal(lista,segmento)
def cadastro(lista,segmento):
print '***** CADASTRAR - ',lista,' *****'
def busca(lista,segmento):
print '***** BUSCAR - ',lista,' *****'
def atualiza(lista,segmento):
print '***** ATUALIZAR - ',lista,' *****'
def remove(lista,segmento):
print '***** REMOVER - ',lista,' *****'
def menu_sec(lista,segmento):
print("1 - Cadastrar")
print("2 - Buscar")
print("3 - Atualizar")
print("4 - Remover")
opcao_s=raw_input("Digite a opção desejada: ")
if opcao_s=="1":
cadastro(segmento)
if opcao_s=="2":
busca(lista,segmento)
if opcao_s=="3":
atualiza(lista,segmento)
if opcao_s=="4":
remove(lista,segmento)
menu_sec(lista,segmento)
def cadastro(original):
add=[]
referencia=original[0]
for campo in referencia:
if campo=='MEDICAMENTO: ':
med=[]
qtdHabilidade=input("Qtd de produtos que conhece: ")
for i in range(qtdHabilidade):
tempMedic=raw_input("Digite o nome do medicamento: ")
med.append(tempMedic)
add.append(med)
else:
x=raw_input(campo)
add.append(x)
original.append(add)
menu_principal(lista,segmento)
| gpl-3.0 | 4,984,166,948,084,962,000 | 32.925532 | 128 | 0.635936 | false |
tomkooij/sapphire | sapphire/tests/transformations/test_geographic.py | 1 | 1570 | import unittest
from sapphire.transformations import geographic
class GeographicTransformationTests(unittest.TestCase):
def setUp(self):
self.ref_lla = (52.35592417, 4.95114402, 56.10234594)
self.transform = geographic.FromWGS84ToENUTransformation(self.ref_lla)
def test_attributes(self):
self.assertEqual(self.ref_lla, self.transform.ref_lla)
ref_ecef = (3889144.77, 336914.68, 5027133.30)
self.assert_tuple_almost_equal(ref_ecef, self.transform.ref_ecef, 2)
def test_ecef_to_lla(self):
lla = self.transform.ecef_to_lla(self.transform.ref_ecef)
self.assert_tuple_almost_equal(self.ref_lla, lla)
def test_ecef_to_enu(self):
enu = self.transform.ecef_to_enu(self.transform.ref_ecef)
ecef = self.transform.enu_to_ecef(enu)
self.assert_tuple_almost_equal(self.transform.ref_ecef, ecef)
def test_lla_to_enu(self):
enu = self.transform.lla_to_enu(self.transform.ref_lla)
lla = self.transform.enu_to_lla(enu)
self.assert_tuple_almost_equal(self.transform.ref_lla, lla)
def assert_tuple_almost_equal(self, actual, expected, places=7):
self.assertIsInstance(actual, tuple)
self.assertIsInstance(expected, tuple)
msg = "Tuples differ: %s != %s" % (str(actual), str(expected))
for actual_value, expected_value in zip(actual, expected):
self.assertAlmostEqual(actual_value, expected_value, places=places,
msg=msg)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 6,101,746,015,349,972,000 | 35.511628 | 79 | 0.661146 | false |
SYHGroup/mau_mau_bot | internationalization.py | 2 | 4707 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Telegram bot to play UNO in group chats
# Copyright (c) 2016 Jannes Höke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gettext
from functools import wraps
from locales import available_locales
from pony.orm import db_session
from user_setting import UserSetting
from shared_vars import gm
GETTEXT_DOMAIN = 'unobot'
GETTEXT_DIR = 'locales'
class _Underscore(object):
"""Class to emulate flufl.i18n behaviour, but with plural support"""
def __init__(self):
self.translators = {
locale: gettext.GNUTranslations(
open(gettext.find(
GETTEXT_DOMAIN, GETTEXT_DIR, languages=[locale]
), 'rb')
)
for locale
in available_locales.keys()
if locale != 'en_US' # No translation file for en_US
}
self.locale_stack = list()
def push(self, locale):
self.locale_stack.append(locale)
def pop(self):
if self.locale_stack:
return self.locale_stack.pop()
else:
return None
@property
def code(self):
if self.locale_stack:
return self.locale_stack[-1]
else:
return None
def __call__(self, singular, plural=None, n=1, locale=None):
if not locale:
locale = self.locale_stack[-1]
if locale not in self.translators.keys():
if n == 1:
return singular
else:
return plural
translator = self.translators[locale]
if plural is None:
return translator.gettext(singular)
else:
return translator.ngettext(singular, plural, n)
_ = _Underscore()
def __(singular, plural=None, n=1, multi=False):
"""Translates text into all locales on the stack"""
translations = list()
if not multi and len(set(_.locale_stack)) >= 1:
translations.append(_(singular, plural, n, 'en_US'))
else:
for locale in _.locale_stack:
translation = _(singular, plural, n, locale)
if translation not in translations:
translations.append(translation)
return '\n'.join(translations)
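# Illustrative usage (editor's sketch; assumes a 'de' catalogue is present in
# available_locales and under locales/):
#
#   _.push('de')
#   text = _("Game started!")             # singular form
#   cards = _("%d card", "%d cards", 3)   # plural form, n=3
#   _.pop()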
def user_locale(func):
@wraps(func)
@db_session
def wrapped(bot, update, *pargs, **kwargs):
user = _user_chat_from_update(update)[0]
with db_session:
us = UserSetting.get(id=user.id)
if us and us.lang != 'en':
_.push(us.lang)
else:
_.push('en_US')
result = func(bot, update, *pargs, **kwargs)
_.pop()
return result
return wrapped
def game_locales(func):
@wraps(func)
@db_session
def wrapped(bot, update, *pargs, **kwargs):
user, chat = _user_chat_from_update(update)
player = gm.player_for_user_in_chat(user, chat)
locales = list()
if player:
for player in player.game.players:
us = UserSetting.get(id=player.user.id)
if us and us.lang != 'en':
loc = us.lang
else:
loc = 'en_US'
if loc in locales:
continue
_.push(loc)
locales.append(loc)
result = func(bot, update, *pargs, **kwargs)
while _.code:
_.pop()
return result
return wrapped
def _user_chat_from_update(update):
try:
user = update.message.from_user
chat = update.message.chat
except (NameError, AttributeError):
try:
user = update.inline_query.from_user
chat = gm.userid_current[user.id].game.chat
except KeyError:
chat = None
except (NameError, AttributeError):
try:
user = update.chosen_inline_result.from_user
chat = gm.userid_current[user.id].game.chat
except (NameError, AttributeError, KeyError):
chat = None
return user, chat
| agpl-3.0 | 2,837,103,353,947,657,700 | 26.520468 | 74 | 0.580748 | false |
georgyberdyshev/ascend | pygtk/canvas/blocktype.py | 1 | 5395 | import pygtk
import re
pygtk.require('2.0')
import gtk
import ascpy
import os.path
import cairo
class BlockType():
"""
All data associated with the MODEL type that is represented by a block.
This includes the actual ASCEND TypeDescription as well as the NOTES that
are found to represent the inputs and outputs for this block, as well as
some kind of graphical representation(s) for the block. In the canvas-
based GUI, there will need to be a Cairo-based represention, for drawing
on the canvas, as well as some other form, for creating the icon in the
block palette.
"""
def __init__(self, typedesc, notesdb):
self.type = typedesc
self.notesdb = notesdb
self.arrays = []
self.gr = [] # this data structure is for Graphical Representation for custom icons
self.port_in = {} # this list is for location of input ports in custom icons
self.port_out = {} # this list is for location of output ports in custom icons
self.arrays = []
# FIXME BlockType should know what .a4c file to load in order to access
# its type definition, for use in unpickling.
self.sourcefile = None
nn = notesdb.getTypeRefinedNotesLang(self.type,ascpy.SymChar("inline"))
self.inputs = []
self.outputs = []
self.params = []
for n in nn:
t = n.getText()
if t[0:min(len(t),3)]=="in:":
self.inputs += [[n.getId(),self.type.findMember(n.getId()),str(t)]]
elif t[0:min(len(t),4)]=="out:":
self.outputs += [[n.getId(),self.type.findMember(n.getId()),str(t)]]
elif t[0:min(len(t),6)]=="param:":
self.params += [[n.getId(),self.type.findMember(n.getId()),str(t)]]
self.iconfile = None
nn = notesdb.getTypeRefinedNotesLang(self.type,ascpy.SymChar("icon"))
if nn:
n = nn[0].getText()
if os.path.exists(n):
self.iconfile = n
self.name = None
nn = notesdb.getTypeRefinedNotesLang(self.type,ascpy.SymChar("block"))
if nn:
self.name = nn[0].getText()
# Fetch the graphic string from the model file, parse it and store it
# as a list of lists.
nn = notesdb.getTypeRefinedNotesLang(self.type,ascpy.SymChar("graphic"))
if nn:
t = nn[0].getText().split("\n")
for n in t:
temp = n.split("-")
ll = []
for k in temp:
tt = k.split(",")
pp = []
for q in tt:
q = q.strip("\t")
pp.append(q)
ll.append(pp)
self.gr.append(ll)
self.iconfile = self.create_icon(48,48)
nn = notesdb.getTypeRefinedNotesLang(self.type,ascpy.SymChar("port_in"))
if nn:
n = nn[0].getText().split(" ")
for m in n:
tt = m.split("-")
for k in tt:
tpp = k.split(":")
loc = tpp[1].split(",")
xy = []
xy.append(loc[0])
xy.append(loc[1])
self.port_in[str(tpp[0])] = xy
nn = notesdb.getTypeRefinedNotesLang(self.type,ascpy.SymChar("port_out"))
if nn:
n = nn[0].getText().split(" ")
for m in n:
tt = m.split("-")
for k in tt:
tpp = k.split(":")
loc = tpp[1].split(",")
xy = []
xy.append(loc[0])
xy.append(loc[1])
self.port_out[str(tpp[0])] = xy
nn = notesdb.getTypeRefinedNotesLang(self.type,ascpy.SymChar("array"))
for n in nn:
if n:
t = n.getText()
self.arrays.append([n.getText(),self.type.findMember(n.getText())])
#print self.arrays
def get_icon(self, width, height):
"""
Get a pixbuf representation of the block for use in the block palette
(or possibly elsewhere)
"""
f = self.iconfile
if self.iconfile is None:
f = "defaultblock.svg"
return gtk.gdk.pixbuf_new_from_file_at_size(str(f),width,height)
def create_icon(self,width,height):
properties = self.gr
if len(properties) == 0:
return None
fo = file("%s.svg"%self.name,'w')
## Prepare a destination surface -> out to an SVG file!
surface = cairo.SVGSurface (fo,width,height)
c = cairo.Context (surface)
for m in properties:
c.move_to(float(m[0][0])*width*0.1,float(m[0][1])*height*0.1)
for mm in m:
c.line_to(float(mm[0])*width*0.1,float(mm[1])*height*0.1)
c.stroke()
surface.finish()
return fo.name
def __getstate__(self):
state = self.__dict__.copy()
state['type'] = str(self.type)
state['notesdb'] = None
state['inputs'] = []
state['outputs'] = []
state['params'] = []
#state['inputs'] = [[str(x) for x in self.inputs[i]] for i in range(len(self.inputs))]
#state['outputs'] = [[str(x) for x in self.outputs[i]] for i in range(len(self.outputs))]
#state['params'] = [[str(x) for x in self.params[i]] for i in range(len(self.params))]
return(state)
def __setstate__(self, state):
self.__dict__ = state
def reattach_ascend(self,library, notesdb):
self.type = library.findType(self.type)
nn = notesdb.getTypeRefinedNotesLang(self.type,ascpy.SymChar("inline"))
self.inputs = []
self.outputs = []
self.params = []
for n in nn:
t = n.getText()
if t[0:min(len(t),3)]=="in:":
self.inputs += [[n.getId(),self.type.findMember(n.getId()),str(t)]]
elif t[0:min(len(t),4)]=="out:":
self.outputs += [[n.getId(),self.type.findMember(n.getId()),str(t)]]
elif t[0:min(len(t),6)]=="param:":
self.params += [[n.getId(),self.type.findMember(n.getId()),str(t)]]
print "Reattached type '%s', with %d inputs, %d outputs" % (self.type.getName(), len(self.inputs), len(self.outputs))
def get_input_name(self, index):
return self.inputs[index].getText()
def get_output_name(self, index):
return self.outputs[index].getText()
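# Illustrative usage (editor's sketch; 'typedesc' and 'notesdb' are assumed to
# come from an already loaded ASCEND library and its NOTES database):
#
#   bt = BlockType(typedesc, notesdb)
#   pixbuf = bt.get_icon(48, 48)   # gtk.gdk.Pixbuf for the block palette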
| gpl-2.0 | -6,843,942,618,406,928,000 | 29.480226 | 119 | 0.638369 | false |
kou/arrow | dev/archery/archery/utils/report.py | 6 | 1934 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from abc import ABCMeta, abstractmethod
import datetime
import jinja2
def markdown_escape(s):
for char in ('*', '#', '_', '~', '`', '>'):
s = s.replace(char, '\\' + char)
return s
class Report(metaclass=ABCMeta):
def __init__(self, **kwargs):
for field in self.fields:
if field not in kwargs:
raise ValueError('Missing keyword argument {}'.format(field))
self._data = kwargs
def __getattr__(self, key):
return self._data[key]
@abstractmethod
def fields(self):
pass
@property
@abstractmethod
def templates(self):
pass
class JinjaReport(Report):
def __init__(self, **kwargs):
self.env = jinja2.Environment(
loader=jinja2.PackageLoader('archery', 'templates')
)
self.env.filters['md'] = markdown_escape
self.env.globals['today'] = datetime.date.today
super().__init__(**kwargs)
def render(self, template_name):
template_path = self.templates[template_name]
template = self.env.get_template(template_path)
return template.render(**self._data)
| apache-2.0 | -4,725,650,479,019,826,000 | 29.21875 | 77 | 0.662875 | false |
Sumith1896/sympy | sympy/printing/tests/test_precedence.py | 77 | 2765 | from sympy.concrete.products import Product
from sympy.concrete.summations import Sum
from sympy.core.function import Derivative
from sympy.core.numbers import Integer, Rational, Float, oo
from sympy.core.relational import Rel
from sympy.core.symbol import symbols
from sympy.functions import sin
from sympy.integrals.integrals import Integral
from sympy.series.order import Order
from sympy.printing.precedence import precedence, PRECEDENCE
x, y = symbols("x,y")
def test_Add():
assert precedence(x + y) == PRECEDENCE["Add"]
assert precedence(x*y + 1) == PRECEDENCE["Add"]
def test_Function():
assert precedence(sin(x)) == PRECEDENCE["Atom"]
assert precedence(Derivative(x, y)) == PRECEDENCE["Atom"]
def test_Integral():
assert precedence(Integral(x, y)) == PRECEDENCE["Atom"]
def test_Mul():
assert precedence(x*y) == PRECEDENCE["Mul"]
assert precedence(-x*y) == PRECEDENCE["Add"]
def test_Number():
assert precedence(Integer(0)) == PRECEDENCE["Atom"]
assert precedence(Integer(1)) == PRECEDENCE["Atom"]
assert precedence(Integer(-1)) == PRECEDENCE["Add"]
assert precedence(Integer(10)) == PRECEDENCE["Atom"]
assert precedence(Rational(5, 2)) == PRECEDENCE["Mul"]
assert precedence(Rational(-5, 2)) == PRECEDENCE["Add"]
assert precedence(Float(5)) == PRECEDENCE["Atom"]
assert precedence(Float(-5)) == PRECEDENCE["Add"]
assert precedence(oo) == PRECEDENCE["Atom"]
assert precedence(-oo) == PRECEDENCE["Add"]
def test_Order():
assert precedence(Order(x)) == PRECEDENCE["Atom"]
def test_Pow():
assert precedence(x**y) == PRECEDENCE["Pow"]
assert precedence(-x**y) == PRECEDENCE["Add"]
assert precedence(x**-y) == PRECEDENCE["Pow"]
def test_Product():
assert precedence(Product(x, (x, y, y + 1))) == PRECEDENCE["Atom"]
def test_Relational():
assert precedence(Rel(x + y, y, "<")) == PRECEDENCE["Relational"]
def test_Sum():
assert precedence(Sum(x, (x, y, y + 1))) == PRECEDENCE["Atom"]
def test_Symbol():
assert precedence(x) == PRECEDENCE["Atom"]
def test_And_Or():
# precedence relations between logical operators, ...
assert precedence(x & y) > precedence(x | y)
assert precedence(~y) > precedence(x & y)
# ... and with other operators (cfr. other programming languages)
assert precedence(x + y) > precedence(x | y)
assert precedence(x + y) > precedence(x & y)
assert precedence(x*y) > precedence(x | y)
assert precedence(x*y) > precedence(x & y)
assert precedence(~y) > precedence(x*y)
assert precedence(~y) > precedence(x - y)
# double checks
assert precedence(x & y) == PRECEDENCE["And"]
assert precedence(x | y) == PRECEDENCE["Or"]
assert precedence(~y) == PRECEDENCE["Not"]
| bsd-3-clause | -2,147,484,286,455,812,000 | 30.420455 | 70 | 0.671248 | false |
slzatz/esp8266 | conway_plus.py | 1 | 3401 | '''
This code implements Conway's Game of Life using MicroPython on an ESP8266.
It creates the color of a new life form based on the colors of its three "parents".
Some code is borrowed from a video Lady Ada did.
'''
from machine import Pin #freq
import neopixel
import utime
import uos
# at one point thought I had to increase freq for pixels to always fire correctly
#freq(160000000)
PIXEL_WIDTH = const(8)
PIXEL_HEIGHT = const(8)
MAX_BRIGHT = const(10)
pixels = neopixel.NeoPixel(Pin(13, Pin.OUT), PIXEL_WIDTH*PIXEL_HEIGHT)
def conway_step():
global board
global color_board
# need to do the following equivalent to deep copy
new_board = [board[i][:] for i in range(len(board))]
new_color_board = [color_board[i][:] for i in range(len(board))]
changed = False
for x in range(PIXEL_HEIGHT):
for y in range(PIXEL_WIDTH):
num_neighbors = board[x-1][y-1] + board[x][y-1] + board[x+1][y-1] + board[x-1][y] \
+ board[x+1][y] + board[x+1][y+1] + board[x][y+1] + board[x-1][y+1]
if board[x][y] and not (2 <= num_neighbors <=3):
new_board[x][y] = 0
new_color_board[x][y] = (0,0,0)
changed = True
elif not board[x][y] and num_neighbors == 3:
new_board[x][y] = 1
#color_board[x][y] = color()
# to add multiple same length tuples: tuple(map(sum, zip(a,b,...)))
# note that only three of the below should be nonzero - (0,0,0,)
color = tuple(map(sum, zip(color_board[x-1][y-1], color_board[x][y-1], color_board[x+1][y-1], color_board[x-1][y],
color_board[x+1][y], color_board[x+1][y+1], color_board[x][y+1], color_board[x-1][y+1])))
new_color_board[x][y] = (color[0]//3, color[1]//3, color[2]//3)
                print("New dot's color =", new_color_board[x][y])
changed = True
board = new_board
color_board = new_color_board
return changed
def conway_rand():
global board
global color_board
pixels.fill((0,0,0))
pixels.write()
utime.sleep(2)
# note that the + 1 below means that the board[X][-1] pulls in zero from a position beyond the grid
board = [[0 for j in range(PIXEL_WIDTH + 1)] for i in range(PIXEL_HEIGHT + 1)]
color_board = [[(0,0,0) for j in range(PIXEL_WIDTH + 1)] for i in range(PIXEL_HEIGHT + 1)]
for x in range(PIXEL_HEIGHT):
for y in range(PIXEL_WIDTH):
board[x][y] = uos.urandom(1)[0]//128
if board[x][y]:
color_board[x][y] = color()
else:
color_board[x][y] = (0,0,0)
print("--------------------------------------------")
for x in range(PIXEL_HEIGHT):
print(board[x][0:PIXEL_WIDTH])
for y in range(PIXEL_WIDTH):
pixels[x * PIXEL_HEIGHT + y] = color_board[x][y]
print("--------------------------------------------")
pixels.write()
utime.sleep(3)
def random():
return MAX_BRIGHT*(uos.urandom(1)[0]//128)
def color():
return (random(), random(), random())
conway_rand()
n = 0
while True:
if not conway_step():
utime.sleep(5)
conway_rand()
print("--------------------------------------------")
for x in range(PIXEL_HEIGHT):
print(board[x][0:PIXEL_WIDTH])
for y in range(PIXEL_WIDTH):
pixels[x * PIXEL_HEIGHT + y] = color_board[x][y]
print("--------------------------------------------")
pixels.write()
utime.sleep(0.1)
n+=1
if n > 100:
conway_rand()
n = 0
| mit | -4,634,686,732,048,949,000 | 29.918182 | 140 | 0.568362 | false |
nickweinberg/Python-Video-Poker-Sim | video_poker_sim/enumerations.py | 1 | 6955 | import numpy as np
import itertools
import scipy.misc as sc
import cPickle as pickle
from itertools import combinations
import sys
import os
import timeit
from hand_scoring import get_hand_type, payout
"""
1) Enumerate all the possibilities.
2) Save these data structures to disk.
   - That way we only have to run this operation once.
"""
def sort_and_join(combo):
# str combo ie. ['12', '13']
return '-'.join(sorted(combo))
def create_deck():
# returns deck
color = [1,2,3,4]
value = [0,1,2,3,4,5,6,7,8,9,10,11,12]
color_str = [str(c) for c in color]
value_str = [str(v) for v in value]
return [[v, c] for c in color for v in value]
def create_dicts_and_save():
start_time = timeit.default_timer()
# create deck
    color = [1,2,3,4]  # names of suits don't matter
value = [0,1,2,3,4,5,6,7,8,9,10,11,12]
color_str = [str(c) for c in color]
value_str = [str(v) for v in value]
deck = [''.join([v, c]) for c in color_str
for v in value_str ]
"""
possible hand ranks = 0,1,2,3,4,5,6,7,8,9
{
# index is hand rank, value is # of possibilities
'CARD_STR': [0,0,0,0,0,0,0,0,0]
}
"""
total_hand_combos = [0 for _ in range(9)]
# total # of combinations of each hand type for individual card
total_hand_combos_one = {card: [0 for _ in range(9)] for card in deck}
# " " for two cards
total_hand_combos_two = {sort_and_join(combo): [0 for _ in range(9)]
for combo in combinations(deck, 2)}
# " " for three cards
total_hand_combos_three = {sort_and_join(combo): [0 for _ in range(9)]
for combo in combinations(deck, 3)}
# " " for four cards
total_hand_combos_four = {sort_and_join(combo): [0 for _ in range(9)]
for combo in combinations(deck, 4)}
# " " for five cards - a dealt hand. each hand type is easy 0 or 1
total_hand_combos_five = {sort_and_join(combo): [0 for _ in range(9)]
for combo in combinations(deck, 5)}
print('runtime: %f') % (timeit.default_timer() - start_time)
# save to disk
pickle.dump(total_hand_combos_one, open('data/total_hand_combos_one.p', 'wb'))
pickle.dump(total_hand_combos_two, open('data/total_hand_combos_two.p', 'wb'))
pickle.dump(total_hand_combos_three, open('data/total_hand_combos_three.p', 'wb'))
pickle.dump(total_hand_combos_four, open('data/total_hand_combos_four.p', 'wb'))
pickle.dump(total_hand_combos_five, open('data/total_hand_combos_five.p', 'wb'))
print('files saved')
def load_dicts(filename):
# one_c = pickle.load(open('data/total_hand_combos_one.p', 'rb'))
# two_c = pickle.load(open('data/total_hand_combos_two.p', 'rb'))
# three_c = pickle.load(open('data/total_hand_combos_three.p', 'rb'))
# four_c = pickle.load(open('data/total_hand_combos_four.p', 'rb'))
# five_c = pickle.load(open('data/total_hand_combos_five.p', 'rb'))
return pickle.load(open('data/%s' % (filename), 'rb'))
def str_to_card(card_str):
# intput: card_str. ex. '131' or '11', etc.
# returns tuple (int, int)
if len(card_str) == 3:
rank = int(card_str[0:2])
suit = int(card_str[2])
elif len(card_str) == 2:
rank = int(card_str[0])
suit = int(card_str[1])
return rank, suit
def str_to_card_str(card_str):
str_list = []
if len(card_str) == 3:
str_list.append(card_str[0:2])
str_list.append(card_str[2])
elif len(card_str) == 2:
str_list.append(card_str[0])
str_list.append(card_str[1])
return ''.join(str_list)
def cards_to_str(hand):
return [''.join([str(c[0]), str(c[1])]) for c in hand]
# three_card_combos =
# four_card_combos =
# five_card_combos
# Loop through all 2,598,960 (or w/e possible hands)
def main():
start_time = timeit.default_timer() # timer to track runtime length
deck = create_deck() # create deck
total_hand_type_combos = [0 for _ in range(9)]
# load dictionaries
one_card_combos = load_dicts('total_hand_combos_one.p')
two_card_combos = load_dicts('total_hand_combos_two.p')
three_card_combos = load_dicts('total_hand_combos_three.p')
four_card_combos = load_dicts('total_hand_combos_four.p')
five_card_combos = load_dicts('total_hand_combos_five.p')
for hand in combinations(deck, 5):
# hand :: ex. ([0, 1],[3,4],[4,4],[8,4],[12,4])
# 1) Score the hand to determine what hand type it is.
# hand score 9 -> 8. (index of arrays - 1)
hand_score_index = get_hand_type(hand) - 1
# 2) Update total number of hands of type H
total_hand_type_combos[hand_score_index] += 1
# 3) For each of 5 individual cards, update the total
# number of hands of type H which include that card
for card in hand: # card :: [0, 1]
one_card_combos[''.join(
[str(card[0]), str(card[1])])][hand_score_index] += 1
# 4) For each of 10 combinations of 2 cards, update the total
# number of hands of type H which include both cards
# two_card_combos['C1-C2'][H] += 1
for combo in combinations(hand, 2):
two_card_str = sort_and_join(cards_to_str(combo))
two_card_combos[two_card_str][hand_score_index] += 1
# 5) For each of the 10 combinations of 3 cards, update the total
        # number of hands of type H which include all three cards
# three_card_combos['C1-C2-C3'][H] += 1
for combo in combinations(hand, 3):
three_card_str = sort_and_join(cards_to_str(combo))
three_card_combos[three_card_str][hand_score_index] += 1
# 6) For each of the 5 combinations of 4 cards, update the total
# number of hands of type H which include all four cards.
# four_card_combos['C1-C2-C3-C4'][H] += 1
for combo in combinations(hand, 4):
four_card_str = sort_and_join(cards_to_str(combo))
four_card_combos[four_card_str][hand_score_index] += 1
# 7) Update five_card_combos
# five_card_combos['C1-C2-C3-C4-C5'][H] = 1
five_card_str = sort_and_join(cards_to_str(hand))
five_card_combos[five_card_str][hand_score_index] = 1
# save to disk
pickle.dump(one_card_combos, open('data/one_card_hand_type.p', 'wb'))
pickle.dump(two_card_combos, open('data/two_card_hand_type.p', 'wb'))
pickle.dump(three_card_combos, open('data/three_card_hand_type.p', 'wb'))
pickle.dump(four_card_combos, open('data/four_card_hand_type.p', 'wb'))
pickle.dump(five_card_combos, open('data/five_card_hand_type.p', 'wb'))
print('files saved')
print('runtime: %f') % (timeit.default_timer() - start_time)
# create_dicts_and_save()
print('starting')
main()
| mit | 3,973,491,048,690,011,600 | 33.60199 | 86 | 0.598275 | false |
dharmabumstead/ansible | lib/ansible/module_utils/aws/waiters.py | 7 | 6747 | # Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
try:
import botocore.waiter as core_waiter
except ImportError:
pass # caught by HAS_BOTO3
ec2_data = {
"version": 2,
"waiters": {
"RouteTableExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeRouteTables",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(RouteTables[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidRouteTableID.NotFound",
"state": "retry"
},
]
},
"SubnetExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(Subnets[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidSubnetID.NotFound",
"state": "retry"
},
]
},
"SubnetHasMapPublic": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": True,
"argument": "Subnets[].MapPublicIpOnLaunch",
"state": "success"
},
]
},
"SubnetNoMapPublic": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": False,
"argument": "Subnets[].MapPublicIpOnLaunch",
"state": "success"
},
]
},
"SubnetHasAssignIpv6": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": True,
"argument": "Subnets[].AssignIpv6AddressOnCreation",
"state": "success"
},
]
},
"SubnetNoAssignIpv6": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": False,
"argument": "Subnets[].AssignIpv6AddressOnCreation",
"state": "success"
},
]
},
"SubnetDeleted": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(Subnets[]) > `0`",
"state": "retry"
},
{
"matcher": "error",
"expected": "InvalidSubnetID.NotFound",
"state": "success"
},
]
},
}
}
waf_data = {
"version": 2,
"waiters": {
"ChangeTokenInSync": {
"delay": 20,
"maxAttempts": 60,
"operation": "GetChangeTokenStatus",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "ChangeTokenStatus == 'INSYNC'",
"state": "success"
},
{
"matcher": "error",
"expected": "WAFInternalErrorException",
"state": "retry"
}
]
}
}
}
def ec2_model(name):
ec2_models = core_waiter.WaiterModel(waiter_config=ec2_data)
return ec2_models.get_waiter(name)
def waf_model(name):
waf_models = core_waiter.WaiterModel(waiter_config=waf_data)
return waf_models.get_waiter(name)
waiters_by_name = {
('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
'route_table_exists',
ec2_model('RouteTableExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_route_tables
)),
('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
'subnet_exists',
ec2_model('SubnetExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
'subnet_has_map_public',
ec2_model('SubnetHasMapPublic'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
'subnet_no_map_public',
ec2_model('SubnetNoMapPublic'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
'subnet_has_assign_ipv6',
ec2_model('SubnetHasAssignIpv6'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
'subnet_no_assign_ipv6',
ec2_model('SubnetNoAssignIpv6'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
'subnet_deleted',
ec2_model('SubnetDeleted'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
'change_token_in_sync',
waf_model('ChangeTokenInSync'),
core_waiter.NormalizedOperationMethod(
waf.get_change_token_status
)),
}
def get_waiter(client, waiter_name):
try:
return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
except KeyError:
raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))
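# Illustrative usage sketch (not part of the original module): how a caller
# might obtain and drive one of the custom waiters defined above; the region
# and subnet id below are assumptions for demonstration only.
#
#   import boto3
#   ec2 = boto3.client('ec2', region_name='us-east-1')
#   waiter = get_waiter(ec2, 'subnet_exists')
#   waiter.wait(SubnetIds=['subnet-0123456789abcdef0'])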
| gpl-3.0 | 6,202,891,225,532,822,000 | 30.528037 | 112 | 0.454572 | false |
claudelee/bilibili-api | GetVideoUrl/biliDownLoad.py | 2 | 2733 | #!/usr/bin/env python3
import sys
import gzip
import json
import hashlib
import re
import urllib.parse
import urllib.request
import xml.dom.minidom
import zlib
USER_AGENT = 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
APPKEY = '85eb6835b0a1034e'
APPSEC = '2ad42749773c441109bdc0191257a664'
def GetBilibiliUrl(url):
overseas=False
url_get_media = 'http://interface.bilibili.com/playurl?' if not overseas else 'http://interface.bilibili.com/v_cdn_play?'
regex_match = re.findall('http:/*[^/]+/video/av(\\d+)(/|/index.html|/index_(\\d+).html)?(\\?|#|$)',url)
if not regex_match:
raise ValueError('Invalid URL: %s' % url)
aid = regex_match[0][0]
pid = regex_match[0][2] or '1'
cid_args = {'type': 'json', 'id': aid, 'page': pid}
resp_cid = urlfetch('http://api.bilibili.com/view?'+GetSign(cid_args,APPKEY,APPSEC))
resp_cid = dict(json.loads(resp_cid.decode('utf-8', 'replace')))
cid = resp_cid.get('cid')
media_args = {'cid': cid,'quality':4}
resp_media = urlfetch(url_get_media+GetSign(media_args,APPKEY,APPSEC))
media_urls = [str(k.wholeText).strip() for i in xml.dom.minidom.parseString(resp_media.decode('utf-8', 'replace')).getElementsByTagName('durl') for j in i.getElementsByTagName('url')[:1] for k in j.childNodes if k.nodeType == 4]
return media_urls
def GetSign(params,appkey,AppSecret=None):
"""
    Get the signature required by the newer API; without it the API returns error -3.
    TODO (important): the data needs to be URL-encoded and hex escapes must be
    upper case, e.g. %2F.
"""
    params['appkey'] = appkey
    data = ""
    paras = sorted(params)
    for para in paras:
        if data != "":
            data += "&"
        data += para + "=" + str(params[para])
if AppSecret == None:
return data
m = hashlib.md5()
m.update((data+AppSecret).encode('utf-8'))
return data+'&sign='+m.hexdigest()
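# Illustrative usage sketch (not part of the original script): signing the
# query string for a cid request. The cid value is made up for demonstration;
# APPKEY and APPSEC are the module constants defined above.
#
#   signed_query = GetSign({'cid': 1234567, 'quality': 4}, APPKEY, APPSEC)
#   # -> "appkey=...&cid=1234567&quality=4&sign=<md5 of query text + AppSecret>"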
def urlfetch(url):
req_headers = {'Accept-Encoding': 'gzip, deflate'}
req = urllib.request.Request(url=url, headers=req_headers)
response = urllib.request.urlopen(req, timeout=120)
content_encoding = response.info().get('Content-Encoding')
if content_encoding == 'gzip':
data = gzip.GzipFile(fileobj=response).read()
elif content_encoding == 'deflate':
decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
data = decompressobj.decompress(response.read())+decompressobj.flush()
else:
data = response.read()
return data
if __name__ == '__main__':
if len(sys.argv) == 1:
        print('Please provide the URL of a video page')
else:
media_urls = GetBilibiliUrl(sys.argv[1])
for i in media_urls:
print(i)
| mit | 8,379,472,834,232,661,000 | 34.16 | 232 | 0.633675 | false |
epigenomics/methylmaps | MethylAnalyzer/UtilityFuncs.py | 1 | 6046 | # Utility functions
"""
Utility functions
"""
import os
import re
import fnmatch
from types import IntType, LongType, FloatType, NoneType, StringType
from MethylAnalyzer.MethError import MethError
# Define epsilon
EPSILON = 0.0000001
def sortedDictValues(adict):
"Sort a dict"
keys = adict.keys()
keys.sort()
return [adict[key] for key in keys]
def all_files(root, patterns='*', single_level=True):
"""
Iterate files under a given directory and for given patterns
Arguments:
o root - string, directory
o patterns - string, patterns separated by ;
o single_level - boolean, default: True
"""
# Expand patterns from semicolon
patterns = patterns.split(';')
for path, subdirs, files in os.walk(root):
files.sort()
for name in files:
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
yield os.path.join(path, name)
break
if single_level:
break
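# Illustrative usage sketch (not part of the original module): iterate over
# *.txt and *.csv files directly under a directory; the path is an assumption
# for demonstration only.
#
#   for fname in all_files('/tmp/meth_data', patterns='*.txt;*.csv'):
#       print fname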
def get_dbpara(db_file):
"Get database access information from a db parameter file"
dbfile = check_file(db_file, PATH='/Users/yurong/src')
dbfh = open(dbfile)
for line in dbfh:
line = line.rstrip()
line_list = line.split('\t')
if line_list[0] == 'dbhost':
dbhost = line_list[1]
elif line_list[0] == 'dbuser':
dbuser = line_list[1]
elif line_list[0] == 'dbpasswd':
dbpasswd = line_list[1]
dbfh.close()
return (dbhost, dbuser, dbpasswd)
def search_file(filename, search_path):
"Find file for given name and search path"
candidate = os.path.join(search_path, filename)
if os.path.isfile(candidate):
return os.path.abspath(candidate)
return None
def check_chr(chrN):
"Check the chromosome name: chrN"
if re.search('chr\w+', chrN):
return chrN
raise MethError('Wrong chromosome name %s' % chrN)
def check_dir(path):
if os.path.isdir(path):
return os.path.abspath(path)
raise MethError('Such directory [%s] does not exist' % path)
def check_file(filename, PATH=None):
"Check if the file exits"
if PATH is not None:
candidate = os.path.join(PATH, filename)
else:
candidate = filename
candidate = os.path.abspath(candidate)
if os.path.isfile(candidate):
return candidate
raise MethError('Cannot find file %s' % candidate)
def count_file_lines(file):
"""
Count lines of the file.
"""
count = -1
for count, line in enumerate(open(file, 'rU')):
pass
count += 1
return count
def finditer(text, pattern):
"Find the pattern and return the iterator"
pos = -1
while True:
pos = text.find(pattern, pos+1)
if pos < 0:
break
yield pos
def contstr(attrbt, precision=None):
"""
    Convert other types to string.
The main purpose of the function is to check None type.
Arguments:
o attrbt - attribute(s) of objects
o precision - string of print format, default is '%.1f'
type:
o float
o int (including LongType)
o string
o None
o list
o tuple
o array
Return values:
o string of the attribute(s)
"""
if type(attrbt) is NoneType or attrbt == []:
return 'NULL'
elif type(attrbt) is StringType:
return attrbt
elif type(attrbt) is IntType or type(attrbt) is LongType:
return str(attrbt)
elif type(attrbt) is FloatType:
if precision == None:
return "%.1f" % attrbt
else:
return precision % attrbt
else: # list or other similar types
str_attrbt = []
for item in attrbt:
if type(item) is FloatType:
if precision == None:
str_attrbt.append("%.1f" % item)
else:
str_attrbt.append(precision % item)
elif type(item) is NoneType:
str_attrbt.append('NULL')
else:
str_attrbt.append(str(item))
        return "\t".join(str_attrbt)
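# Illustrative usage sketch (not part of the original module):
#
#   contstr(None)              # -> 'NULL'
#   contstr(3.14159)           # -> '3.1'
#   contstr(2.5, '%.3f')       # -> '2.500'
#   contstr([1, None, 0.5])    # -> '1\tNULL\t0.5'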
def read_seq(seqfh, format):
"Get the sequence based on the format (fasta or genbank)"
seq = ''
if format == 'fasta':
for line in seqfh:
if re.search('^>', line):
continue
if re.search('\w', line):
line = line.rstrip()
seq += line
else:
raise MethError('Wrong sequence format')
return seq.upper()
def get_sinfo(gfile):
"""
Get table information for two groups
Sample info file:
# group 1
table_name1 label1
# group 2
table_name2 label2
"""
g1_tabinfo = {} # sql table name: short sample name
g2_tabinfo = {}
gfh = open(gfile)
for line in gfh:
if re.search('^#', line) and re.search('group 1', line):
tinfo = gfh.next().rstrip().split('\t')
g1_tabinfo[tinfo[0]] = tinfo[1]
elif re.search('^#', line) and re.search('group 2', line):
tinfo = gfh.next().rstrip().split('\t')
g2_tabinfo[tinfo[0]] = tinfo[1]
else:
continue
gfh.close()
return g1_tabinfo, g2_tabinfo
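# Illustrative sketch (not part of the original module): for a tab-separated
# sample info file containing
#
#   # group 1
#   meth_sample_a<TAB>A
#   # group 2
#   meth_sample_b<TAB>B
#
# get_sinfo() would return ({'meth_sample_a': 'A'}, {'meth_sample_b': 'B'});
# the table and label names are made up for demonstration.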
def get_sinfo_nogroup(gfile):
"""
Samples are not divided into groups.
Sample info file:
table_name1 label1
table_name2 label2
Return:
o tabinfo - dict
"""
tabinfo = {} # sql table name: short sample name
gfh = open(gfile)
for line in gfh:
tinfo = line.rstrip().split('\t')
tabinfo[tinfo[0]] = tinfo[1]
gfh.close()
return tabinfo
def cpgdensity_byseq(start, end, seq):
"""
Compute CpG density for given coordinates and fasta file
Input:
o start - int, 0-based
o end - int, 1-based
o seq - sequence string
Output:
o CpG density - float
"""
cpg_num = seq.count('CG', start, end)
cpg_density = float(cpg_num * 2)/float(end - start)
return cpg_density
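# Illustrative usage sketch (not part of the original module): two CpG sites
# in a 12-base window give a density of 4/12.
#
#   seq = 'ACGTACGTAAAA'
#   cpgdensity_byseq(0, 12, seq)   # -> 0.333... (2 CpGs * 2 / 12 bases)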
| gpl-3.0 | -3,655,132,230,393,646,600 | 26.607306 | 68 | 0.579226 | false |
priyaganti/rockstor-core | src/rockstor/smart_manager/views/base_service.py | 2 | 3648 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from smart_manager.models import (Service, ServiceStatus)
from django.conf import settings
from smart_manager.serializers import ServiceStatusSerializer
import json
import rest_framework_custom as rfc
from rest_framework.response import Response
from system.services import service_status
from django.db import transaction
from django.utils.timezone import utc
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class ServiceMixin(object):
def _save_config(self, service, config):
service.config = json.dumps(config)
return service.save()
def _get_config(self, service):
return json.loads(service.config)
def _get_or_create_sso(self, service):
ts = datetime.utcnow().replace(tzinfo=utc)
so = None
if (ServiceStatus.objects.filter(service=service).exists()):
so = ServiceStatus.objects.filter(
service=service).order_by('-ts')[0]
else:
so = ServiceStatus(service=service, count=0)
so.status = self._get_status(service)
so.count += 1
so.ts = ts
so.save()
return so
def _get_status(self, service):
try:
config = None
if (service.config is not None):
config = self._get_config(service)
o, e, rc = service_status(service.name, config)
if (rc == 0):
return True
return False
except Exception as e:
msg = ('Exception while querying status of service(%s): %s' %
(service.name, e.__str__()))
logger.error(msg)
logger.exception(e)
return False
class BaseServiceView(ServiceMixin, rfc.GenericView):
serializer_class = ServiceStatusSerializer
@transaction.atomic
def get_queryset(self, *args, **kwargs):
with self._handle_exception(self.request):
limit = self.request.query_params.get(
'limit', settings.REST_FRAMEWORK['MAX_LIMIT'])
limit = int(limit)
url_fields = self.request.path.strip('/').split('/')
if (len(url_fields) < 4):
sos = []
for s in Service.objects.all():
sos.append(self._get_or_create_sso(s))
return sorted(sos, cmp=lambda x, y: cmp(x.display_name, y.display_name)) # noqa
class BaseServiceDetailView(ServiceMixin, rfc.GenericView):
serializer_class = ServiceStatusSerializer
@transaction.atomic
def get(self, request, *args, **kwargs):
with self._handle_exception(self.request, msg=None):
url_fields = self.request.path.strip('/').split('/')
s = Service.objects.get(name=url_fields[3])
self.paginate_by = 0
serialized_data = ServiceStatusSerializer(
self._get_or_create_sso(s))
return Response(serialized_data.data)
| gpl-3.0 | -4,754,179,663,635,151,000 | 34.764706 | 96 | 0.640899 | false |
michael-dev2rights/ansible | lib/ansible/modules/cloud/amazon/rds_snapshot_facts.py | 1 | 5139 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: rds_snapshot_facts
version_added: "2.5"
short_description: obtain facts about one or more RDS snapshots
description:
- obtain facts about one or more RDS snapshots. This does not currently include
- Aurora snapshots but may in future change to include them.
options:
db_snapshot_identifier:
description:
- Name of an RDS snapshot. Mutually exclusive with I(db_instance_identifier)
required: false
aliases:
- snapshot_name
db_instance_identifier:
description:
- RDS instance name for which to find snapshots. Mutually exclusive with I(db_snapshot_identifier)
required: false
snapshot_type:
description:
- Type of snapshot to find. By default both automated and manual
snapshots will be returned.
required: false
choices: ['automated', 'manual', 'shared', 'public']
requirements:
- "python >= 2.6"
- "boto3"
author:
- "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Get facts about an snapshot
- rds_snapshot_facts:
db_snapshot_identifier: snapshot_name
register: new_database_facts
# Get all RDS snapshots for an RDS instance
- rds_snapshot_facts:
db_instance_identifier: helloworld-rds-master
'''
RETURN = '''
snapshots:
description: zero or more snapshots that match the (optional) parameters
type: list
returned: always
sample:
"snapshots": [
{
"availability_zone": "ap-southeast-2b",
"create_time": "2017-02-23T19:36:26.303000+00:00",
"id": "rds:helloworld-rds-master-2017-02-23-19-36",
"instance_created": "2017-02-16T23:04:16.619000+00:00",
"instance_id": "helloworld-rds-master",
"snapshot_type": "automated",
"status": "available"
}
]
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
from ansible.module_utils.aws.rds import snapshot_to_facts
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
def snapshot_facts(module, conn):
snapshot_name = module.params.get('db_snapshot_identifier')
snapshot_type = module.params.get('snapshot_type')
instance_name = module.params.get('db_instance_identifier')
params = dict()
if snapshot_name:
params['DBSnapshotIdentifier'] = snapshot_name
if instance_name:
params['DBInstanceIdentifier'] = instance_name
if snapshot_type:
params['SnapshotType'] = snapshot_type
if snapshot_type == 'public':
params['IsPublic'] = True
elif snapshot_type == 'shared':
params['IsShared'] = True
# FIXME - shertel said we should replace custom paging logic with
# standard; can this apply here?
marker = ''
results = list()
while True:
try:
response = conn.describe_db_snapshots(Marker=marker, **params)
results = results + response["DBSnapshots"]
marker = response.get('Marker')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'DBSnapshotNotFound':
break
module.fail_json_aws(e, msg="trying to list all matching snapshots")
if not marker:
break
return dict(changed=False, snapshots=[snapshot_to_facts(snapshot) for snapshot in results])
argument_spec = dict(
db_snapshot_identifier=dict(aliases=['snapshot_name']),
db_instance_identifier=dict(),
snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public'])
)
def main():
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier']],
)
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(
msg="Region not specified. Unable to determine region from configuration.")
# connect to the rds endpoint
conn = boto3_conn(module, 'client', 'rds', region, **aws_connect_params)
module.exit_json(**snapshot_facts(module, conn))
if __name__ == '__main__':
main()
| gpl-3.0 | 1,031,199,106,126,059,600 | 31.11875 | 104 | 0.658105 | false |
linkedin/indextank-service | backoffice/lib/flaptor_logging.py | 4 | 3368 | import logging as pylogging
from logging import config
import os
usingNativeLogger = True
__loggers = {}
def get_logger(name, force_new=False):
'''Get the Logger instance for a given name'''
global __loggers
if __loggers is None:
__loggers = {}
if force_new:
return pylogging.getLogger(name)
if not __loggers.has_key(name):
__loggers[name] = pylogging.getLogger(name)
return __loggers[name]
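# Illustrative usage sketch (not part of the original module): loggers are
# cached per name, so repeated calls return the same instance.
#
#   log = get_logger('indexer')
#   assert log is get_logger('indexer')
#   log.info('starting up')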
class SpecialFormatter(pylogging.Formatter):
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[37;4%dm"
PIDCOLOR_SEQ = "\033[1;3%dm"
BOLD_SEQ = "\033[1m"
COLORS = {
'WARN': YELLOW,
'INFO': GREEN,
'DEBU': BLUE,
'CRIT': RED,
'ERRO': RED
}
def __init__(self, *args, **kwargs):
pylogging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
if not hasattr(record, 'prefix'): record.prefix = ''
if not hasattr(record, 'suffix'): record.suffix = ''
if not hasattr(record, 'compname'): record.compname = ''
record.pid = os.getpid()
record.levelname = record.levelname[:4]
r = pylogging.Formatter.format(self, record)
if record.levelname in SpecialFormatter.COLORS:
levelcolor = SpecialFormatter.COLOR_SEQ % (SpecialFormatter.COLORS[record.levelname])
r = r.replace('$LEVELCOLOR', levelcolor)
r = r.replace('$RESET', SpecialFormatter.RESET_SEQ)
else:
r = r.replace('$COLOR', '')
r = r.replace('$RESET', '')
pidcolor = SpecialFormatter.COLOR_SEQ % (1 + (record.pid % 5))
r = r.replace('$PIDCOLOR', pidcolor)
r = r.replace('$BOLD', SpecialFormatter.BOLD_SEQ)
return r
pylogging.SpecialFormatter = SpecialFormatter
if usingNativeLogger:
try:
config.fileConfig('logging.conf')
except Exception, e:
print e
#class NativePythonLogger:
# def __init__(self, name):
# '''Creates a new Logger for the given name.
# Do not call this method directly, instead use
# get_logger(name) to get the appropriate instance'''
# self.name = name
# self.__logger = pylogging.getLogger(name)
# #self.updateLevel(5)
#
# def updateLevel(self, level):
# self.__level = level
# if level == 1:
# self.__logger.setLevel(pylogging.CRITICAL)
# elif level == 2:
# self.__logger.setLevel(pylogging.INFO)
# elif level == 3:
# self.__logger.setLevel(pylogging.WARNING)
# elif level == 4:
# self.__logger.setLevel(pylogging.INFO)
# elif level == 5:
# self.__logger.setLevel(pylogging.DEBUG)
#
# def debug(self, format_str, *values):
# self.__logger.debug(format_str, *values)
# def info(self, format_str, *values):
# self.__logger.info(format_str, *values)
# def warn(self, format_str, *values):
# self.__logger.warn(format_str, *values)
# def error(self, format_str, *values):
# self.__logger.error(format_str, *values)
# def exception(self, format_str, *values):
# self.__logger.exception(format_str, *values)
# def fatal(self, format_str, *values):
# self.__logger.critical(format_str, *values)
| apache-2.0 | -4,077,849,168,234,189,000 | 32.68 | 97 | 0.590558 | false |
makyo/honeycomb | administration/ban_views.py | 2 | 4290 | from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth import logout
from django.contrib.auth.decorators import (
login_required,
permission_required,
)
from django.contrib.auth.models import User
from django.contrib import messages
from django.db.models import Q
from django.shortcuts import (
get_object_or_404,
redirect,
render,
)
from django.utils import timezone
from django.views.decorators.http import require_POST
from .forms import (
BanForm,
)
from .models import (
Ban,
Flag,
)
# from activitystream.models import Activity
# from usermgmt.models import Notification
@permission_required('administration.can_list_bans', raise_exception=True)
@staff_member_required
def list_bans(request):
if request.GET.get('all'):
bans = Ban.objects.all()
else:
bans = Ban.objects.filter(active=True)
return render(request, 'list_bans.html', {
'bans': bans,
'tab': 'bans',
'showing_inactive': request.GET.get('all'),
})
@permission_required('administration.can_list_bans', raise_exception=True)
@login_required
def list_participating_bans(request):
query = Q(admin_contact=request.user)
if request.GET.get('all') is None:
query &= Q(active=True)
bans = Ban.objects.filter(query)
return render(request, 'list_bans.html', {
'bans': bans,
'tab': 'bans',
'showing_inactive': request.GET.get('all')
})
@permission_required('administration.can_ban_users', raise_exception=True)
@staff_member_required
def create_ban(request):
if request.method == 'GET':
user = get_object_or_404(User, username=request.GET.get('user'))
else:
user = get_object_or_404(User, pk=request.POST.get('user'))
if user == request.user or user.is_superuser:
messages.error(request, "You cannot ban yourself or superusers.")
return render(request, 'permission_denied.html', {
'title': 'Permission denied',
}, status=403)
if not user.is_active:
messages.error(request, "You cannot ban an inactive user.")
return render(request, 'permission_denied.html', {
'title': 'Permission denied',
}, status=403)
form = BanForm(initial={
'user': user,
'end_date': timezone.now(),
'flags': Flag.objects.filter(pk=request.GET.get('flag')),
})
form.fields['flags'].queryset = Flag.objects.filter(
flagged_object_owner=user)
if request.method == 'POST':
form = BanForm(request.POST)
if form.is_valid():
ban = form.save(commit=False)
ban.admin_contact = request.user
ban.save()
form.save_m2m()
ban.user.profile.banned = True
ban.user.profile.save()
return redirect(ban.get_absolute_url())
return render(request, 'create_ban.html', {
'form': form,
'tab': 'bans',
})
@permission_required('administration.can_view_bans', raise_exception=True)
@staff_member_required
def view_ban(request, ban_id=None):
ban = get_object_or_404(Ban, pk=ban_id)
return render(request, 'view_ban.html', {
'title': "{}'s ban".format(ban.user.profile.get_display_name()),
'ban': ban,
'tab': 'bans',
})
@permission_required('administration.can_lift_bans', raise_exception=True)
@staff_member_required
@require_POST
def lift_ban(request, ban_id=None):
ban = get_object_or_404(Ban, pk=ban_id)
ban.active = False
ban.save()
ban.user.profile.banned = False
ban.user.profile.save()
ban.user.is_active = True
ban.user.save()
messages.success(request, "Ban lifted.")
return redirect(ban.get_absolute_url())
def ban_notice(request, ban_id=None, ban_hash=None):
ban = get_object_or_404(Ban, pk=ban_id, user_has_viewed=False)
if ban_hash != ban.get_ban_hash() or request.user != ban.user:
return render(request, 'permission_denied.html', {
'title': 'Permission denied',
}, status=403)
ban.user.is_active = False
ban.user.save()
logout(request)
return render(request, 'view_ban.html', {
'title': 'Your account has been disabled',
'ban': ban,
'tab': 'bans',
})
| mit | 6,471,479,856,083,493,000 | 30.086957 | 74 | 0.638462 | false |
citrix-openstack-build/tempest | tempest/cli/simple_read_only/test_keystone.py | 6 | 5000 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import subprocess
from oslo.config import cfg
import tempest.cli
from tempest.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SimpleReadOnlyKeystoneClientTest(tempest.cli.ClientTestBase):
"""Basic, read-only tests for Keystone CLI client.
Checks return values and output of read-only commands.
These tests do not presume any content, nor do they create
their own. They only verify the structure of output if present.
"""
def test_admin_fake_action(self):
self.assertRaises(subprocess.CalledProcessError,
self.keystone,
'this-does-not-exist')
def test_admin_catalog_list(self):
out = self.keystone('catalog')
catalog = self.parser.details_multiple(out, with_label=True)
for svc in catalog:
if svc.get('__label'):
self.assertTrue(svc['__label'].startswith('Service:'),
msg=('Invalid beginning of service block: '
'%s' % svc['__label']))
# check that region and publicURL exists. One might also
# check for adminURL and internalURL. id seems to be optional
# and is missing in the catalog backend
self.assertIn('publicURL', svc.keys())
self.assertIn('region', svc.keys())
def test_admin_endpoint_list(self):
out = self.keystone('endpoint-list')
endpoints = self.parser.listing(out)
self.assertTableStruct(endpoints, [
'id', 'region', 'publicurl', 'internalurl',
'adminurl', 'service_id'])
def test_admin_endpoint_service_match(self):
endpoints = self.parser.listing(self.keystone('endpoint-list'))
services = self.parser.listing(self.keystone('service-list'))
svc_by_id = {}
for svc in services:
svc_by_id[svc['id']] = svc
for endpoint in endpoints:
self.assertIn(endpoint['service_id'], svc_by_id)
def test_admin_role_list(self):
roles = self.parser.listing(self.keystone('role-list'))
self.assertTableStruct(roles, ['id', 'name'])
def test_admin_service_list(self):
services = self.parser.listing(self.keystone('service-list'))
self.assertTableStruct(services, ['id', 'name', 'type', 'description'])
def test_admin_tenant_list(self):
tenants = self.parser.listing(self.keystone('tenant-list'))
self.assertTableStruct(tenants, ['id', 'name', 'enabled'])
def test_admin_user_list(self):
users = self.parser.listing(self.keystone('user-list'))
self.assertTableStruct(users, [
'id', 'name', 'enabled', 'email'])
def test_admin_user_role_list(self):
user_roles = self.parser.listing(self.keystone('user-role-list'))
self.assertTableStruct(user_roles, [
'id', 'name', 'user_id', 'tenant_id'])
def test_admin_discover(self):
discovered = self.keystone('discover')
self.assertIn('Keystone found at http', discovered)
self.assertIn('supports version', discovered)
def test_admin_help(self):
help_text = self.keystone('help')
lines = help_text.split('\n')
self.assertFirstLineStartsWith(lines, 'usage: keystone')
commands = []
cmds_start = lines.index('Positional arguments:')
cmds_end = lines.index('Optional arguments:')
command_pattern = re.compile('^ {4}([a-z0-9\-\_]+)')
for line in lines[cmds_start:cmds_end]:
match = command_pattern.match(line)
if match:
commands.append(match.group(1))
commands = set(commands)
wanted_commands = set(('catalog', 'endpoint-list', 'help',
'token-get', 'discover', 'bootstrap'))
self.assertFalse(wanted_commands - commands)
def test_admin_bashcompletion(self):
self.keystone('bash-completion')
# Optional arguments:
def test_admin_version(self):
self.keystone('', flags='--version')
def test_admin_debug_list(self):
self.keystone('catalog', flags='--debug')
def test_admin_timeout(self):
self.keystone('catalog', flags='--timeout %d' % CONF.cli.timeout)
| apache-2.0 | -754,083,462,413,544,800 | 36.878788 | 79 | 0.6262 | false |
RPGOne/Skynet | pytorch-master/torch/nn/functional.py | 1 | 24489 | """Functional interface"""
import torch
from . import _functions
from .modules import utils
from ._functions.padding import ConstantPad2d
from .modules.utils import _single, _pair, _triple
# Convolutions
ConvNd = torch._C._functions.ConvNd
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1,
groups=1):
"""Applies a 2D convolution over an input image composed of several input
planes.
See :class:`~torch.nn.Conv2d` for details and output shape.
Args:
input: input tensor (minibatch x in_channels x iH x iW)
weight: filters tensor (out_channels, in_channels/groups, kH, kW)
bias: optional bias tensor (out_channels)
stride: the stride of the convolving kernel. Can be a single number or
a tuple (sh x sw). Default: 1
padding: implicit zero padding on the input. Can be a single number or
a tuple. Default: 0
groups: split input into groups, in_channels should be divisible by
the number of groups
Examples:
>>> # With square kernels and equal stride
>>> filters = autograd.Variable(torch.randn(8,4,3,3))
>>> inputs = autograd.Variable(torch.randn(1,4,5,5))
>>> F.conv2d(inputs, filters, padding=1)
"""
f = ConvNd(_pair(stride), _pair(padding), _pair(dilation), False,
_pair(0), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled)
return f(input, weight, bias)
def conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1,
groups=1):
"""Applies a 1D convolution over an input signal composed of several input
planes.
See :class:`~torch.nn.Conv1d` for details and output shape.
Args:
input: input tensor of shape (minibatch x in_channels x iW)
weight: filters of shape (out_channels, in_channels, kW)
bias: optional bias of shape (out_channels)
stride: the stride of the convolving kernel, default 1
Examples:
>>> filters = autograd.Variable(torch.randn(33, 16, 3))
>>> inputs = autograd.Variable(torch.randn(20, 16, 50))
>>> F.conv1d(inputs, filters)
"""
f = ConvNd(_single(stride), _single(padding), _single(dilation), False,
_single(0), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled)
return f(input, weight, bias)
def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1,
groups=1):
"""Applies a 3D convolution over an input image composed of several input
planes.
See :class:`~torch.nn.Conv3d` for details and output shape.
Args:
input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
weight: filters tensor of shape (out_channels, in_channels, kT, kH, kW)
bias: optional bias tensor of shape (out_channels)
stride: the stride of the convolving kernel. Can be a single number or
a tuple (st x sh x sw). Default: 1
padding: implicit zero padding on the input. Can be a single number or
a tuple. Default: 0
Examples:
>>> filters = autograd.Variable(torch.randn(33, 16, 3, 3, 3))
>>> inputs = autograd.Variable(torch.randn(20, 16, 50, 10, 20))
>>> F.conv3d(inputs, filters)
"""
f = ConvNd(_triple(stride), _triple(padding), _triple(dilation), False,
_triple(0), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled)
return f(input, weight, bias)
def conv_transpose1d(input, weight, bias=None, stride=1, padding=0,
output_padding=0, groups=1):
f = ConvNd(_single(stride), _single(padding), _single(1), True,
_single(output_padding), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled)
return f(input, weight, bias)
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0,
output_padding=0, groups=1):
"""Applies a 2D transposed convolution operator over an input image
composed of several input planes, sometimes also called "deconvolution".
See :class:`~torch.nn.ConvTranspose2d` for details and output shape.
Args:
input: input tensor of shape (minibatch x in_channels x iH x iW)
weight: filters of shape (in_channels x out_channels x kH x kW)
bias: optional bias of shape (out_channels)
stride: the stride of the convolving kernel, a single number or a
tuple (sh x sw). Default: 1
padding: implicit zero padding on the input, a single number or a
tuple (padh x padw). Default: 0
groups: split input into groups, in_channels should be divisible by
the number of groups
output_padding: A zero-padding of 0 <= padding < stride that should be
added to the output. Can be a single number or a tuple. Default: 0
"""
f = ConvNd(_pair(stride), _pair(padding), _pair(1), True,
_pair(output_padding), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled)
return f(input, weight, bias)
def conv_transpose3d(input, weight, bias=None, stride=1, padding=0,
output_padding=0, groups=1):
"""Applies a 3D transposed convolution operator over an input image
composed of several input planes, sometimes also called "deconvolution"
See :class:`~torch.nn.ConvTranspose3d` for details and output shape.
Args:
input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
weight: filters of shape (in_channels x out_channels x kH x kW)
bias: optional bias of shape (out_channels)
stride: the stride of the convolving kernel, a single number or a
tuple (sh x sw). Default: 1
padding: implicit zero padding on the input, a single number or a
tuple (padh x padw). Default: 0
"""
f = ConvNd(_triple(stride), _triple(padding), _triple(1), True,
_triple(output_padding), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled)
return f(input, weight, bias)
# Pooling
def avg_pool1d(input, kernel_size, stride=None, padding=0,
ceil_mode=False, count_include_pad=True):
r"""Applies a 1D average pooling over an input signal composed of several
input planes.
See :class:`~torch.nn.AvgPool1d` for details and output shape.
Args:
kernel_size: the size of the window
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on both sides
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
count_include_pad: when True, will include the zero-padding in the averaging calculation
Example:
>>> # pool of square window of size=3, stride=2
>>> input = Variable(torch.Tensor([[[1,2,3,4,5,6,7]]]))
>>> F.avg_pool1d(input, kernel_size=3, stride=2)
Variable containing:
(0 ,.,.) =
2 4 6
[torch.FloatTensor of size 1x1x3]
"""
if input.dim() != 3:
raise ValueError('expected 3D input (got {} dimensions)'
.format(input.dim()))
kernel_size = _single(kernel_size) + (1,)
stride = _single(stride) + (1,) if stride is not None else kernel_size
padding = _single(padding) + (0,)
f = _functions.thnn.AvgPool2d(kernel_size, stride, padding,
ceil_mode, count_include_pad)
return f(input.unsqueeze(3)).squeeze(3)
def avg_pool2d(input, kernel_size, stride=None, padding=0,
ceil_mode=False, count_include_pad=True):
"""Applies 2D average-pooling operation in kh x kw regions by step size
dh x dw steps. The number of output features is equal to the number of
input planes.
See :class:`~torch.nn.AvgPool2d` for details and output shape.
Args:
input: input tensor (minibatch x in_channels x iH x iW)
kernel_size: size of the pooling region, a single number or a
tuple (kh x kw)
stride: stride of the pooling operation, a single number or a
tuple (sh x sw). Default is equal to kernel size
padding: implicit zero padding on the input, a single number or
a tuple (padh x padw), Default: 0
ceil_mode: operation that defines spatial output shape
count_include_pad: divide by the number of elements inside the
original non-padded image or kh * kw
"""
return _functions.thnn.AvgPool2d(kernel_size, stride, padding,
ceil_mode, count_include_pad)(input)
def avg_pool3d(input, kernel_size, stride=None):
"""Applies 3D average-pooling operation in kt x kh x kw regions by step
size kt x dh x dw steps. The number of output features is equal to the
number of input planes / dt.
"""
return _functions.thnn.AvgPool3d(kernel_size, stride)(input)
# share the same interface
def max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False):
return _functions.thnn.MaxPool1d(kernel_size, stride, padding, dilation,
return_indices, ceil_mode)(input)
def max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False):
return _functions.thnn.MaxPool2d(kernel_size, stride, padding, dilation,
return_indices, ceil_mode)(input)
def max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False):
return _functions.thnn.MaxPool3d(kernel_size, stride, padding, dilation,
return_indices, ceil_mode)(input)
def _unpool_output_size(input, kernel_size, stride, padding, output_size):
input_size = input.size()
default_size = []
for d in range(len(kernel_size)):
default_size.append((input_size[d + 2] - 1) * stride[d] +
kernel_size[d] - 2 * padding[d])
if output_size is None:
return default_size
output_size = list(output_size)
if len(output_size) == len(kernel_size) + 2:
output_size = output_size[2:]
if len(output_size) != len(kernel_size):
raise ValueError("output_size should be a sequence containing "
"{} or {} elements, but it has a length of '{}'"
.format(len(kernel_size), len(kernel_size) + 2,
len(output_size)))
for d in range(len(kernel_size)):
min_size = default_size[d] - stride[d]
max_size = default_size[d] + stride[d]
if not (min_size < output_size[d] < max_size):
raise ValueError(
'invalid output_size "{}" (dim {} must be between {} and {})'
.format(output_size, d, min_size, max_size))
return output_size
def max_unpool1d(input, indices, kernel_size, stride=None, padding=0,
output_size=None):
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
output_size = _unpool_output_size(input, kernel_size, stride, padding,
output_size)
f = _functions.thnn.MaxUnpool2d(output_size + [1])
return f(input.unsqueeze(3), indices.unsqueeze(3)).squeeze(3)
def max_unpool2d(input, indices, kernel_size, stride=None, padding=0,
output_size=None):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
output_size = _unpool_output_size(input, kernel_size, stride, padding,
output_size)
f = _functions.thnn.MaxUnpool2d(output_size)
return f(input, indices)
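# Illustrative sketch (not part of the original module): max_unpool2d consumes
# the indices produced by max_pool2d; the sizes below are assumptions chosen
# for demonstration only.
#
#   x = torch.autograd.Variable(torch.randn(1, 1, 4, 4))
#   out, idx = max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
#   restored = max_unpool2d(out, idx, kernel_size=2, stride=2)
#   # restored has the same shape as x, with non-maximal entries set to zero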
def max_unpool3d(input, indices, kernel_size, stride=None, padding=0,
output_size=None):
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
output_size = _unpool_output_size(input, kernel_size, stride, padding,
output_size)
f = _functions.thnn.MaxUnpool3d(output_size, stride, padding)
return f(input, indices)
def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
kw, kh = utils._pair(kernel_size)
out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
return out.mul(kw * kh).pow(1. / norm_type)
def adaptive_max_pool1d(input, output_size, return_indices=False):
r"""Applies a 1D adaptive max pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveMaxPool1d` for details and output shape.
Args:
output_size: the target output size (single integer)
return_indices: whether to return pooling indices
"""
return _functions.thnn.AdaptiveMaxPool1d(output_size, return_indices)(input)
def adaptive_max_pool2d(input, output_size, return_indices=False):
r"""Applies a 2D adaptive max pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveMaxPool2d` for details and output shape.
Args:
output_size: the target output size (single integer or double-integer tuple)
return_indices: whether to return pooling indices
"""
return _functions.thnn.AdaptiveMaxPool2d(output_size, return_indices)(input)
def adaptive_avg_pool1d(input, output_size):
r"""Applies a 1D adaptive average pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveAvgPool1d` for details and output shape.
Args:
output_size: the target output size (single integer)
"""
return _functions.thnn.AdaptiveAvgPool1d(output_size)(input)
def adaptive_avg_pool2d(input, output_size):
r"""Applies a 2D adaptive average pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveAvgPool2d` for details and output shape.
Args:
output_size: the target output size (single integer or double-integer tuple)
"""
return _functions.thnn.AdaptiveAvgPool2d(output_size)(input)
# Activation functions
def dropout(input, p=0.5, training=False, inplace=False):
return _functions.dropout.Dropout(p, training, inplace)(input)
def threshold(input, threshold, value, inplace=False):
return _functions.thnn.auto.Threshold(threshold, value, inplace)(input)
def relu(input, inplace=False):
return _functions.thnn.auto.Threshold(0, 0, inplace)(input)
def hardtanh(input, min_val=-1., max_val=1., inplace=False):
return _functions.thnn.auto.Hardtanh(min_val, max_val, inplace)(input)
def relu6(input, inplace=False):
return _functions.thnn.auto.Hardtanh(0, 6, inplace)(input)
def elu(input, alpha=1., inplace=False):
return _functions.thnn.auto.ELU(alpha, inplace)(input)
def leaky_relu(input, negative_slope=1e-2, inplace=False):
return _functions.thnn.auto.LeakyReLU(negative_slope, inplace)(input)
def prelu(input, weight):
return _functions.thnn.PReLU()(input, weight)
def rrelu(input, lower=1. / 8, upper=1. / 3, training=False, inplace=False):
return _functions.thnn.RReLU(lower, upper, training, inplace)(input)
def logsigmoid(input):
return _functions.thnn.LogSigmoid()(input)
def hardshrink(input, lambd=0.5):
return _functions.thnn.auto.Hardshrink(lambd)(input)
def tanhshrink(input):
return input - torch.tanh(input)
def softsign(input):
return _functions.activation.Softsign()(input)
def softplus(input, beta=1, threshold=20):
return _functions.thnn.auto.Softplus(beta, threshold)(input)
def softmin(input):
return _functions.thnn.Softmin()(input)
def softmax(input):
return _functions.thnn.auto.Softmax()(input)
def softshrink(input, lambd=0.5):
return _functions.thnn.auto.Softshrink(lambd)(input)
def log_softmax(input):
return _functions.thnn.LogSoftmax()(input)
def tanh(input):
return torch.tanh(input)
def sigmoid(input):
return torch.sigmoid(input)
# etc.
def linear(input, weight, bias=None):
state = _functions.linear.Linear()
    if bias is None:
        return state(input, weight)
    return state(input, weight, bias)
def batch_norm(input, running_mean, running_var, weight=None, bias=None,
training=False, momentum=0.1, eps=1e-5):
f = torch._C._functions.BatchNorm(running_mean, running_var, training, momentum, eps, torch.backends.cudnn.enabled)
return f(input, weight, bias)
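# Illustrative sketch (not part of the original module): functional batch
# normalization over a minibatch of feature vectors; the sizes are assumptions
# chosen for demonstration only.
#
#   num_features = 16
#   running_mean = torch.zeros(num_features)
#   running_var = torch.ones(num_features)
#   x = torch.autograd.Variable(torch.randn(8, num_features))
#   y = batch_norm(x, running_mean, running_var, training=True, momentum=0.1)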
# loss
def nll_loss(input, target, weight=None, size_average=True):
r"""The negative log likelihood loss.
See :class:`~torch.nn.NLLLoss` for details.
Args:
input: :math:`(N, C)` where `C = number of classes`
target: :math:`(N)` where each value is `0 <= targets[i] <= C-1`
weight (Variable, optional): a manual rescaling weight given to each
class. If given, has to be a Variable of size "nclasses"
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. However, if the field
sizeAverage is set to False, the losses are instead summed
for each minibatch.
Attributes:
weight: the class-weights given as input to the constructor
Example:
>>> # input is of size nBatch x nClasses = 3 x 5
>>> input = autograd.Variable(torch.randn(3, 5))
>>> # each element in target has to have 0 <= value < nclasses
>>> target = autograd.Variable(torch.LongTensor([1, 0, 4]))
>>> output = F.nll_loss(F.log_softmax(input), target)
>>> output.backward()
"""
dim = input.dim()
if dim == 2:
f = _functions.thnn.NLLLoss(size_average, weight=weight)
elif dim == 4:
f = _functions.thnn.NLLLoss2d(size_average, weight=weight)
else:
raise ValueError('Expected 2 or 4 dimensions (got {})'.format(dim))
return f(input, target)
def kl_div(input, target, size_average=True):
r"""The `Kullback-Leibler divergence`_ Loss.
See :class:`~torch.nn.KLDivLoss` for details.
Args:
input: Variable of arbitrary shape
target: Variable of the same shape as input
size_average: if True the output is divided by the number of elements
in input tensor
"""
return _functions.thnn.KLDivLoss(size_average)(input, target)
def cross_entropy(input, target, weight=None, size_average=True):
r"""This criterion combines `log_softmax` and `nll_loss` in one single class.
See :class:`torch.nn.CrossEntropyLoss` for details.
Args:
input: Variable :math:`(N, C)` where `C = number of classes`
target: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1`
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size "nclasses"
        size_average (bool, optional): By default, the losses are averaged
            over observations for each minibatch. However, if size_average
            is set to False, the losses are instead summed for each
            minibatch.
"""
return nll_loss(log_softmax(input), target, weight, size_average)
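# Illustrative sketch, not part of the original module: a never-called helper
# showing that cross_entropy is the composition of log_softmax and nll_loss.
# The variable names below are hypothetical.
def _cross_entropy_equivalence_demo():
    from torch.autograd import Variable
    logits = Variable(torch.randn(3, 5))
    labels = Variable(torch.LongTensor([1, 0, 4]))
    combined = cross_entropy(logits, labels)
    manual = nll_loss(log_softmax(logits), labels)
    # Both values should agree up to floating-point error.
    return combined, manual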
def binary_cross_entropy(input, target, weight=None, size_average=True):
r"""Function that measures the Binary Cross Entropy
between the target and the output:
See :class:`~torch.nn.BCELoss` for details.
Args:
input: Variable of arbitrary shape
target: Variable of the same shape as input
weight (Variable, optional): a manual rescaling weight
if provided it's repeated to match input tensor shape
        size_average (bool, optional): By default, the losses are averaged
            over observations for each minibatch. However, if size_average
            is set to False, the losses are instead summed for each
            minibatch.
"""
return _functions.thnn.BCELoss(size_average, weight=weight)(input, target)
def smooth_l1_loss(input, target, size_average=True):
return _functions.thnn.SmoothL1Loss(size_average)(input, target)
def pixel_shuffle(input, upscale_factor):
r"""Rearranges elements in a tensor of shape ``[*, C*r^2, H, W]`` to a
    tensor of shape ``[*, C, H*r, W*r]``.
See :class:`~torch.nn.PixelShuffle` for details.
Args:
input (Variable): Input
upscale_factor (int): factor to increase spatial resolution by
Examples:
>>> ps = nn.PixelShuffle(3)
>>> input = autograd.Variable(torch.Tensor(1, 9, 4, 4))
>>> output = ps(input)
>>> print(output.size())
torch.Size([1, 1, 12, 12])
"""
batch_size, channels, in_height, in_width = input.size()
channels //= upscale_factor ** 2
out_height = in_height * upscale_factor
out_width = in_width * upscale_factor
input_view = input.contiguous().view(
batch_size, channels, upscale_factor, upscale_factor,
in_height, in_width)
shuffle_out = input_view.permute(0, 1, 4, 2, 5, 3).contiguous()
return shuffle_out.view(batch_size, channels, out_height, out_width)
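# Illustrative sketch, not part of the original module: a never-called helper
# demonstrating the shape transformation performed by pixel_shuffle,
# (N, C*r^2, H, W) -> (N, C, H*r, W*r). Names are hypothetical.
def _pixel_shuffle_shape_demo():
    from torch.autograd import Variable
    x = Variable(torch.randn(1, 9, 4, 4))   # C=1, r=3
    y = pixel_shuffle(x, 3)
    # y.size() should be torch.Size([1, 1, 12, 12])
    return y.size()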
def upsample_nearest(input, size=None, scale_factor=None):
"""Upsamples the input, using nearest neighbours' pixel values.
Currently only spatial upsampling is supported (i.e. expected inputs
are 4 dimensional).
Args:
input (Variable): input
size (int or Tuple[int, int]): output spatial size.
scale_factor (int): multiplier for spatial size. Has to be an integer.
"""
return _functions.thnn.UpsamplingNearest2d(size, scale_factor)(input)
def upsample_bilinear(input, size=None, scale_factor=None):
"""Upscales the input, using the bilinear upsampling.
Currently only spatial upsampling is supported (i.e. expected inputs
are 4 dimensional).
Args:
input (Variable): input
size (int or Tuple[int, int]): output spatial size.
scale_factor (int): multiplier for spatial size. Has to be an integer.
"""
return _functions.thnn.UpsamplingBilinear2d(size, scale_factor)(input)
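# Illustrative sketch, not part of the original module: a never-called helper
# doubling the spatial size of a 4D input with both upsampling modes. Names
# are hypothetical.
def _upsampling_demo():
    from torch.autograd import Variable
    x = Variable(torch.randn(1, 3, 8, 8))
    nearest = upsample_nearest(x, scale_factor=2)    # (1, 3, 16, 16)
    bilinear = upsample_bilinear(x, scale_factor=2)  # (1, 3, 16, 16)
    return nearest.size(), bilinear.size()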
def pad(input, pad, mode='constant', value=0):
"""Pads tensor.
    Currently only 2D and 3D padding is supported.
    For a 4D input tensor, pad should be in the form (pad_l, pad_r, pad_t, pad_b).
    For a 5D input tensor, pad should be (pleft, pright, ptop, pbottom, pfront, pback).
Args:
input (Variable): 4D or 5D tensor
pad (tuple): 4-elem or 6-elem tuple
mode: 'constant', 'reflect' or 'replicate'
value: fill value for 'constant' padding
"""
if input.dim() == 4:
assert len(pad) == 4, '4D tensors expect 4 values for padding'
if mode == 'constant':
return ConstantPad2d(pad, value)(input)
elif mode == 'reflect':
return _functions.thnn.ReflectionPad2d(*pad)(input)
elif mode == 'replicate':
return _functions.thnn.ReplicationPad2d(*pad)(input)
elif input.dim() == 5:
assert len(pad) == 6, '5D tensors expect 6 values for padding'
if mode == 'constant':
raise NotImplementedError
elif mode == 'reflect':
raise NotImplementedError
elif mode == 'replicate':
return _functions.thnn.ReplicationPad3d(*pad)(input)
else:
raise NotImplementedError("Only 4D and 5D padding is supported for now")
# distance
def pairwise_distance(x1, x2, p=2, eps=1e-6):
r"""
    Computes the batchwise pairwise distance between vectors x1 and x2:
.. math ::
\Vert x \Vert _p := \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}
Args:
        x1 (Tensor): first input batch
        x2 (Tensor): second input batch, of the same shape as x1
        p (real): the norm degree. Default: 2
        eps (float, optional): small value added to the elementwise
            difference for numerical stability. Default: 1e-6
Shape:
- Input: :math:`(N, D)` where `D = vector dimension`
- Output: :math:`(N, 1)`
>>> input1 = autograd.Variable(torch.randn(100, 128))
>>> input2 = autograd.Variable(torch.randn(100, 128))
>>> output = F.pairwise_distance(input1, input2, p=2)
>>> output.backward()
"""
assert x1.size() == x2.size(), "Input sizes must be equal."
assert x1.dim() == 2, "Input must be a 2D matrix."
diff = torch.abs(x1 - x2)
out = torch.pow(diff + eps, p).sum(dim=1)
return torch.pow(out, 1. / p)
| bsd-3-clause | 8,468,179,904,864,877,000 | 36.502297 | 119 | 0.644003 | false |
igord-daynix/virt-test | shared/deps/serial/serial_host_send_receive.py | 9 | 6462 | #!/usr/bin/python
import os
import socket
import struct
import optparse
try:
import hashlib
except ImportError:
import md5
class Md5MissMatch(Exception):
def __init__(self, md5_pre, md5_post):
Exception.__init__(self, md5_pre, md5_post)
self.md5_pre = md5_pre
self.md5_post = md5_post
def __str__(self):
return ("Md5 miss match. Original md5 = %s, current md5 = %s" %
(self.md5_pre, self.md5_post))
class ShakeHandError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
self.msg = msg
def __str__(self):
return ("Shake hand fail. %s" % self.msg)
def md5_init(data=None):
"""
Returns md5. This function is implemented in order to encapsulate hash
objects in a way that is compatible with python 2.4 and python 2.6
without warnings.
Note that even though python 2.6 hashlib supports hash types other than
md5 and sha1, we are artificially limiting the input values in order to
make the function to behave exactly the same among both python
implementations.
:param data: Optional input string that will be used to update the hash.
"""
try:
md5_value = hashlib.new("md5")
except NameError:
md5_value = md5.new()
if data:
md5_value.update(data)
return md5_value
def get_md5(filename, size=None):
"""
    Calculate the MD5 hash of filename.
    If size is not None, limit the hash to the first size bytes.
    Throws an exception if something is wrong with filename.
    Can also be implemented with a bash one-liner (assuming size%1024==0):
    dd if=filename bs=1024 count=size/1024 | md5sum -
    :param filename: Path of the file that will have its hash calculated.
    :param size: Optional limit (in bytes) on how much of the file is hashed.
    :return: MD5 hex digest of the (possibly truncated) file contents.
"""
chunksize = 4096
fsize = os.path.getsize(filename)
if not size or size > fsize:
size = fsize
f = open(filename, 'rb')
md5_value = md5_init()
while size > 0:
if chunksize > size:
chunksize = size
data = f.read(chunksize)
if len(data) == 0:
print("Nothing left to read but size=%d" % size)
break
md5_value.update(data)
size -= len(data)
f.close()
return md5_value.hexdigest()
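# Illustrative usage, not part of the original script: hash a hypothetical
# file fully and then only its first 4096 bytes.
#
#     full_md5 = get_md5("/tmp/example.bin")
#     partial_md5 = get_md5("/tmp/example.bin", size=4096)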
def shake_hand(connect, size=0, action="receive"):
hi_str = struct.pack("2s", "HI")
hi_str_len = len(hi_str)
if action == "send":
connect.send(hi_str)
txt = connect.recv(hi_str_len)
hi_str = struct.unpack("2s", txt)[0]
if hi_str != "HI":
raise ShakeHandError("Fail to get HI from guest.")
size_str = struct.pack("q", size)
connect.send(size_str)
txt = connect.recv(3)
ack_str = struct.unpack("3s", txt)[0]
if ack_str != "ACK":
raise ShakeHandError("Guest did not ACK the file size message.")
return size
elif action == "receive":
txt = connect.recv(hi_str_len)
hi_str = struct.unpack("2s", txt)[0]
if hi_str != "HI":
raise ShakeHandError("Fail to get HI from guest.")
connect.send(hi_str)
size = connect.recv(8)
if size:
size = struct.unpack("q", size)[0]
txt = struct.pack("3s", "ACK")
connect.send(txt)
return size
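# Annotation, not part of the original script: both ends must call
# shake_hand() with matching actions before any payload moves. The sender
# pushes "HI", waits for it to be echoed back, sends the 8-byte packed file
# size and expects "ACK"; the receiver mirrors each step. For example
# (hypothetical names):
#
#     # sender                                  # receiver
#     shake_hand(conn, f_size, action="send")   size = shake_hand(conn, action="receive")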
def receive(connect, filename, p_size=1024):
recv_size = 0
size = shake_hand(connect, action="receive")
if p_size < int(size):
        p_size = int(size)
md5_value = md5_init()
file_no = open(filename, 'wb')
try:
while recv_size < size:
txt = connect.recv(p_size)
file_no.write(txt)
md5_value.update(txt)
recv_size += len(txt)
finally:
file_no.close()
md5_sum = md5_value.hexdigest()
return md5_sum
def send(connect, filename, p_size=1024):
recv_size = 0
f_size = os.path.getsize(filename)
shake_hand(connect, f_size, action="send")
md5_value = md5_init()
file_no = open(filename, 'rb')
try:
while recv_size < f_size:
txt = file_no.read(p_size)
connect.send(txt)
md5_value.update(txt)
recv_size += len(txt)
finally:
print("received size = %s" % recv_size)
file_no.close()
md5_sum = md5_value.hexdigest()
return md5_sum
def main():
    parser = optparse.OptionParser("Transfer data between guest and host "
                                   "through virtio serial. Please make sure "
                                   "VirtIOChannel.py is running in the guest first.")
parser.add_option("-s", "--socket", dest="socket",
help="unix socket device used in qemu command"
"eg:your CLI:-chardev socket,id=channel2,"
"path=/tmp/helloworld2 ,then input"
"'/tmp/helloworld2' here")
parser.add_option("-f", "--filename", dest="filename",
help="File transfer to guest or save data to.")
parser.add_option("-a", "--action", dest="action", default="send",
help="Send data out or receive data.")
parser.add_option("-p", "--package", dest="package", default=1024,
help="Package size during file transfer.")
options, args = parser.parse_args()
if options.socket:
sock = options.socket
else:
parser.error("Please set -s parameter.")
if options.filename:
filename = options.filename
else:
parser.error("Please set -f parameter.")
action = options.action
p_size = options.package
vport = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
vport.connect(sock)
if action == "receive":
md5_sum = receive(vport, filename, p_size=p_size)
print("md5_sum = %s" % md5_sum)
elif action == "send":
md5_sum = send(vport, filename, p_size=p_size)
print("md5_sum = %s" % md5_sum)
else:
md5_ori = send(vport, filename, p_size=p_size)
print("md5_original = %s" % md5_ori)
md5_post = receive(vport, filename, p_size=p_size)
print("md5_post = %s" % md5_post)
if md5_ori != md5_post:
raise Md5MissMatch(md5_ori, md5_post)
if __name__ == "__main__":
main()
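# Illustrative invocations, not part of the original script; the socket and
# file paths are hypothetical, and the socket must match the -chardev path
# given on the qemu command line:
#
#     python serial_host_send_receive.py -s /tmp/helloworld2 -f /tmp/payload -a send
#     python serial_host_send_receive.py -s /tmp/helloworld2 -f /tmp/payload -a receive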
| gpl-2.0 | -81,704,808,578,228,690 | 29.771429 | 76 | 0.57459 | false |
raulanatol/awsebcli | ebcli/core/completer.py | 1 | 4631 | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from cement.core import controller, handler
from ..core import fileoperations, operations, io
from ..lib import aws
class CompleterController(controller.CementBaseController):
class Meta:
label = 'completer'
stacked_on = 'base'
stacked_type = 'nested'
description = 'auto-completer: hidden command'
arguments = [
(['--cmplt'], dict(help='command list so far')),
]
hide = True
@controller.expose(hide=True)
def default(self):
"""
        Creates a space-separated list of possible completions.
        We do not actually need to calculate the completions; we can simply
        generate a list of ALL possibilities, and the bash completer
        module is smart enough to filter out the ones that don't match.
        Results must be printed to stdout for the completer module
        to read them.
"""
commands = self.app.pargs.cmplt.strip('"')
# Get commands, filter out last one
commands = commands.split(' ')
word_so_far = commands[-1]
commands = commands[0:-1]
commands = list(filter(lambda x: len(x) > 0, commands))
#Get the list of controllers
self.controllers = handler.list('controller')
self._filter_controllers()
ctrlr = self._get_desired_controller(commands)
if not ctrlr:
            return  # command entered so far is invalid, we don't need to
## worry about completion
if word_so_far.startswith('--'):
# Get all base option flags
self.complete_options(ctrlr)
else:
if ctrlr == self.base_controller:
# Get standard command list
io.echo(*[c.Meta.label for c in self.controllers])
else:
# A command has been provided. Complete at a deeper level
ctrlr = ctrlr() # Instantiate so we can read all arguments
if not hasattr(ctrlr, 'complete_command'):
return # Controller does not support completion
try:
#Set up aws profile just in case we need to make a service call
profile = fileoperations.get_default_profile()
if profile:
aws.set_profile(profile)
ctrlr.complete_command(commands)
except:
#We want to swallow ALL exceptions. We can
## not print any output when trying to tab-complete
## because any output gets passed to the user as
## completion candidates
### Exceptions here are normally thrown because the service
### can not be contacted for things such as environment
### list and solution stack list. Typically, credentials
### are not set up yet
pass
def complete_options(self, controller):
# Get all base options (excluding the one for this controller)
base_options = [c.option_strings[-1] for c in self.app.args._actions if
c.option_strings[-1] != '--cmplt']
controller_options = [o[0][-1] for o in controller()._meta.arguments if
o[0][-1].startswith('--')]
io.echo(*base_options + controller_options)
def _get_desired_controller(self, commands):
if len(commands) < 1:
return self.base_controller
else:
return next((c for c in self.controllers if
c.Meta.label == commands[0]), None)
def _filter_controllers(self):
#filter out unwanted controllers
self.base_controller = next((c for c in self.controllers if
c.Meta.label == 'base'), None)
self.controllers = [c for c in self.controllers if
c.Meta.label != 'base' and
c.Meta.label != 'completer'] | apache-2.0 | 137,320,620,154,443,020 | 39.278261 | 83 | 0.581948 | false |
msmolens/VTK | Charts/Core/Testing/Python/TestLinePlot.py | 26 | 2094 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import vtk
import vtk.test.Testing
import math
class TestLinePlot(vtk.test.Testing.vtkTest):
def testLinePlot(self):
"Test if line plots can be built with python"
# Set up a 2D scene, add an XY chart to it
view = vtk.vtkContextView()
view.GetRenderer().SetBackground(1.0,1.0,1.0)
view.GetRenderWindow().SetSize(400,300)
chart = vtk.vtkChartXY()
view.GetScene().AddItem(chart)
# Create a table with some points in it
table = vtk.vtkTable()
arrX = vtk.vtkFloatArray()
arrX.SetName("X Axis")
arrC = vtk.vtkFloatArray()
arrC.SetName("Cosine")
arrS = vtk.vtkFloatArray()
arrS.SetName("Sine")
arrS2 = vtk.vtkFloatArray()
arrS2.SetName("Sine2")
numPoints = 69
inc = 7.5 / (numPoints - 1)
for i in range(0,numPoints):
arrX.InsertNextValue(i*inc)
arrC.InsertNextValue(math.cos(i * inc) + 0.0)
arrS.InsertNextValue(math.sin(i * inc) + 0.0)
arrS2.InsertNextValue(math.sin(i * inc) + 0.5)
table.AddColumn(arrX)
table.AddColumn(arrC)
table.AddColumn(arrS)
table.AddColumn(arrS2)
# Now add the line plots with appropriate colors
line = chart.AddPlot(0)
line.SetInputData(table,0,1)
line.SetColor(0,255,0,255)
line.SetWidth(1.0)
line = chart.AddPlot(0)
line.SetInputData(table,0,2)
line.SetColor(255,0,0,255)
line.SetWidth(5.0)
line = chart.AddPlot(0)
line.SetInputData(table,0,3)
line.SetColor(0,0,255,255)
line.SetWidth(4.0)
view.GetRenderWindow().SetMultiSamples(0)
view.GetRenderWindow().Render()
img_file = "TestLinePlot.png"
vtk.test.Testing.compareImage(view.GetRenderWindow(),vtk.test.Testing.getAbsImagePath(img_file),threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestLinePlot, 'test')])
| bsd-3-clause | -7,446,298,674,415,161,000 | 27.684932 | 117 | 0.599331 | false |
aronparsons/spacewalk | client/tools/rhnpush/rhnpush_confmanager.py | 4 | 4950 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import rhnpush_config
import utils
import sys
import os
class ConfManager:
def __init__(self, optionparser, store_true_list):
sysdir = '/etc/sysconfig/rhn'
homedir = utils.get_home_dir()
default = 'rhnpushrc'
regular = '.rhnpushrc'
deffile = os.path.join(sysdir, default)
regfile = os.path.join(homedir, regular)
cwdfile = os.path.join(os.getcwd(), regular)
self.cfgFileList = [deffile, regfile, cwdfile]
self.defaultconfig = rhnpush_config.rhnpushConfigParser(ensure_consistency=True)
# Get a reference to the object containing command-line options
self.cmdconfig = optionparser
self.store_true_list = store_true_list
    # Change the files option of self.defaultconfig into a list
    # Change the exclude option of self.defaultconfig into a list
def _files_to_list(self):
# Change the files options to lists.
if (self.defaultconfig.__dict__.has_key('files') and
not isinstance(self.defaultconfig.files, type([]))):
self.defaultconfig.files = [x.strip() for x in
self.defaultconfig.files.split(',')]
# Change the exclude options to list.
if (self.defaultconfig.__dict__.has_key('exclude') and
not isinstance(self.defaultconfig.__dict__['exclude'], type([]))):
self.defaultconfig.exclude = [x.strip() for x in
self.defaultconfig.exclude.split(',')]
def get_config(self):
for f in self.cfgFileList:
if os.access(f, os.F_OK):
if not os.access(f, os.R_OK):
print "rhnpush does not have read permission on %s" % f
sys.exit(1)
config2 = rhnpush_config.rhnpushConfigParser(f)
self.defaultconfig, config2 = utils.make_common_attr_equal(self.defaultconfig, config2)
self._files_to_list()
# Change the channel string into a list of strings.
# pylint: disable=E1103
if not self.defaultconfig.channel:
            # if no channel, make it an empty list instead of
            # a one-element list containing the empty string ([''])
self.defaultconfig.channel = []
else:
self.defaultconfig.channel = [x.strip() for x in
self.defaultconfig.channel.split(',')]
# Get the command line arguments. These take precedence over the other settings
argoptions, files = self.cmdconfig.parse_args()
# Makes self.defaultconfig compatible with argoptions by changing all '0' value attributes to None.
_zero_to_none(self.defaultconfig, self.store_true_list)
# If verbose isn't set at the command-line, it automatically gets set to zero. If it's at zero, change it to
# None so the settings in the config files take precedence.
if argoptions.verbose == 0:
argoptions.verbose = None
# Orgid, count, cache_lifetime, and verbose all need to be integers, just like in argoptions.
if self.defaultconfig.orgid:
self.defaultconfig.orgid = int(self.defaultconfig.orgid)
if self.defaultconfig.count:
self.defaultconfig.count = int(self.defaultconfig.count)
if self.defaultconfig.cache_lifetime:
self.defaultconfig.cache_lifetime = int(self.defaultconfig.cache_lifetime)
if self.defaultconfig.verbose:
self.defaultconfig.verbose = int(self.defaultconfig.verbose)
if self.defaultconfig.timeout:
self.defaultconfig.timeout = int(self.defaultconfig.timeout)
# Copy the settings in argoptions into self.defaultconfig.
self.defaultconfig, argoptions = utils.make_common_attr_equal(self.defaultconfig, argoptions)
# Make sure files is in the correct format.
if self.defaultconfig.files != files:
self.defaultconfig.files = files
return self.defaultconfig
# Changes every option in config that is also in store_true_list that is set to '0' to None
def _zero_to_none(config, store_true_list):
for opt in config.keys():
for cmd in store_true_list:
if str(opt) == cmd and config.__dict__[opt] == '0':
config.__dict__[opt] = None
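# Usage sketch, not part of the original module: the caller supplies an
# optparse parser plus the names of its store_true options, and get_config()
# merges /etc/sysconfig/rhn/rhnpushrc, ~/.rhnpushrc, ./.rhnpushrc and the
# command line, with later sources taking precedence. Names are hypothetical.
#
#     manager = ConfManager(my_option_parser, ['force', 'nosig'])
#     options = manager.get_config()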
| gpl-2.0 | -7,685,748,045,511,053,000 | 40.949153 | 116 | 0.640202 | false |