| text | meta |
|---|---|
# Requires admin role
# If you want to do all three tasks at once, see migrateAccount or migrateAccounts functions
from agoTools.admin import Admin
agoAdmin = Admin(<username>) # Replace <username> with your admin username
agoAdmin.reassignAllUser1ItemsToUser2(agoAdmin, <userFrom>, <userTo>) # Replace with your current and new account usernames
agoAdmin.reassignAllGroupOwnership(agoAdmin, <userFrom>, <userTo>)
agoAdmin.addUser2ToAllUser1Groups(agoAdmin, <userFrom>, <userTo>)
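# The one-call alternative mentioned in the comment above would look roughly
# like the sketch below; the exact migrateAccount signature is an assumption
# here (mirroring the three calls above), not taken from the library docs.
# agoAdmin.migrateAccount(agoAdmin, <userFrom>, <userTo>)  # assumed signature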
|
{
"content_hash": "86a669f9ca6b1f8f15006a0db1b46467",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 123,
"avg_line_length": 49.2,
"alnum_prop": 0.7886178861788617,
"repo_name": "ecaldwell/ago-tools",
"id": "57cef0edb5e527bba2b1760594d954cabf507e47",
"size": "613",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "samples/moveItemsReassignGroups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "69082"
}
],
"symlink_target": ""
}
|
"""
Note: This test doesn't do much any more, as
determine_space has become too complex and needs
more environment information to be fully tested.
"""
from tiddlywebplugins.tiddlyspace.handler import determine_space
config = {
'server_host': {
'scheme': 'http',
'host': '0.0.0.0',
'port': '8080',
},
}
environ = {'tiddlyweb.config': config}
def test_simple_space():
space = determine_space(environ, 'foo.0.0.0.0:8080')
assert space == 'foo'
space = determine_space(environ, 'foo.bar.0.0.0.0:8080')
assert space == 'foo.bar'
|
{
"content_hash": "583b965ebf396c0d8e4c376b58fa8940",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 64,
"avg_line_length": 25.5,
"alnum_prop": 0.6062091503267973,
"repo_name": "FND/tiddlyspace",
"id": "3850ebeb0ff7c00a5f7cfe1015f299cf2e333b54",
"size": "612",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_determine_space.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "270642"
},
{
"name": "Python",
"bytes": "216402"
},
{
"name": "Shell",
"bytes": "1249"
}
],
"symlink_target": ""
}
|
import fetch_api
import rospy
def print_usage():
print('Moves the torso to a certain height between [0.0, 0.4]')
print('Usage: rosrun applications torso_demo.py 0.4')
def wait_for_time():
"""Wait for simulated time to begin.
"""
while rospy.Time().now().to_sec() == 0:
pass
def main():
rospy.init_node('torso_demo')
wait_for_time()
argv = rospy.myargv()
if len(argv) < 2:
print_usage()
return
height = float(argv[1])
torso = fetch_api.Torso()
torso.set_height(height)
if __name__ == '__main__':
main()
|
{
"content_hash": "e2079a8c74268d794a1a98ce67bf15b1",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 66,
"avg_line_length": 18.870967741935484,
"alnum_prop": 0.582905982905983,
"repo_name": "hcrlab/access_teleop",
"id": "523c21e35b18af5cb2171c6ca81e529ede6f7a28",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cse481wi18/applications/scripts/torso_demo.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "147"
},
{
"name": "C++",
"bytes": "864037"
},
{
"name": "CMake",
"bytes": "98090"
},
{
"name": "CSS",
"bytes": "13370"
},
{
"name": "HTML",
"bytes": "67713"
},
{
"name": "JavaScript",
"bytes": "134005"
},
{
"name": "OpenEdge ABL",
"bytes": "66320"
},
{
"name": "Python",
"bytes": "822863"
},
{
"name": "Shell",
"bytes": "1567"
}
],
"symlink_target": ""
}
|
"""
This module defines tensors with abstract index notation.
The abstract index notation was first formalized by Penrose.
Tensor indices are formal objects, with a tensor type; there is no
notion of index range, it is only possible to assign the dimension,
used to trace the Kronecker delta; the dimension can be a Symbol.
The Einstein summation convention is used.
The covariant indices are indicated with a minus sign in front of the index.
For instance the tensor ``t = p(a)*A(b,c)*q(-c)`` has the index ``c``
contracted.
A tensor expression ``t`` can be called; when called with its
indices in sorted order it is equal to itself:
in the above example ``t(a, b) == t``;
one can call ``t`` with different indices; ``t(c, d) == p(c)*A(d,a)*q(-a)``.
The contracted indices are dummy indices, internally they have no name,
the indices being represented by a graph-like structure.
Tensors are put in canonical form using ``canon_bp``, which uses
the Butler-Portugal algorithm for canonicalization using the monoterm
symmetries of the tensors.
If there is an (anti)symmetric metric, the indices can be raised and
lowered when the tensor is put in canonical form.
"""
from __future__ import annotations
import functools
import typing
from collections import defaultdict
from ..combinatorics.tensor_can import (bsgs_direct_product, canonicalize,
get_symmetric_group_sgs, riemann_bsgs)
from ..core import (Add, Basic, Integer, Rational, Symbol, Tuple, symbols,
sympify)
from ..core.sympify import CantSympify
from ..external import import_module
from ..matrices import Matrix, eye
from ..utilities.decorator import doctest_depends_on
class TIDS(CantSympify):
"""
Tensor-index data structure. This contains internal data structures about
components of a tensor expression, its free and dummy indices.
To create a ``TIDS`` object via the standard constructor, the required
arguments are
WARNING: this class is meant as an internal representation of tensor data
structures and should not be directly accessed by end users.
Parameters
==========
components : ``TensorHead`` objects representing the components of the tensor expression.
free : Free indices in their internal representation.
dum : Dummy indices in their internal representation.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0 m1 m2 m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> TIDS([T], [(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz)], [(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
Notes
=====
In short, this has created the components, free and dummy indices for
the internal representation of a tensor T(m0, m1, -m1, m3).
Free indices are represented as a list of triplets. The elements of
each triplet identify a single free index and are
1. TensorIndex object
2. position inside the component
3. component number
Dummy indices are represented as a list of 4-plets. Each 4-plet stands
for a pair of contracted indices; their original TensorIndex is not
stored, as it is no longer required. The four elements of the 4-plet
are
1. position inside the component of the first index.
2. position inside the component of the second index.
3. component number of the first index.
4. component number of the second index.
"""
def __init__(self, components, free, dum):
self.components = components
self.free = free
self.dum = dum
self._ext_rank = len(self.free) + 2*len(self.dum)
self.dum.sort(key=lambda x: (x[2], x[0]))
def get_tensors(self):
"""
Get a list of ``Tensor`` objects having the same ``TIDS`` if multiplied
by one another.
"""
indices = self.get_indices()
components = self.components
tensors = [None for i in components] # pre-allocate list
ind_pos = 0
for i, component in enumerate(components):
prev_pos = ind_pos
ind_pos += component.rank
tensors[i] = Tensor(component, indices[prev_pos:ind_pos])
return tensors
def get_components_with_free_indices(self):
"""
Get a list of components with their associated indices.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0 m1 m2 m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> t = TIDS.from_components_and_indices([T], [m0, m1, -m1, m3])
>>> t.get_components_with_free_indices()
[(T(Lorentz,Lorentz,Lorentz,Lorentz), [(m0, 0, 0), (m3, 3, 0)])]
>>> t2 = (A(m0)*A(-m0))._tids
>>> t2.get_components_with_free_indices()
[(A(Lorentz), []), (A(Lorentz), [])]
>>> t3 = (A(m0)*A(-m1)*A(-m0)*A(m1))._tids
>>> t3.get_components_with_free_indices()
[(A(Lorentz), []), (A(Lorentz), []), (A(Lorentz), []), (A(Lorentz), [])]
>>> t4 = (A(m0)*A(m1)*A(-m0))._tids
>>> t4.get_components_with_free_indices()
[(A(Lorentz), []), (A(Lorentz), [(m1, 0, 1)]), (A(Lorentz), [])]
>>> t5 = (A(m0)*A(m1)*A(m2))._tids
>>> t5.get_components_with_free_indices()
[(A(Lorentz), [(m0, 0, 0)]), (A(Lorentz), [(m1, 0, 1)]), (A(Lorentz), [(m2, 0, 2)])]
"""
components = self.components
ret_comp = []
free_counter = 0
if len(self.free) == 0:
return [(comp, []) for comp in components]
for i, comp in enumerate(components):
c_free = []
while free_counter < len(self.free):
if not self.free[free_counter][2] == i:
break
c_free.append(self.free[free_counter])
free_counter += 1
if free_counter >= len(self.free):
break
ret_comp.append((comp, c_free))
return ret_comp
@staticmethod
def from_components_and_indices(components, indices):
"""
Create a new ``TIDS`` object from ``components`` and ``indices``
``components`` ``TensorHead`` objects representing the components
of the tensor expression.
``indices`` ``TensorIndex`` objects, the indices. Contractions are
detected upon construction.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0 m1 m2 m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> TIDS.from_components_and_indices([T], [m0, m1, -m1, m3])
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz)], [(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
In the case of many components, the same indices produce slightly different
internal position data:
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> TIDS.from_components_and_indices([A]*4, [m0, m1, -m1, m3])
TIDS([A(Lorentz), A(Lorentz), A(Lorentz), A(Lorentz)], [(m0, 0, 0), (m3, 0, 3)], [(0, 0, 1, 2)])
"""
tids = None
cur_pos = 0
for i in components:
tids_sing = TIDS([i], *TIDS.free_dum_from_indices(*indices[cur_pos:cur_pos+i.rank]))
if tids is None:
tids = tids_sing
else:
tids *= tids_sing
cur_pos += i.rank
if tids is None:
tids = TIDS([], [], [])
tids.free.sort(key=lambda x: x[0].name)
tids.dum.sort()
return tids
@staticmethod
def free_dum_from_indices(*indices):
"""
Convert ``indices`` into ``free``, ``dum`` for single component tensor
``free`` list of tuples ``(index, pos, 0)``,
where ``pos`` is the position of index in
the list of indices formed by the component tensors
``dum`` list of tuples ``(pos_contr, pos_cov, 0, 0)``
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0 m1 m2 m3', Lorentz)
>>> TIDS.free_dum_from_indices(m0, m1, -m1, m3)
([(m0, 0, 0), (m3, 3, 0)], [(1, 2, 0, 0)])
"""
n = len(indices)
if n == 1:
return [(indices[0], 0, 0)], []
# find the positions of the free indices and of the dummy indices
free = [True]*len(indices)
index_dict = {}
dum = []
for i, index in enumerate(indices):
name = index._name
typ = index._tensortype
contr = index._is_up
if (name, typ) in index_dict:
# found a pair of dummy indices
is_contr, pos = index_dict[(name, typ)]
# check consistency and update free
if is_contr:
if contr:
raise ValueError(f'two equal contravariant indices in slots {pos:d} and {i:d}')
free[pos] = False
free[i] = False
else:
if contr:
free[pos] = False
free[i] = False
else:
raise ValueError(f'two equal covariant indices in slots {pos:d} and {i:d}')
if contr:
dum.append((i, pos, 0, 0))
else:
dum.append((pos, i, 0, 0))
else:
index_dict[(name, typ)] = index._is_up, i
free = [(index, i, 0) for i, index in enumerate(indices) if free[i]]
free.sort()
return free, dum
@staticmethod
def _check_matrix_indices(f_free, g_free, nc1):
# This "private" method checks matrix indices.
# Matrix indices are special as there are only two, and observe
# anomalous substitution rules to determine contractions.
dum = []
# make sure that free indices appear in the same order as in their component:
f_free.sort(key=lambda x: (x[2], x[1]))
g_free.sort(key=lambda x: (x[2], x[1]))
matrix_indices_storage = {}
transform_right_to_left = {}
f_pop_pos = []
g_pop_pos = []
for free_pos, (ind, i, c) in enumerate(f_free):
index_type = ind._tensortype
if ind not in (index_type.auto_left, -index_type.auto_right):
continue
matrix_indices_storage[ind] = (free_pos, i, c)
for free_pos, (ind, i, c) in enumerate(g_free):
index_type = ind._tensortype
if ind not in (index_type.auto_left, -index_type.auto_right):
continue
if ind == index_type.auto_left:
if -index_type.auto_right in matrix_indices_storage:
other_pos, other_i, other_c = matrix_indices_storage.pop(-index_type.auto_right)
dum.append((other_i, i, other_c, c + nc1))
# mark to remove other_pos and free_pos from free:
g_pop_pos.append(free_pos)
f_pop_pos.append(other_pos)
continue
if ind in matrix_indices_storage:
other_pos, other_i, other_c = matrix_indices_storage.pop(ind)
dum.append((other_i, i, other_c, c + nc1))
# mark to remove other_pos and free_pos from free:
g_pop_pos.append(free_pos)
f_pop_pos.append(other_pos)
transform_right_to_left[-index_type.auto_right] = c
continue
if ind in transform_right_to_left:
other_c = transform_right_to_left.pop(ind)
if c == other_c:
g_free[free_pos] = (index_type.auto_left, i, c)
for i in sorted(f_pop_pos, reverse=True):
f_free.pop(i)
for i in sorted(g_pop_pos, reverse=True):
g_free.pop(i)
return dum
@staticmethod
def mul(f, g):
"""
The algorithm performing the multiplication of two ``TIDS`` instances.
In short, it forms a new ``TIDS`` object, joining components and indices,
checking that abstract indices are compatible, and possibly contracting
them.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2, m3 = tensor_indices('m0 m1 m2 m3', Lorentz)
>>> T = tensorhead('T', [Lorentz]*4, [[1]*4])
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> tids_1 = TIDS.from_components_and_indices([T], [m0, m1, -m1, m3])
>>> tids_2 = TIDS.from_components_and_indices([A], [m2])
>>> tids_1 * tids_2
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz), A(Lorentz)],
[(m0, 0, 0), (m3, 3, 0), (m2, 0, 1)], [(1, 2, 0, 0)])
In this case no contraction has been performed.
>>> tids_3 = TIDS.from_components_and_indices([A], [-m3])
>>> tids_1 * tids_3
TIDS([T(Lorentz,Lorentz,Lorentz,Lorentz), A(Lorentz)],
[(m0, 0, 0)], [(1, 2, 0, 0), (3, 0, 0, 1)])
Free indices ``m3`` and ``-m3`` are identified as a contracted couple, and are
therefore transformed into dummy indices.
A wrong index construction (for example, trying to contract two
contravariant indices or using indices multiple times) would result in
an exception:
>>> tids_4 = TIDS.from_components_and_indices([A], [m3])
>>> # This raises an exception:
>>> # tids_1 * tids_4
"""
def index_up(u):
return u if u.is_up else -u
f_free = f.free[:]
g_free = g.free[:]
nc1 = len(f.components)
dum = TIDS._check_matrix_indices(f_free, g_free, nc1)
# find out which free indices of f and g are contracted
free_dict1 = {i if i.is_up else -i: (pos, cpos, i) for i, pos, cpos in f_free}
free_dict2 = {i if i.is_up else -i: (pos, cpos, i) for i, pos, cpos in g_free}
free_names = set(free_dict1) & set(free_dict2)
# find the new `free` and `dum`
dum2 = [(i1, i2, c1 + nc1, c2 + nc1) for i1, i2, c1, c2 in g.dum]
free1 = [(ind, i, c) for ind, i, c in f_free if index_up(ind) not in free_names]
free2 = [(ind, i, c + nc1) for ind, i, c in g_free if index_up(ind) not in free_names]
free = free1 + free2
dum.extend(f.dum + dum2)
for name in free_names:
ipos1, cpos1, ind1 = free_dict1[name]
ipos2, cpos2, ind2 = free_dict2[name]
cpos2 += nc1
if ind1._is_up == ind2._is_up:
raise ValueError(f'wrong index construction {ind1}')
if ind1._is_up:
new_dummy = (ipos1, ipos2, cpos1, cpos2)
else:
new_dummy = (ipos2, ipos1, cpos2, cpos1)
dum.append(new_dummy)
return f.components + g.components, free, dum
def __mul__(self, other):
return TIDS(*self.mul(self, other))
def __str__(self):
from ..printing import sstr
return f'TIDS({sstr(self.components)}, {sstr(self.free)}, {sstr(self.dum)})'
def sorted_components(self):
"""
Returns a ``TIDS`` with sorted components
The sorting is done taking into account the commutation group
of the component tensors.
"""
from ..combinatorics.permutations import _af_invert
cv = list(zip(self.components, range(len(self.components))))
sign = 1
n = len(cv) - 1
for i in range(n):
for j in range(n, i, -1):
c = cv[j-1][0].commutes_with(cv[j][0])
if c not in [0, 1]:
continue
if (cv[j-1][0]._types, cv[j-1][0]._name) > \
(cv[j][0]._types, cv[j][0]._name):
cv[j-1], cv[j] = cv[j], cv[j-1]
if c:
sign = -sign
# perm_inv[new_pos] = old_pos
components = [x[0] for x in cv]
perm_inv = [x[1] for x in cv]
perm = _af_invert(perm_inv)
free = [(ind, i, perm[c]) for ind, i, c in self.free]
free.sort()
dum = [(i1, i2, perm[c1], perm[c2]) for i1, i2, c1, c2 in self.dum]
dum.sort(key=lambda x: components[x[2]].index_types[x[0]])
return TIDS(components, free, dum), sign
def _get_sorted_free_indices_for_canon(self):
sorted_free = self.free[:]
sorted_free.sort(key=lambda x: x[0])
return sorted_free
def _get_sorted_dum_indices_for_canon(self):
return sorted(self.dum, key=lambda x: (x[2], x[0]))
def canon_args(self):
"""
Returns ``(g, dummies, msym, v)``, the entries of ``canonicalize``;
see ``canonicalize`` in ``tensor_can.py``
"""
# to be called after sorted_components
from ..combinatorics.permutations import _af_new
# types = list(set(self._types))
# types.sort(key = lambda x: x._name)
n = self._ext_rank
g = [None]*n + [n, n+1]
pos = 0
vpos = []
components = self.components
for t in components:
vpos.append(pos)
pos += t._rank
# ordered indices: first the free indices, ordered by types
# then the dummy indices, ordered by types and contravariant before
# covariant
# g[position in tensor] = position in ordered indices
for i, (_, ipos, cpos) in enumerate(self._get_sorted_free_indices_for_canon()):
pos = vpos[cpos] + ipos
g[pos] = i
pos = len(self.free)
j = len(self.free)
dummies = []
prev = None
a = []
msym = []
for ipos1, ipos2, cpos1, cpos2 in self._get_sorted_dum_indices_for_canon():
pos1 = vpos[cpos1] + ipos1
pos2 = vpos[cpos2] + ipos2
g[pos1] = j
g[pos2] = j + 1
j += 2
typ = components[cpos1].index_types[ipos1]
if typ != prev:
if a:
dummies.append(a)
a = [pos, pos + 1]
prev = typ
msym.append(typ.metric_antisym)
else:
a.extend([pos, pos + 1])
pos += 2
if a:
dummies.append(a)
numtyp = []
prev = None
for t in components:
if t == prev:
numtyp[-1][1] += 1
else:
prev = t
numtyp.append([prev, 1])
v = []
for h, n in numtyp:
if h._comm in (0, 1):
comm = h._comm
else:
comm = TensorManager.get_comm(h._comm, h._comm)
v.append((h._symmetry.base, h._symmetry.generators, n, comm))
return _af_new(g), dummies, msym, v
def perm2tensor(self, g, canon_bp=False):
"""
Returns a ``TIDS`` instance corresponding to the permutation ``g``
``g`` permutation corresponding to the tensor in the representation
used in canonicalization
``canon_bp`` if True, then ``g`` is the permutation
corresponding to the canonical form of the tensor
"""
vpos = []
components = self.components
pos = 0
for t in components:
vpos.append(pos)
pos += t._rank
sorted_free = [i[0] for i in self._get_sorted_free_indices_for_canon()]
nfree = len(sorted_free)
rank = self._ext_rank
dum = [[None]*4 for i in range((rank - nfree)//2)]
free = []
icomp = -1
for i in range(rank):
if i in vpos:
icomp += vpos.count(i)
pos0 = i
ipos = i - pos0
gi = g[i]
if gi < nfree:
ind = sorted_free[gi]
free.append((ind, ipos, icomp))
else:
j = gi - nfree
idum, cov = divmod(j, 2)
if cov:
dum[idum][1] = ipos
dum[idum][3] = icomp
else:
dum[idum][0] = ipos
dum[idum][2] = icomp
dum = [tuple(x) for x in dum]
return TIDS(components, free, dum)
def get_indices(self):
"""
Get a list of indices, creating new tensor indices to complete dummy indices.
"""
components = self.components
free = self.free
dum = self.dum
indices = [None]*self._ext_rank
start = 0
pos = 0
vpos = []
for t in components:
vpos.append(pos)
pos += t.rank
cdt = defaultdict(int)
# if the free indices have names with dummy_fmt, start with an
# index higher than those for the dummy indices
# to avoid name collisions
for indx, ipos, cpos in free:
if indx._name.split('_')[0] == indx._tensortype._dummy_fmt[:-3]:
cdt[indx._tensortype] = max(cdt[indx._tensortype], int(indx._name.split('_')[1]) + 1)
start = vpos[cpos]
indices[start + ipos] = indx
for ipos1, ipos2, cpos1, cpos2 in dum:
start1 = vpos[cpos1]
start2 = vpos[cpos2]
typ1 = components[cpos1].index_types[ipos1]
assert typ1 == components[cpos2].index_types[ipos2]
fmt = typ1._dummy_fmt
nd = cdt[typ1]
indices[start1 + ipos1] = TensorIndex(fmt % nd, typ1)
indices[start2 + ipos2] = TensorIndex(fmt % nd, typ1, False)
cdt[typ1] += 1
return indices
def contract_metric(self, g):
"""
Returns new TIDS and sign.
Sign is either 1 or -1, to correct the sign after metric contraction
(for spinor indices).
"""
components = self.components
antisym = g.index_types[0].metric_antisym
# if not any(x == g for x in components):
# return self
# list of positions of the metric ``g``
gpos = [i for i, x in enumerate(components) if x == g]
if not gpos:
return self, 1
sign = 1
dum = self.dum[:]
free = self.free[:]
elim = set()
for gposx in gpos:
if gposx in elim:
continue
free1 = [x for x in free if x[-1] == gposx]
dum1 = [x for x in dum if gposx in (x[-2], x[-1])]
if not dum1:
continue
elim.add(gposx)
if len(dum1) == 2:
if not antisym:
dum10, dum11 = dum1
if dum10[3] == gposx:
# the index with pos p0 and component c0 is contravariant
c0 = dum10[2]
p0 = dum10[0]
else:
# the index with pos p0 and component c0 is covariant
c0 = dum10[3]
p0 = dum10[1]
if dum11[3] == gposx:
# the index with pos p1 and component c1 is contravariant
c1 = dum11[2]
p1 = dum11[0]
else:
# the index with pos p1 and component c1 is covariant
c1 = dum11[3]
p1 = dum11[1]
dum.append((p0, p1, c0, c1))
else:
dum10, dum11 = dum1
# change the sign to bring the indices of the metric to contravariant
# form; change the sign if dum10 has the metric index in position 0
if dum10[3] == gposx:
# the index with pos p0 and component c0 is contravariant
c0 = dum10[2]
p0 = dum10[0]
if dum10[1] == 1:
sign = -sign
else:
# the index with pos p0 and component c0 is covariant
c0 = dum10[3]
p0 = dum10[1]
if dum10[0] == 0:
sign = -sign
if dum11[3] == gposx:
# the index with pos p1 and component c1 is contravariant
c1 = dum11[2]
p1 = dum11[0]
sign = -sign
else:
# the index with pos p1 and component c1 is covariant
c1 = dum11[3]
p1 = dum11[1]
dum.append((p0, p1, c0, c1))
elif len(dum1) == 1:
if not antisym:
dp0, dp1, dc0, dc1 = dum1[0]
if dc0 == dc1:
# g(i, -i)
typ = g.index_types[0]
if typ._dim is None:
raise ValueError('dimension not assigned')
sign = sign*typ._dim
else:
# g(i0, i1)*p(-i1)
if dc0 == gposx:
p1 = dp1
c1 = dc1
else:
p1 = dp0
c1 = dc0
ind, _, c = free1[0]
free.append((ind, p1, c1))
else:
dp0, dp1, dc0, dc1 = dum1[0]
if dc0 == dc1:
# g(i, -i)
typ = g.index_types[0]
if typ._dim is None:
raise ValueError('dimension not assigned')
sign = sign*typ._dim
if dp0 < dp1:
# g(i, -i) = -D with antisymmetric metric
sign = -sign
else:
# g(i0, i1)*p(-i1)
if dc0 == gposx:
p1 = dp1
c1 = dc1
if dp0 == 0:
sign = -sign
else:
p1 = dp0
c1 = dc0
ind, _, c = free1[0]
free.append((ind, p1, c1))
dum = [x for x in dum if x not in dum1]
free = [x for x in free if x not in free1]
shift = 0
shifts = [0]*len(components)
for i in range(len(components)):
if i in elim:
shift += 1
continue
shifts[i] = shift
free = [(ind, p, c - shifts[c]) for (ind, p, c) in free if c not in elim]
dum = [(p0, p1, c0 - shifts[c0], c1 - shifts[c1]) for i, (p0, p1, c0, c1) in enumerate(dum) if c0 not in elim and c1 not in elim]
components = [c for i, c in enumerate(components) if i not in elim]
tids = TIDS(components, free, dum)
return tids, sign
class _TensorDataLazyEvaluator(CantSympify):
"""
EXPERIMENTAL: do not rely on this class, it may change without deprecation
warnings in future versions of Diofant.
This object contains the logic to associate components data to a tensor
expression. Components data are set via the ``.data`` property of tensor
expressions, and are stored inside this class as a mapping between the tensor
expression and the ``ndarray``.
Computations are executed lazily: whereas the tensor expressions can have
contractions, tensor products, and additions, components data are not
computed until they are accessed by reading the ``.data`` property
associated to the tensor expression.
"""
_substitutions_dict: dict[typing.Any, typing.Any] = {}
_substitutions_dict_tensmul: dict[typing.Any, typing.Any] = {}
def __getitem__(self, key):
dat = self._get(key)
if dat is None:
return
numpy = import_module('numpy')
if not isinstance(dat, numpy.ndarray):
return dat
if dat.ndim == 0:
return dat[()]
elif dat.ndim == 1 and dat.size == 1:
return dat[0]
return dat
def _get(self, key):
"""
Retrieve ``data`` associated with ``key``.
This algorithm looks into ``self._substitutions_dict`` for all
``TensorHead`` in the ``TensExpr`` (or just ``TensorHead`` if key is a
TensorHead instance). It reconstructs the components data that the
tensor expression should have by performing on components data the
operations that correspond to the abstract tensor operations applied.
The metric tensor is handled in a different manner: it is pre-computed in
``self._substitutions_dict_tensmul``.
"""
if key in self._substitutions_dict:
return self._substitutions_dict[key]
if isinstance(key, TensorHead):
return
if isinstance(key, Tensor):
# special case to handle metrics. Metric tensors cannot be
# constructed through contraction by the metric, their
# components show if they are a matrix or its inverse.
signature = tuple(i.is_up for i in key.get_indices())
srch = (key.component,) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
return self.data_tensmul_from_tensorhead(key, key.component)
if isinstance(key, TensMul):
tensmul_list = key.split()
if len(tensmul_list) == 1 and len(tensmul_list[0].components) == 1:
# special case to handle metrics. Metric tensors cannot be
# constructed through contraction by the metric, their
# components show if they are a matrix or its inverse.
signature = tuple(i.is_up for i in tensmul_list[0].get_indices())
srch = (tensmul_list[0].components[0],) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
data_list = [self.data_tensmul_from_tensorhead(i, i.components[0]) for i in tensmul_list]
if all(i is None for i in data_list):
return
if any(i is None for i in data_list):
raise ValueError('Mixing tensors with associated components '
'data with tensors without components data')
data_result, _ = self.data_product_tensors(data_list, tensmul_list)
return data_result
if isinstance(key, TensAdd):
sumvar = Integer(0)
data_list = []
free_args_list = []
for arg in key.args:
if isinstance(arg, TensExpr):
data_list.append(arg.data)
free_args_list.append([x[0] for x in arg.free])
else:
data_list.append(arg)
free_args_list.append([])
if all(i is None for i in data_list):
return
if any(i is None for i in data_list):
raise ValueError('Mixing tensors with associated components '
'data with tensors without components data')
numpy = import_module('numpy')
for data, free_args in zip(data_list, free_args_list):
if len(free_args) < 2:
sumvar += data
else:
free_args_pos = {y: x for x, y in enumerate(free_args)}
axes = [free_args_pos[arg] for arg in key.free_args]
sumvar += numpy.transpose(data, axes)
return sumvar
def data_tensorhead_from_tensmul(self, data, tensmul, tensorhead):
"""
This method is used when assigning components data to a ``TensMul``
object, it converts components data to a fully contravariant ndarray,
which is then stored according to the ``TensorHead`` key.
"""
if data is not None:
return self._correct_signature_from_indices(
data,
tensmul.get_indices(),
tensmul.free,
tensmul.dum,
True)
def data_tensmul_from_tensorhead(self, tensmul, tensorhead):
"""
This method corrects the components data to the right signature
(covariant/contravariant) using the metric associated with each
``TensorIndexType``.
"""
if tensorhead.data is not None:
return self._correct_signature_from_indices(
tensorhead.data,
tensmul.get_indices(),
tensmul.free,
tensmul.dum)
def data_product_tensors(self, data_list, tensmul_list):
"""
Given a ``data_list``, list of ``ndarray``'s and a ``tensmul_list``,
list of ``TensMul`` instances, compute the resulting ``ndarray``,
after tensor products and contractions.
"""
def data_mul(f, g):
"""
Multiplies two ``ndarray`` objects: it first calls ``TIDS.mul``,
then checks which indices have been contracted, and finally performs the
contraction operation on the data, according to the contracted indices.
"""
data1, tensmul1 = f
data2, tensmul2 = g
components, free, dum = TIDS.mul(tensmul1, tensmul2)
data = _TensorDataLazyEvaluator._contract_ndarray(tensmul1.free, tensmul2.free, data1, data2)
# TODO: do this more efficiently... maybe by just passing an index list
# to .data_product_tensor(...)
return data, TensMul.from_TIDS(Integer(1), TIDS(components, free, dum))
return functools.reduce(data_mul, zip(data_list, tensmul_list))
def _assign_data_to_tensor_expr(self, key, data):
if isinstance(key, TensAdd):
raise ValueError('cannot assign data to TensAdd')
# here it is assumed that `key` is a `TensMul` instance.
if len(key.components) != 1:
raise ValueError('cannot assign data to TensMul with multiple components')
tensorhead = key.components[0]
newdata = self.data_tensorhead_from_tensmul(data, key, tensorhead)
return tensorhead, newdata
def _check_permutations_on_data(self, tens, data):
import numpy
if isinstance(tens, TensorHead):
rank = tens.rank
generators = tens.symmetry.generators
elif isinstance(tens, Tensor):
rank = tens.rank
generators = tens.components[0].symmetry.generators
elif isinstance(tens, TensorIndexType):
rank = tens.metric.rank
generators = tens.metric.symmetry.generators
# Every generator is a permutation, check that by permuting the array
# by that permutation, the array will be the same, except for a
# possible sign change if the permutation admits it.
for gener in generators:
sign_change = +1 if (gener(rank) == rank) else -1
data_swapped = data
last_data = data
permute_axes = list(map(gener, range(rank)))
# the order of a permutation is the number of times it must be applied
# to get back the identity.
for _ in range(gener.order()-1):
data_swapped = numpy.transpose(data_swapped, permute_axes)
# if any value in the difference array is non-zero, raise an error:
if (last_data - sign_change*data_swapped).any():
raise ValueError('Component data symmetry structure error')
last_data = data_swapped
def __setitem__(self, key, value):
"""
Set the components data of a tensor object/expression.
Components data are transformed to the all-contravariant form and stored
with the corresponding ``TensorHead`` object. If a ``TensorHead`` object
cannot be uniquely identified, it will raise an error.
"""
data = _TensorDataLazyEvaluator.parse_data(value)
self._check_permutations_on_data(key, data)
# TensorHead and TensorIndexType can be assigned data directly, while
# TensMul must first convert data to a fully contravariant form, and
# assign it to its corresponding TensorHead single component.
if not isinstance(key, (TensorHead, TensorIndexType)):
key, data = self._assign_data_to_tensor_expr(key, data)
if isinstance(key, TensorHead):
for dim, indextype in zip(data.shape, key.index_types):
if indextype.data is None:
raise ValueError(f'index type {indextype} has no components data'
' associated (needed to raise/lower index)')
if indextype.dim is None:
continue
if dim != indextype.dim:
raise ValueError('wrong dimension of ndarray')
self._substitutions_dict[key] = data
def __delitem__(self, key):
del self._substitutions_dict[key]
def __contains__(self, key):
return key in self._substitutions_dict
@staticmethod
def _contract_ndarray(free1, free2, ndarray1, ndarray2):
numpy = import_module('numpy')
def ikey(x):
return x[2], x[1]
free1 = free1[:]
free2 = free2[:]
free1.sort(key=ikey)
free2.sort(key=ikey)
self_free = [_[0] for _ in free1]
axes1 = []
axes2 = []
for jpos, jindex in enumerate(free2):
if -jindex[0] in self_free:
nidx = self_free.index(-jindex[0])
else:
continue
axes1.append(nidx)
axes2.append(jpos)
contracted_ndarray = numpy.tensordot(
ndarray1,
ndarray2,
(axes1, axes2)
)
return contracted_ndarray
def add_metric_data(self, metric, data):
"""
Assign data to the ``metric`` tensor. The metric tensor behaves in an
anomalous way when raising and lowering indices.
A fully covariant metric is the inverse transpose of the fully
contravariant metric (inverse in the matrix sense). If the metric is
symmetric, the transpose is not necessary and mixed
covariant/contravariant metrics are Kronecker deltas.
"""
# hard assignment, data should not be added to `TensorHead` for metric:
# the problem with `TensorHead` is that the metric is anomalous, i.e.
# raising and lowering the index means considering the metric or its
# inverse, this is not the case for other tensors.
self._substitutions_dict_tensmul[metric, True, True] = data
inverse_transpose = self.inverse_transpose_matrix(data)
# in symmetric spaces the transpose is the same as the original matrix;
# the fully covariant metric tensor is the inverse transpose, so this
# code is able to handle non-symmetric metrics as well.
self._substitutions_dict_tensmul[metric, False, False] = inverse_transpose
# now mixed cases, these are identical to the unit matrix if the metric
# is symmetric.
m = Matrix(data)
invt = Matrix(inverse_transpose)
self._substitutions_dict_tensmul[metric, True, False] = m * invt
self._substitutions_dict_tensmul[metric, False, True] = invt * m
@staticmethod
def _flip_index_by_metric(data, metric, pos):
numpy = import_module('numpy')
data = numpy.tensordot(
metric,
data,
(1, pos))
return numpy.rollaxis(data, 0, pos+1)
@staticmethod
def inverse_matrix(ndarray):
m = Matrix(ndarray).inv()
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def inverse_transpose_matrix(ndarray):
m = Matrix(ndarray).inv().T
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def _correct_signature_from_indices(data, indices, free, dum, inverse=False):
"""
Utility function to correct the values inside the components data
ndarray according to whether indices are covariant or contravariant.
It uses the metric matrix to lower values of covariant indices.
"""
numpy = import_module('numpy')
# change the ndarray values according to the covariance/contravariance of the indices
# use the metric
for i, indx in enumerate(indices):
if not indx.is_up and not inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(data, indx._tensortype.data, i)
elif not indx.is_up and inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(
data,
_TensorDataLazyEvaluator.inverse_matrix(indx._tensortype.data),
i
)
if len(dum) > 0:
# perform contractions
axes1 = []
axes2 = []
for i, indx1 in enumerate(indices):
try:
nd = indices[:i].index(-indx1)
except ValueError:
continue
axes1.append(nd)
axes2.append(i)
for ax1, ax2 in zip(axes1, axes2):
data = numpy.trace(data, axis1=ax1, axis2=ax2)
return data
@staticmethod
@doctest_depends_on(modules=('numpy',))
def parse_data(data):
"""
Transform ``data`` to a numpy ndarray. The parameter ``data`` may
contain data in various formats, e.g. nested lists, diofant ``Matrix``,
and so on.
Examples
========
>>> print(str(_TensorDataLazyEvaluator.parse_data([1, 3, -6, 12])))
[1 3 -6 12]
>>> print(str(_TensorDataLazyEvaluator.parse_data([[1, 2], [4, 7]])))
[[1 2]
[4 7]]
"""
numpy = import_module('numpy')
if (numpy is not None) and (not isinstance(data, numpy.ndarray)):
vsympify = numpy.vectorize(sympify)
data = vsympify(numpy.array(data))
return data
_tensor_data_substitution_dict = _TensorDataLazyEvaluator()
class _TensorManager:
"""
Class to manage tensor properties.
Notes
=====
Tensors belong to tensor commutation groups; each group has a label
``comm``; there are predefined labels:
``0`` tensors commuting with any other tensor
``1`` tensors anticommuting among themselves
``2`` tensors not commuting, apart from those with ``comm=0``
Other groups can be defined using ``set_comm``; tensors in those
groups commute with those with ``comm=0``; by default they
do not commute with any other group.
"""
def __init__(self):
self._comm_init()
def _comm_init(self):
self._comm = [{} for i in range(3)]
for i in range(3):
self._comm[0][i] = 0
self._comm[i][0] = 0
self._comm[1][1] = 1
self._comm[2][1] = None
self._comm[1][2] = None
self._comm_symbols2i = {0: 0, 1: 1, 2: 2}
self._comm_i2symbol = {0: 0, 1: 1, 2: 2}
@property
def comm(self):
return self._comm
def comm_symbols2i(self, i):
"""
Get the commutation group number corresponding to ``i``
``i`` can be a symbol or a number or a string
If ``i`` is not already defined its commutation group number
is set.
"""
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
return n
return self._comm_symbols2i[i]
def comm_i2symbol(self, i):
"""Returns the symbol corresponding to the commutation group number."""
return self._comm_i2symbol[i]
def set_comm(self, i, j, c):
"""
Set the commutation parameter ``c`` for commutation groups ``i, j``
Parameters
==========
i, j : symbols representing commutation groups
c : group commutation number
Notes
=====
``i, j`` can be symbols, strings or numbers,
apart from ``0, 1`` and ``2``, which are reserved respectively
for commuting, anticommuting tensors and tensors not commuting
with any other group apart from the commuting tensors.
For the remaining cases, use this method to set the commutation rules;
by default ``c=None``.
The group commutation number ``c`` is assigned in correspondence
to the group commutation symbols; it can be
0 commuting
1 anticommuting
None no commutation property
Examples
========
``G`` and ``GH`` do not commute with themselves and commute with
each other; A is commuting.
>>> Lorentz = TensorIndexType('Lorentz')
>>> i0, i1, i2, i3, i4 = tensor_indices('i0:5', Lorentz)
>>> A = tensorhead('A', [Lorentz], [[1]])
>>> G = tensorhead('G', [Lorentz], [[1]], 'Gcomm')
>>> GH = tensorhead('GH', [Lorentz], [[1]], 'GHcomm')
>>> TensorManager.set_comm('Gcomm', 'GHcomm', 0)
>>> (GH(i1)*G(i0)).canon_bp()
G(i0)*GH(i1)
>>> (G(i1)*G(i0)).canon_bp()
G(i1)*G(i0)
>>> (G(i1)*A(i0)).canon_bp()
A(i0)*G(i1)
"""
if c not in (0, 1, None):
raise ValueError('`c` can assume only the values 0, 1 or None')
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
if j not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[0][n] = 0
self._comm[n][0] = 0
self._comm_symbols2i[j] = n
self._comm_i2symbol[n] = j
ni = self._comm_symbols2i[i]
nj = self._comm_symbols2i[j]
self._comm[ni][nj] = c
self._comm[nj][ni] = c
def set_comms(self, *args):
"""
Set the commutation group numbers ``c`` for symbols ``i, j``
Parameters
==========
args : sequence of ``(i, j, c)``
"""
for i, j, c in args:
self.set_comm(i, j, c)
def get_comm(self, i, j):
"""
Return the commutation parameter for commutation group numbers ``i, j``
see ``_TensorManager.set_comm``
"""
return self._comm[i].get(j, 0 if i == 0 or j == 0 else None)
def clear(self):
"""Clear the TensorManager."""
self._comm_init()
TensorManager = _TensorManager()
@doctest_depends_on(modules=('numpy',))
class TensorIndexType(Basic):
"""
A TensorIndexType is characterized by its name and its metric.
Parameters
==========
name : name of the tensor type
metric : metric symmetry or metric object or ``None``
dim : dimension, it can be a symbol or an integer or ``None``
eps_dim : dimension of the epsilon tensor
dummy_fmt : name of the head of dummy indices
Attributes
==========
``name``
``metric_name`` : str
it is 'metric' or metric.name
``metric_antisym``
``metric`` : TensorType
the metric tensor
``delta`` : ``Kronecker delta``
``epsilon`` : the ``Levi-Civita epsilon`` tensor
``dim``
``eps_dim``
``dummy_fmt``
``data`` : a property to add ``ndarray`` values, to work in a specified basis.
Notes
=====
The ``metric`` parameter can be:
``metric = False`` symmetric metric (in Riemannian geometry)
``metric = True`` antisymmetric metric (for spinor calculus)
``metric = None`` there is no metric
``metric`` can be an object having ``name`` and ``antisym`` attributes.
If there is a metric the metric is used to raise and lower indices.
In the case of antisymmetric metric, the following raising and
lowering conventions will be adopted:
``psi(a) = g(a, b)*psi(-b); chi(-a) = chi(b)*g(-b, -a)``
``g(-a, b) = delta(-a, b); g(b, -a) = -delta(a, -b)``
where ``delta(-a, b) = delta(b, -a)`` is the ``Kronecker delta``
(see ``TensorIndex`` for the conventions on indices).
If there is no metric it is not possible to raise or lower indices;
e.g. the index of the defining representation of ``SU(N)``
is 'covariant' and the conjugate representation is
'contravariant'; for ``N > 2`` they are linearly independent.
``eps_dim`` is by default equal to ``dim``, if the latter is an integer;
else it can be assigned (for use in naive dimensional regularization);
if ``eps_dim`` is not an integer ``epsilon`` is ``None``.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> Lorentz.metric
metric(Lorentz,Lorentz)
Examples with metric components data added; this means it is working in a
fixed basis:
>>> Lorentz.data = [1, -1, -1, -1]
>>> print(sstr(Lorentz))
TensorIndexType(Lorentz, 0)
>>> print(str(Lorentz.data))
[[1 0 0 0]
[0 -1 0 0]
[0 0 -1 0]
[0 0 0 -1]]
"""
def __new__(cls, name, metric=False, dim=None, eps_dim=None,
dummy_fmt=None):
if isinstance(name, str):
name = Symbol(name)
obj = Basic.__new__(cls, name, Integer(1) if metric else Integer(0))
obj._name = str(name)
if not dummy_fmt:
obj._dummy_fmt = f'{obj.name}_%d'
else:
obj._dummy_fmt = f'{dummy_fmt}_%d'
if metric is None:
obj.metric_antisym = None
obj.metric = None
else:
if metric in (True, False, 0, 1):
metric_name = 'metric'
obj.metric_antisym = metric
else:
metric_name = metric.name
obj.metric_antisym = metric.antisym
sym2 = TensorSymmetry(get_symmetric_group_sgs(2, obj.metric_antisym))
S2 = TensorType([obj]*2, sym2)
obj.metric = S2(metric_name)
obj.metric._matrix_behavior = True
obj._dim = dim
obj._delta = obj.get_kronecker_delta()
obj._eps_dim = eps_dim if eps_dim else dim
obj._epsilon = obj.get_epsilon()
obj._autogenerated = []
obj._auto_left = None
obj._auto_right = None
return obj
@property
def auto_right(self):
if self._auto_right is None:
self._auto_right = TensorIndex('auto_right', self)
return self._auto_right
@property
def auto_left(self):
if self._auto_left is None:
self._auto_left = TensorIndex('auto_left', self)
return self._auto_left
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
# This assignment is a bit controversial: should metric components be assigned
# to the metric only, or also to the TensorIndexType object? The advantage here
# is the ability to assign a 1D array and transform it to a 2D diagonal array.
numpy = import_module('numpy')
data = _TensorDataLazyEvaluator.parse_data(data)
if data.ndim > 2:
raise ValueError('data have to be of rank 1 (diagonal metric) or 2.')
if data.ndim == 1:
if self.dim is not None:
nda_dim = data.shape[0]
if nda_dim != self.dim:
raise ValueError('Dimension mismatch')
dim = data.shape[0]
newndarray = numpy.zeros((dim, dim), dtype=object)
for i, val in enumerate(data):
newndarray[i, i] = val
data = newndarray
dim1, dim2 = data.shape
if dim1 != dim2:
raise ValueError('Non-square matrix tensor.')
if self.dim is not None:
if self.dim != dim1:
raise ValueError('Dimension mismatch')
_tensor_data_substitution_dict[self] = data
_tensor_data_substitution_dict.add_metric_data(self.metric, data)
delta = self.get_kronecker_delta()
i1 = TensorIndex('i1', self)
i2 = TensorIndex('i2', self)
delta(i1, -i2).data = _TensorDataLazyEvaluator.parse_data(eye(dim1))
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
@property
def name(self):
return self._name
@property
def dim(self):
return self._dim
@property
def delta(self):
return self._delta
@property
def eps_dim(self):
return self._eps_dim
@property
def epsilon(self):
return self._epsilon
def get_kronecker_delta(self):
sym2 = TensorSymmetry(get_symmetric_group_sgs(2))
S2 = TensorType([self]*2, sym2)
delta = S2('KD')
delta._matrix_behavior = True
return delta
def get_epsilon(self):
if not isinstance(self._eps_dim, int):
return
sym = TensorSymmetry(get_symmetric_group_sgs(self._eps_dim, 1))
Sdim = TensorType([self]*self._eps_dim, sym)
epsilon = Sdim('Eps')
return epsilon
def __lt__(self, other):
return self.name < other.name
def __str__(self):
return self.name
__repr__ = __str__
@doctest_depends_on(modules=('numpy',))
class TensorIndex(Basic):
"""
Represents an abstract tensor index.
Parameters
==========
name : name of the index, or ``True`` if you want it to be automatically assigned
tensortype : ``TensorIndexType`` of the index
is_up : flag for contravariant index
Attributes
==========
``name``
``tensortype``
``is_up``
Notes
=====
Tensor indices are contracted with the Einstein summation convention.
An index can be in contravariant or in covariant form; in the latter
case it is represented by prepending a ``-`` to the index name.
Dummy indices have a name with head given by ``tensortype._dummy_fmt``
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i = TensorIndex('i', Lorentz)
>>> i
i
>>> sym1 = TensorSymmetry(*get_symmetric_group_sgs(1))
>>> S1 = TensorType([Lorentz], sym1)
>>> A, B = S1('A B')
>>> A(i)*B(-i)
A(L_0)*B(-L_0)
If you want the index name to be automatically assigned, just put ``True``
in the ``name`` field; it will be generated using the reserved character
``_`` in front of its name, in order to avoid conflicts with possibly
existing indices:
>>> i0 = TensorIndex(True, Lorentz)
>>> i0
_i0
>>> i1 = TensorIndex(True, Lorentz)
>>> i1
_i1
>>> A(i0)*B(-i1)
A(_i0)*B(-_i1)
>>> A(i0)*B(-i0)
A(L_0)*B(-L_0)
"""
def __new__(cls, name, tensortype, is_up=True):
if isinstance(name, str):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
elif name is True:
name = f'_i{len(tensortype._autogenerated)}'
name_symbol = Symbol(name)
tensortype._autogenerated.append(name_symbol)
else:
raise ValueError('invalid name')
obj = Basic.__new__(cls, name_symbol, tensortype, Integer(1) if is_up else Integer(0))
obj._name = str(name)
obj._tensortype = tensortype
obj._is_up = is_up
return obj
@property
def name(self):
return self._name
@property
def tensortype(self):
return self._tensortype
@property
def is_up(self):
return self._is_up
def _print(self):
s = self._name
if not self._is_up:
s = f'-{s}'
return s
def __lt__(self, other):
return (self._tensortype, self._name) < (other._tensortype, other._name)
def __neg__(self):
t1 = TensorIndex(self._name, self._tensortype,
(not self._is_up))
return t1
def tensor_indices(s, typ):
"""
Returns list of tensor indices given their names and their types
Parameters
==========
s : string of names of the indices, separated by commas or whitespace
typ : list of ``TensorIndexType`` of the indices
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b, c, d = tensor_indices('a b c d', Lorentz)
"""
if isinstance(s, str):
a = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
tilist = [TensorIndex(i, typ) for i in a]
if len(tilist) == 1:
return tilist[0]
return tilist
@doctest_depends_on(modules=('numpy',))
class TensorSymmetry(Basic):
"""
Monoterm symmetry of a tensor
Parameters
==========
bsgs : tuple ``(base, sgs)`` BSGS of the symmetry of the tensor
Attributes
==========
``base`` : Tuple
base of the BSGS
``generators`` : Tuple
generators of the BSGS
``rank`` : int
rank of the tensor
Notes
=====
A tensor can have an arbitrary monoterm symmetry provided by its BSGS.
Multiterm symmetries, like the cyclic symmetry of the Riemann tensor,
are not covered.
See Also
========
diofant.combinatorics.tensor_can.get_symmetric_group_sgs
Examples
========
Define a symmetric tensor
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = TensorSymmetry(get_symmetric_group_sgs(2))
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
"""
def __new__(cls, *args, **kw_args):
if len(args) == 1:
base, generators = args[0]
elif len(args) == 2:
base, generators = args
else:
raise TypeError('bsgs required, either two separate parameters or one tuple')
if not isinstance(base, Tuple):
base = Tuple(*base)
if not isinstance(generators, Tuple):
generators = Tuple(*generators)
obj = Basic.__new__(cls, base, generators, **kw_args)
return obj
@property
def base(self):
return self.args[0]
@property
def generators(self):
return self.args[1]
@property
def rank(self):
return self.args[1][0].size - 2
def tensorsymmetry(*args):
"""
Return a ``TensorSymmetry`` object.
One can represent a tensor with any monoterm slot symmetry group
using a BSGS.
``args`` can be a BSGS
``args[0]`` base
``args[1]`` sgs
Usually tensors are in (direct products of) representations
of the symmetric group;
``args`` can be a list of lists representing the shapes of Young tableaux
Notes
=====
For instance:
``[[1]]`` vector
``[[1]*n]`` symmetric tensor of rank ``n``
``[[n]]`` antisymmetric tensor of rank ``n``
``[[2, 2]]`` monoterm slot symmetry of the Riemann tensor
``[[1],[1]]`` vector*vector
``[[2], [1], [1]]`` (antisymmetric tensor)*vector*vector
Notice that with the shape ``[2, 2]`` we associate only the monoterm
symmetries of the Riemann tensor; this is an abuse of notation,
since the shape ``[2, 2]`` corresponds usually to the irreducible
representation characterized by the monoterm symmetries and by the
cyclic symmetry.
Examples
========
Symmetric tensor using a Young tableau
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = tensorsymmetry([1, 1])
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
Symmetric tensor using a ``BSGS`` (base, strong generator set)
>>> sym2 = tensorsymmetry(*get_symmetric_group_sgs(2))
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
"""
from ..combinatorics import Permutation
def tableau2bsgs(a):
if len(a) == 1:
# antisymmetric tensor of rank n (shape [[n]])
n = a[0]
bsgs = get_symmetric_group_sgs(n, 1)
else:
if all(x == 1 for x in a):
# symmetric tensor of rank n (shape [[1]*n])
n = len(a)
bsgs = get_symmetric_group_sgs(n)
elif a == [2, 2]:
bsgs = riemann_bsgs
else:
raise NotImplementedError
return bsgs
if not args:
return TensorSymmetry(Tuple(), Tuple(Permutation(1)))
if len(args) == 2 and isinstance(args[1][0], Permutation):
return TensorSymmetry(args)
base, sgs = tableau2bsgs(args[0])
for a in args[1:]:
basex, sgsx = tableau2bsgs(a)
base, sgs = bsgs_direct_product(base, sgs, basex, sgsx)
return TensorSymmetry(Tuple(base, sgs))
@doctest_depends_on(modules=('numpy',))
class TensorType(Basic):
"""
Class of tensor types.
Parameters
==========
index_types : list of ``TensorIndexType`` of the tensor indices
symmetry : ``TensorSymmetry`` of the tensor
Attributes
==========
``index_types``
``symmetry``
``types`` : list of ``TensorIndexType`` without repetitions
Examples
========
Define a symmetric tensor
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = tensorsymmetry([1, 1])
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
"""
is_commutative = False
def __new__(cls, index_types, symmetry, **kw_args):
assert symmetry.rank == len(index_types)
obj = Basic.__new__(cls, Tuple(*index_types), symmetry, **kw_args)
return obj
@property
def index_types(self):
return self.args[0]
@property
def symmetry(self):
return self.args[1]
@property
def types(self):
return sorted(set(self.index_types), key=lambda x: x.name)
def __str__(self):
return f'TensorType({[str(x) for x in self.index_types]})'
def __call__(self, s, comm=0, matrix_behavior=0):
"""
Return a TensorHead object or a list of TensorHead objects.
``s`` name or string of names
``comm``: commutation group number
see ``_TensorManager.set_comm``
Examples
========
Define symmetric tensors ``V``, ``W`` and ``G``, respectively
commuting, anticommuting and with no commutation symmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a b', Lorentz)
>>> sym2 = tensorsymmetry([1]*2)
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> V = S2('V')
>>> W = S2('W', 1)
>>> G = S2('G', 2)
>>> canon_bp(V(a, b)*V(-b, -a))
V(L_0, L_1)*V(-L_0, -L_1)
>>> canon_bp(W(a, b)*W(-b, -a))
0
"""
if isinstance(s, str):
names = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
if len(names) == 1:
return TensorHead(names[0], self, comm, matrix_behavior=matrix_behavior)
else:
return [TensorHead(name, self, comm, matrix_behavior=matrix_behavior) for name in names]
def tensorhead(name, typ, sym, comm=0, matrix_behavior=0):
"""
Function generating tensorhead(s).
Parameters
==========
name : name or sequence of names (as in ``symbol``)
typ : index types
sym : same as ``*args`` in ``tensorsymmetry``
comm : commutation group number
see ``_TensorManager.set_comm``
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a b', Lorentz)
>>> A = tensorhead('A', [Lorentz]*2, [[1]*2])
>>> A(a, -b)
A(a, -b)
"""
sym = tensorsymmetry(*sym)
S = TensorType(typ, sym)
th = S(name, comm, matrix_behavior=matrix_behavior)
return th
@doctest_depends_on(modules=('numpy',))
class TensorHead(Basic):
r"""
Tensor head of the tensor
Parameters
==========
name : name of the tensor
typ : list of TensorIndexType
comm : commutation group number
Attributes
==========
``name``
``index_types``
``rank``
``types`` : equal to ``typ.types``
``symmetry`` : equal to ``typ.symmetry``
``comm`` : int
commutation group
Notes
=====
A ``TensorHead`` belongs to a commutation group, defined by a
symbol or number ``comm`` (see ``_TensorManager.set_comm``);
tensors in a commutation group have the same commutation properties;
by default ``comm`` is ``0``, the group of the commuting tensors.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> A = tensorhead('A', [Lorentz, Lorentz], [[1], [1]])
Examples with ndarray values: the components data assigned to the
``TensorHead`` object are assumed to be in a fully contravariant
representation. In case it is necessary to assign components data which
represent the values of a tensor that is not fully contravariant, see the
other examples.
>>> Lorentz.data = [1, -1, -1, -1]
>>> i0, i1 = tensor_indices('i0:2', Lorentz)
>>> A.data = [[j+2*i for j in range(4)] for i in range(4)]
In order to retrieve data, it is also necessary to specify abstract indices
enclosed by round brackets, then numerical indices inside square brackets.
>>> A(i0, i1)[0, 0]
0
>>> A(i0, i1)[2, 3] == 3+2*2
True
Notice that square brackets create a valued tensor expression instance:
>>> A(i0, i1)
A(i0, i1)
To view the data, just type:
>>> print(str(A.data))
[[0 1 2 3]
[2 3 4 5]
[4 5 6 7]
[6 7 8 9]]
Turning to a tensor expression, covariant indices get the corresponding
components data corrected by the metric:
>>> print(str(A(i0, -i1).data))
[[0 -1 -2 -3]
[2 -3 -4 -5]
[4 -5 -6 -7]
[6 -7 -8 -9]]
>>> print(str(A(-i0, -i1).data))
[[0 -1 -2 -3]
[-2 3 4 5]
[-4 5 6 7]
[-6 7 8 9]]
while if all indices are contravariant, the ``ndarray`` remains the same
>>> print(str(A(i0, i1).data))
[[0 1 2 3]
[2 3 4 5]
[4 5 6 7]
[6 7 8 9]]
When all indices are contracted and components data are added to the tensor,
accessing the data will return a scalar, not a numpy object. In fact, numpy
ndarrays are dropped to scalars if they contain only one element.
>>> A(i0, -i0)
A(L_0, -L_0)
>>> A(i0, -i0).data
-18
It is also possible to assign components data to an indexed tensor, i.e. a
tensor with specified covariant and contravariant components. In this
example, the covariant components data of the Electromagnetic tensor are
injected into `A`:
>>> Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z')
>>> c = symbols('c', positive=True)
Let's define `F` as an antisymmetric tensor; we have to assign an
antisymmetric matrix to it, because `[[2]]` stands for the Young tableau
representation of an antisymmetric set of two elements:
>>> F = tensorhead('A', [Lorentz, Lorentz], [[2]])
>>> F(-i0, -i1).data = [[0, Ex/c, Ey/c, Ez/c],
... [-Ex/c, 0, -Bz, By],
... [-Ey/c, Bz, 0, -Bx],
... [-Ez/c, -By, Bx, 0]]
Now it is possible to retrieve the contravariant form of the Electromagnetic
tensor:
>>> print(str(F(i0, i1).data))
[[0 -E_x/c -E_y/c -E_z/c]
[E_x/c 0 -B_z B_y]
[E_y/c B_z 0 -B_x]
[E_z/c -B_y B_x 0]]
and the mixed contravariant-covariant form:
>>> print(str(F(i0, -i1).data))
[[0 E_x/c E_y/c E_z/c]
[E_x/c 0 B_z -B_y]
[E_y/c -B_z 0 B_x]
[E_z/c B_y -B_x 0]]
To convert the numpy ndarray to a diofant ``Matrix``, just cast it:
>>> Matrix(F.data)
Matrix([
[ 0, -E_x/c, -E_y/c, -E_z/c],
[E_x/c, 0, -B_z, B_y],
[E_y/c, B_z, 0, -B_x],
[E_z/c, -B_y, B_x, 0]])
    Notice also, in this last example, that accessing components data from a
    tensor without specifying the indices is equivalent to assuming that all
    indices are contravariant.
It is also possible to store symbolic components data inside a tensor, for
example, define a four-momentum-like tensor:
>>> P = tensorhead('P', [Lorentz], [[1]])
>>> E, px, py, pz = symbols('E p_x p_y p_z', positive=True)
>>> P.data = [E, px, py, pz]
The contravariant and covariant components are, respectively:
>>> print(str(P(i0).data))
[E p_x p_y p_z]
>>> print(str(P(-i0).data))
[E -p_x -p_y -p_z]
    The contraction of a 1-index tensor with itself is usually indicated by
    its square:
>>> P(i0)**2
E**2 - p_x**2 - p_y**2 - p_z**2
    As this square is clearly identical to `P_\mu P^\mu`, it is possible to
simply contract the ``TensorHead`` object, without specifying the indices
>>> P**2
E**2 - p_x**2 - p_y**2 - p_z**2
"""
is_commutative = False
def __new__(cls, name, typ, comm=0, matrix_behavior=0, **kw_args):
if isinstance(name, str):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
else:
raise ValueError('invalid name')
comm2i = TensorManager.comm_symbols2i(comm)
obj = Basic.__new__(cls, name_symbol, typ, **kw_args)
obj._matrix_behavior = matrix_behavior
obj._name = obj.args[0].name
obj._rank = len(obj.index_types)
obj._types = typ.types
obj._symmetry = typ.symmetry
obj._comm = comm2i
return obj
@property
def name(self):
return self._name
@property
def rank(self):
return self._rank
@property
def types(self):
return self._types[:]
@property
def symmetry(self):
return self._symmetry
@property
def typ(self):
return self.args[1]
@property
def comm(self):
return self._comm
@property
def index_types(self):
return self.args[1].index_types[:]
def __lt__(self, other):
return (self.name, self.index_types) < (other.name, other.index_types)
def commutes_with(self, other):
"""
Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute.
Returns ``None`` if ``self`` and ``other`` neither commute nor anticommute.
"""
r = TensorManager.get_comm(self._comm, other._comm)
return r
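    # Sketch of the return convention (not a doctest; follows the setup used in
    # the class docstring above): heads in the default commutation group
    # (comm=0) commute, so the result is 0; anticommuting heads give 1.
    #
    #     >>> A = tensorhead('A', [Lorentz], [[1]])
    #     >>> B = tensorhead('B', [Lorentz], [[1]])
    #     >>> A.commutes_with(B)
    #     0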
def _print(self):
return f"{self.name}({','.join([str(x) for x in self.index_types])})"
def _check_auto_matrix_indices_in_call(self, *indices):
matrix_behavior_kinds = {}
if len(indices) != len(self.index_types):
if not self._matrix_behavior:
raise ValueError('wrong number of indices')
# Take the last one or two missing
# indices as auto-matrix indices:
ldiff = len(self.index_types) - len(indices)
if ldiff > 2:
raise ValueError('wrong number of indices')
if ldiff == 2:
mat_ind = [len(indices), len(indices) + 1]
elif ldiff == 1:
mat_ind = [len(indices)]
not_equal = True
else:
not_equal = False
mat_ind = [i for i, e in enumerate(indices) if e is True]
if mat_ind:
not_equal = True
indices = tuple(_ for _ in indices if _ is not True)
for i, el in enumerate(indices):
if not isinstance(el, TensorIndex):
not_equal = True
break
if el._tensortype != self.index_types[i]:
not_equal = True
break
if not_equal:
for el in mat_ind:
eltyp = self.index_types[el]
if eltyp in matrix_behavior_kinds:
elind = -self.index_types[el].auto_right
matrix_behavior_kinds[eltyp].append(elind)
else:
elind = self.index_types[el].auto_left
matrix_behavior_kinds[eltyp] = [elind]
indices = indices[:el] + (elind,) + indices[el:]
return indices, matrix_behavior_kinds
def __call__(self, *indices, **kw_args):
"""
Returns a tensor with indices.
There is a special behavior in case of indices denoted by ``True``,
they are considered auto-matrix indices, their slots are automatically
filled, and confer to the tensor the behavior of a matrix or vector
upon multiplication with another tensor containing auto-matrix indices
of the same ``TensorIndexType``. This means indices get summed over the
same way as in matrix multiplication. For matrix behavior, define two
        auto-matrix indices; for vector behavior, define just one.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a b', Lorentz)
>>> A = tensorhead('A', [Lorentz]*2, [[1]*2])
>>> t = A(a, -b)
>>> t
A(a, -b)
To use the auto-matrix index behavior, just put a ``True`` on the
desired index position.
>>> r = A(True, True)
>>> r
A(auto_left, -auto_right)
Here ``auto_left`` and ``auto_right`` are automatically generated
tensor indices, they are only two for every ``TensorIndexType`` and
can be assigned to just one or two indices of a given type.
Auto-matrix indices can be assigned many times in a tensor, if indices
are of different ``TensorIndexType``
>>> Spinor = TensorIndexType('Spinor', dummy_fmt='S')
>>> B = tensorhead('B', [Lorentz, Lorentz, Spinor, Spinor], [[1]*4])
>>> s = B(True, True, True, True)
>>> s
B(auto_left, -auto_right, auto_left, -auto_right)
Here, ``auto_left`` and ``auto_right`` are repeated twice, but they are
not the same indices, as they refer to different ``TensorIndexType``s.
Auto-matrix indices are automatically contracted upon multiplication,
>>> r*s
A(auto_left, L_0)*B(-L_0, -auto_right, auto_left, -auto_right)
The multiplication algorithm has found an ``auto_right`` index in ``A``
and an ``auto_left`` index in ``B`` referring to the same
``TensorIndexType`` (``Lorentz``), so they have been contracted.
Auto-matrix indices can be accessed from the ``TensorIndexType``:
>>> Lorentz.auto_right
auto_right
>>> Lorentz.auto_left
auto_left
There is a special case, in which the ``True`` parameter is not needed
to declare an auto-matrix index, i.e. when the matrix behavior has been
declared upon ``TensorHead`` construction, in that case the last one or
two tensor indices may be omitted, so that they automatically become
auto-matrix indices:
>>> C = tensorhead('C', [Lorentz, Lorentz], [[1]*2], matrix_behavior=True)
>>> C()
C(auto_left, -auto_right)
"""
indices, _ = self._check_auto_matrix_indices_in_call(*indices)
tensor = Tensor._new_with_dummy_replacement(self, indices, **kw_args)
return tensor
def __pow__(self, other):
if self.data is None:
raise ValueError('No power on abstract tensors.')
numpy = import_module('numpy')
metrics = [_.data for _ in self.args[1].args[0]]
marray = self.data
for metric in metrics:
marray = numpy.tensordot(marray, numpy.tensordot(metric, marray, (1, 0)), (0, 0))
pow2 = marray[()]
return pow2 ** (Rational(1, 2) * other)
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
return self.data.flatten().__iter__()
@doctest_depends_on(modules=('numpy',))
class TensExpr(Basic):
"""
Abstract base class for tensor expressions
Notes
=====
A tensor expression is an expression formed by tensors;
currently the sums of tensors are distributed.
A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``.
``TensAdd`` objects are put in canonical form using the Butler-Portugal
algorithm for canonicalization under monoterm symmetries.
``TensMul`` objects are formed by products of component tensors,
and include a coefficient, which is a Diofant expression.
In the internal representation contracted indices are represented
by ``(ipos1, ipos2, icomp1, icomp2)``, where ``icomp1`` is the position
of the component tensor with contravariant index, ``ipos1`` is the
slot which the index occupies in that component tensor.
Contracted indices are therefore nameless in the internal representation.
"""
_op_priority = 11.0
is_commutative = False
def __neg__(self):
return self*Integer(-1)
def __abs__(self):
raise NotImplementedError
def __add__(self, other):
raise NotImplementedError
def __radd__(self, other):
raise NotImplementedError
def __sub__(self, other):
raise NotImplementedError
def __rsub__(self, other):
raise NotImplementedError
def __mul__(self, other):
raise NotImplementedError
def __pow__(self, other):
if self.data is None:
raise ValueError('No power without ndarray data.')
numpy = import_module('numpy')
free = self.free
marray = self.data
for metric in free:
marray = numpy.tensordot(
marray,
numpy.tensordot(
metric[0]._tensortype.data,
marray,
(1, 0)
),
(0, 0)
)
pow2 = marray[()]
return pow2 ** (Rational(1, 2) * other)
def __rpow__(self, other):
raise NotImplementedError
def __truediv__(self, other):
raise NotImplementedError
def __rtruediv__(self, other):
raise NotImplementedError()
@doctest_depends_on(modules=('numpy',))
def get_matrix(self):
"""
Returns ndarray components data as a matrix, if components data are
available and ndarray dimension does not exceed 2.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> sym2 = tensorsymmetry([1]*2)
>>> S2 = TensorType([Lorentz]*2, sym2)
>>> A = S2('A')
The tensor ``A`` is symmetric in its indices, as can be deduced by the
``[1, 1]`` Young tableau when constructing `sym2`. One has to be
careful to assign symmetric component data to ``A``, as the symmetry
properties of data are currently not checked to be compatible with the
defined tensor symmetry.
>>> Lorentz.data = [1, -1, -1, -1]
>>> i0, i1 = tensor_indices('i0:2', Lorentz)
>>> A.data = [[j+i for j in range(4)] for i in range(4)]
>>> A(i0, i1).get_matrix()
Matrix([
[0, 1, 2, 3],
[1, 2, 3, 4],
[2, 3, 4, 5],
[3, 4, 5, 6]])
It is possible to perform usual operation on matrices, such as the
matrix multiplication:
>>> A(i0, i1).get_matrix()*ones(4, 1)
Matrix([
[ 6],
[10],
[14],
[18]])
>>> del A.data
"""
if 0 < self.rank <= 2:
rows = self.data.shape[0]
columns = self.data.shape[1] if self.rank == 2 else 1
if self.rank == 2:
                mat_list = []
for i in range(rows):
mat_list.append([])
for j in range(columns):
mat_list[i].append(self[i, j])
else:
mat_list = [None] * rows
for i in range(rows):
mat_list[i] = self[i]
return Matrix(mat_list)
else:
raise NotImplementedError(
'missing multidimensional reduction to matrix.')
@doctest_depends_on(modules=('numpy',))
class TensAdd(TensExpr):
"""
Sum of tensors
Parameters
==========
free_args : list of the free indices
Attributes
==========
``args`` : tuple
of addends
    ``rank`` : int
rank of the tensor
``free_args`` : list
of the free indices in sorted order
Notes
=====
    Sums of more than one tensor are automatically put in canonical form.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b = tensor_indices('a b', Lorentz)
>>> p, q = tensorhead('p q', [Lorentz], [[1]])
>>> t = p(a) + q(a)
>>> t
p(a) + q(a)
>>> t(b)
p(b) + q(b)
Examples with components data added to the tensor expression:
>>> Lorentz.data = [1, -1, -1, -1]
>>> a, b = tensor_indices('a, b', Lorentz)
>>> p.data = [2, 3, -2, 7]
>>> q.data = [2, 3, -2, 7]
>>> t = p(a) + q(a)
>>> t
p(a) + q(a)
>>> t(b)
p(b) + q(b)
    The following both evaluate to 2**2 - 3**2 - 2**2 - 7**2, i.e. -58:
>>> (p(a)*p(-a)).data
-58
>>> p(a)**2
-58
"""
def __new__(cls, *args, **kw_args):
args = [sympify(x) for x in args if x]
args = TensAdd._tensAdd_flatten(args)
if not args:
return Integer(0)
if len(args) == 1 and not isinstance(args[0], TensExpr):
return args[0]
# replace auto-matrix indices so that they are the same in all addends
args = TensAdd._tensAdd_check_automatrix(args)
# now check that all addends have the same indices:
TensAdd._tensAdd_check(args)
# if TensAdd has only 1 TensMul element in its `args`:
if len(args) == 1 and isinstance(args[0], TensMul):
obj = Basic.__new__(cls, *args, **kw_args)
return obj
        # TODO: should addends be canonicalized by default or not?
        # Technically, one may wish to have additions of non-canonicalized
        # tensors. This feature should be removed in the future.
        # Unfortunately this would require rewriting a lot of tests.
# canonicalize all TensMul
args = [canon_bp(x) for x in args if x]
args = [x for x in args if x]
# if there are no more args (i.e. have cancelled out),
# just return zero:
if not args:
return Integer(0)
if len(args) == 1:
return args[0]
# collect canonicalized terms
def sort_key(t):
x = get_tids(t)
return x.components, x.free, x.dum
args.sort(key=sort_key)
args = TensAdd._tensAdd_collect_terms(args)
if not args:
return Integer(0)
        # if there is only one component tensor, return it
if len(args) == 1:
return args[0]
obj = Basic.__new__(cls, *args, **kw_args)
return obj
@staticmethod
def _tensAdd_flatten(args):
# flatten TensAdd, coerce terms which are not tensors to tensors
if not all(isinstance(x, TensExpr) for x in args):
args_expanded = []
for x in args:
if isinstance(x, TensAdd):
args_expanded.extend(list(x.args))
else:
args_expanded.append(x)
args_tensor = []
args_scalar = []
for x in args_expanded:
if isinstance(x, TensExpr) and x.coeff:
args_tensor.append(x)
if not isinstance(x, TensExpr):
args_scalar.append(x)
t1 = TensMul.from_data(Add(*args_scalar), [], [], [])
args = [t1] + args_tensor
a = []
for x in args:
if isinstance(x, TensAdd):
a.extend(list(x.args))
else:
a.append(x)
args = [x for x in a if x.coeff]
return args
@staticmethod
def _tensAdd_check_automatrix(args):
# check that all automatrix indices are the same.
# if there are no addends, just return.
if not args:
return args
# @type auto_left_types: set
auto_left_types = set()
auto_right_types = set()
args_auto_left_types = []
args_auto_right_types = []
for i, arg in enumerate(args):
arg_auto_left_types = set()
arg_auto_right_types = set()
for index in get_indices(arg):
# @type index: TensorIndex
if index in (index._tensortype.auto_left, -index._tensortype.auto_left):
auto_left_types.add(index._tensortype)
arg_auto_left_types.add(index._tensortype)
if index in (index._tensortype.auto_right, -index._tensortype.auto_right):
auto_right_types.add(index._tensortype)
arg_auto_right_types.add(index._tensortype)
args_auto_left_types.append(arg_auto_left_types)
args_auto_right_types.append(arg_auto_right_types)
        for i, (arg, aas_left, aas_right) in enumerate(zip(args, args_auto_left_types, args_auto_right_types)):
missing_left = auto_left_types - aas_left
missing_right = auto_right_types - aas_right
missing_intersection = missing_left & missing_right
for j in missing_intersection:
args[i] *= j.delta(j.auto_left, -j.auto_right)
if missing_left != missing_right:
raise ValueError('cannot determine how to add auto-matrix indices on some args')
return args
@staticmethod
def _tensAdd_check(args):
# check that all addends have the same free indices
indices0 = {x[0] for x in get_tids(args[0]).free}
list_indices = [{y[0] for y in get_tids(x).free} for x in args[1:]]
if not all(x == indices0 for x in list_indices):
raise ValueError('all tensors must have the same indices')
@staticmethod
def _tensAdd_collect_terms(args):
# collect TensMul terms differing at most by their coefficient
a = []
prev = args[0]
prev_coeff = get_coeff(prev)
changed = False
for x in args[1:]:
# if x and prev have the same tensor, update the coeff of prev
x_tids = get_tids(x)
prev_tids = get_tids(prev)
if x_tids.components == prev_tids.components \
and x_tids.free == prev_tids.free and x_tids.dum == prev_tids.dum:
prev_coeff = prev_coeff + get_coeff(x)
changed = True
op = 0
else:
# x and prev are different; if not changed, prev has not
# been updated; store it
if not changed:
a.append(prev)
else:
# get a tensor from prev with coeff=prev_coeff and store it
if prev_coeff:
t = TensMul.from_data(prev_coeff, prev_tids.components,
prev_tids.free, prev_tids.dum)
a.append(t)
# move x to prev
op = 1
prev = x
prev_coeff = get_coeff(x)
changed = False
        # in the case op=0 prev was not stored; store it now
# in the case op=1 x was not stored; store it now (as prev)
if op == 0 and prev_coeff:
prev = TensMul.from_data(prev_coeff, prev_tids.components, prev_tids.free, prev_tids.dum)
a.append(prev)
elif op == 1:
a.append(prev)
return a
@property
def rank(self):
return self.args[0].rank
@property
def free_args(self):
return self.args[0].free_args
def __call__(self, *indices):
"""Returns tensor with ordered free indices replaced by ``indices``
Parameters
==========
indices
Examples
========
>>> D = Symbol('D')
>>> Lorentz = TensorIndexType('Lorentz', dim=D, dummy_fmt='L')
>>> i0, i1, i2, i3, i4 = tensor_indices('i0:5', Lorentz)
>>> p, q = tensorhead('p q', [Lorentz], [[1]])
>>> g = Lorentz.metric
>>> t = p(i0)*p(i1) + g(i0, i1)*q(i2)*q(-i2)
>>> t(i0, i2)
metric(i0, i2)*q(L_0)*q(-L_0) + p(i0)*p(i2)
>>> t(i0, i1) - t(i1, i0)
0
"""
free_args = self.free_args
indices = list(indices)
if [x._tensortype for x in indices] != [x._tensortype for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
index_tuples = list(zip(free_args, indices))
a = [x.func(*x.fun_eval(*index_tuples).args) for x in self.args]
res = TensAdd(*a)
return res
def canon_bp(self):
"""
Canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
"""
args = [x.canon_bp() for x in self.args]
res = TensAdd(*args)
return res
def equals(self, other):
other = sympify(other)
if isinstance(other, TensMul) and other._coeff == 0:
return all(x._coeff == 0 for x in self.args)
if isinstance(other, TensExpr):
if self.rank != other.rank:
return False
if isinstance(other, TensAdd):
if set(self.args) != set(other.args):
return False
else:
return True
t = self - other
if not isinstance(t, TensExpr):
return t == 0
else:
if isinstance(t, TensMul):
return t._coeff == 0
else:
return all(x._coeff == 0 for x in t.args)
def __add__(self, other):
return TensAdd(self, other)
def __radd__(self, other):
return TensAdd(other, self)
def __sub__(self, other):
return TensAdd(self, -other)
def __rsub__(self, other):
return TensAdd(other, -self)
def __mul__(self, other):
return TensAdd(*(x*other for x in self.args))
def __rmul__(self, other):
return self*other
def __truediv__(self, other):
other = sympify(other)
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
return TensAdd(*(x/other for x in self.args))
def __rtruediv__(self, other):
raise ValueError('cannot divide by a tensor')
def __getitem__(self, item):
# pylint: disable=unsubscriptable-object
return self.data[item]
def contract_delta(self, delta):
args = [x.contract_delta(delta) for x in self.args]
t = TensAdd(*args)
return canon_bp(t)
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``
Parameters
==========
g : metric
See Also
========
TensorIndexType
"""
args = [contract_metric(x, g) for x in self.args]
t = TensAdd(*args)
return canon_bp(t)
def fun_eval(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
Parameters
==========
        index_tuples : list of tuples ``(old_index, new_index)``
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i j k l', Lorentz)
>>> A, B = tensorhead('A B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j) + A(i, -j)
>>> t.fun_eval((i, k), (-j, l))
A(k, L_0)*B(l, -L_0) + A(k, l)
"""
args = self.args
args1 = []
for x in args:
y = x.fun_eval(*index_tuples)
args1.append(y)
return TensAdd(*args1)
def substitute_indices(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
Parameters
==========
        index_tuples : list of tuples ``(old_index, new_index)``
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i j k l', Lorentz)
>>> A, B = tensorhead('A B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j)
>>> t
A(i, L_0)*B(-L_0, -j)
>>> t.substitute_indices((i, j), (j, k))
A(j, L_0)*B(-L_0, -k)
"""
args = self.args
args1 = []
for x in args:
y = x.substitute_indices(*index_tuples)
args1.append(y)
return TensAdd(*args1)
def _print(self):
a = []
args = self.args
for x in args:
a.append(str(x))
a.sort()
s = ' + '.join(a)
s = s.replace('+ -', '- ')
return s
@staticmethod
def from_TIDS_list(coeff, tids_list):
"""
Given a list of coefficients and a list of ``TIDS`` objects, construct
a ``TensAdd`` instance, equivalent to the one that would result from
creating single instances of ``TensMul`` and then adding them.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j = tensor_indices('i j', Lorentz)
>>> A, B = tensorhead('A B', [Lorentz]*2, [[1]*2])
>>> ea = 3*A(i, j)
>>> eb = 2*B(j, i)
>>> t1 = ea._tids
>>> t2 = eb._tids
>>> c1 = ea.coeff
>>> c2 = eb.coeff
>>> TensAdd.from_TIDS_list([c1, c2], [t1, t2])
2*B(i, j) + 3*A(i, j)
If the coefficient parameter is a scalar, then it will be applied
as a coefficient on all ``TIDS`` objects.
>>> TensAdd.from_TIDS_list(4, [t1, t2])
4*A(i, j) + 4*B(i, j)
"""
if not isinstance(coeff, (list, tuple, Tuple)):
coeff = [coeff] * len(tids_list)
tensmul_list = [TensMul.from_TIDS(c, t) for c, t in zip(coeff, tids_list)]
return TensAdd(*tensmul_list)
@property
def data(self):
return _tensor_data_substitution_dict[self]
@doctest_depends_on(modules=('numpy',))
class Tensor(TensExpr):
"""
Base tensor class, i.e. this represents a tensor, the single unit to be
put into an expression.
This object is usually created from a ``TensorHead``, by attaching indices
    to it. Indices preceded by a minus sign are considered covariant,
    otherwise contravariant.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> mu, nu = tensor_indices('mu nu', Lorentz)
>>> A = tensorhead('A', [Lorentz, Lorentz], [[1], [1]])
>>> A(mu, -nu)
A(mu, -nu)
>>> A(mu, -mu)
A(L_0, -L_0)
"""
is_commutative = False
def __new__(cls, tensor_head, indices, **kw_args):
tids = TIDS.from_components_and_indices((tensor_head,), indices)
obj = Basic.__new__(cls, tensor_head, Tuple(*indices), **kw_args)
obj._tids = tids
obj._indices = indices
obj._is_canon_bp = kw_args.get('is_canon_bp', False)
return obj
@staticmethod
def _new_with_dummy_replacement(tensor_head, indices, **kw_args):
tids = TIDS.from_components_and_indices((tensor_head,), indices)
indices = tids.get_indices()
return Tensor(tensor_head, indices, **kw_args)
@property
def is_canon_bp(self):
return self._is_canon_bp
@property
def indices(self):
return self._indices
@property
def free(self):
return self._tids.free
@property
def dum(self):
return self._tids.dum
@property
def rank(self):
return len(self.free)
@property
def free_args(self):
return sorted(x[0] for x in self.free)
def perm2tensor(self, g, canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, canon_bp)
def canon_bp(self):
if self._is_canon_bp:
return self
g, dummies, msym, v = self._tids.canon_args()
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return Integer(0)
tensor = self.perm2tensor(can, True)
return tensor
@property
def types(self):
return get_tids(self).components[0]._types
@property
def coeff(self):
return Integer(1)
@property
def component(self):
return self.args[0]
@property
def components(self):
return [self.args[0]]
def split(self):
return [self]
def get_indices(self):
"""Get a list of indices, corresponding to those of the tensor."""
return self._tids.get_indices()
def substitute_indices(self, *index_tuples):
return substitute_indices(self, *index_tuples)
def __call__(self, *indices):
"""Returns tensor with ordered free indices replaced by ``indices``
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i0, i1, i2, i3, i4 = tensor_indices('i0:5', Lorentz)
>>> A = tensorhead('A', [Lorentz]*5, [[1]*5])
>>> t = A(i2, i1, -i2, -i3, i4)
>>> t
A(L_0, i1, -L_0, -i3, i4)
>>> t(i1, i2, i3)
A(L_0, i1, -L_0, i2, i3)
"""
free_args = self.free_args
indices = list(indices)
if [x._tensortype for x in indices] != [x._tensortype for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.fun_eval(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len({i if i.is_up else -i for i in indices}) != len(indices):
return t.func(*t.args)
return t
def fun_eval(self, *index_tuples):
free = self.free
free1 = []
for j, ipos, cpos in free:
# search j in index_tuples
for i, v in index_tuples:
if i == j:
free1.append((v, ipos, cpos))
break
else:
free1.append((j, ipos, cpos))
return TensMul.from_data(self.coeff, self.components, free1, self.dum)
# TODO: put this into TensExpr?
def __iter__(self):
return self.data.flatten().__iter__()
# TODO: put this into TensExpr?
def __getitem__(self, item):
return self.data[item]
@property
def data(self):
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
# TODO: check data compatibility with properties of tensor.
_tensor_data_substitution_dict[self] = data
def __mul__(self, other):
if isinstance(other, TensAdd):
return TensAdd(*[self*arg for arg in other.args])
tmul = TensMul(self, other)
return tmul
def __rmul__(self, other):
return TensMul(other, self)
def __truediv__(self, other):
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
return TensMul(self, Integer(1)/other, is_canon_bp=self.is_canon_bp)
def __rtruediv__(self, other):
raise ValueError('cannot divide by a tensor')
def __add__(self, other):
return TensAdd(self, other)
def __radd__(self, other):
return TensAdd(other, self)
def __sub__(self, other):
return TensAdd(self, -other)
def __neg__(self):
return TensMul(Integer(-1), self)
def _print(self):
indices = [str(ind) for ind in self.indices]
component = self.component
if component.rank > 0:
return f"{component.name}({', '.join(indices)})"
else:
return f'{component.name}'
def equals(self, other):
if other == 0:
return self.coeff == 0
other = sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return Integer(1) == other
def _get_compar_comp(self):
t = self.canon_bp()
r = (t.coeff, tuple(t.components),
tuple(sorted(t.free)), tuple(sorted(t.dum)))
return r
return _get_compar_comp(self) == _get_compar_comp(other)
def contract_metric(self, metric):
tids, sign = get_tids(self).contract_metric(metric)
return TensMul.from_TIDS(sign, tids)
@doctest_depends_on(modules=('numpy',))
class TensMul(TensExpr):
"""
Product of tensors
Parameters
==========
coeff : Diofant coefficient of the tensor
args
Attributes
==========
``components`` : list of ``TensorHead`` of the component tensors
``types`` : list of nonrepeated ``TensorIndexType``
``free`` : list of ``(ind, ipos, icomp)``, see Notes
``dum`` : list of ``(ipos1, ipos2, icomp1, icomp2)``, see Notes
    ``ext_rank`` : int
rank of the tensor counting the dummy indices
    ``rank`` : int
rank of the tensor
``coeff`` : Expr
Diofant coefficient of the tensor
``free_args`` : list
list of the free indices in sorted order
    ``is_canon_bp`` : ``True`` if the tensor is in canonical form
Notes
=====
``args[0]`` list of ``TensorHead`` of the component tensors.
``args[1]`` list of ``(ind, ipos, icomp)``
where ``ind`` is a free index, ``ipos`` is the slot position
of ``ind`` in the ``icomp``-th component tensor.
``args[2]`` list of tuples representing dummy indices.
``(ipos1, ipos2, icomp1, icomp2)`` indicates that the contravariant
dummy index is the ``ipos1``-th slot position in the ``icomp1``-th
component tensor; the corresponding covariant index is
in the ``ipos2`` slot position in the ``icomp2``-th component tensor.
"""
def __new__(cls, *args, **kw_args):
# make sure everything is sympified:
args = [sympify(arg) for arg in args]
# flatten:
args = TensMul._flatten(args)
is_canon_bp = kw_args.get('is_canon_bp', False)
if not any(isinstance(arg, TensExpr) for arg in args):
tids = TIDS([], [], [])
else:
tids_list = [arg._tids for arg in args if isinstance(arg, (Tensor, TensMul))]
if len(tids_list) == 1:
for arg in args:
if not isinstance(arg, Tensor):
continue
is_canon_bp = kw_args.get('is_canon_bp', arg._is_canon_bp)
tids = functools.reduce(lambda a, b: a*b, tids_list)
coeff = functools.reduce(lambda a, b: a*b, [Integer(1)] + [arg for arg in args if not isinstance(arg, TensExpr)])
args = tids.get_tensors()
if coeff != 1:
args = [coeff] + args
if len(args) == 1:
return args[0]
obj = Basic.__new__(cls, *args)
obj._types = []
for t in tids.components:
obj._types.extend(t._types)
obj._tids = tids
obj._ext_rank = len(obj._tids.free) + 2*len(obj._tids.dum)
obj._coeff = coeff
obj._is_canon_bp = is_canon_bp
return obj
@staticmethod
def _flatten(args):
a = []
for arg in args:
if isinstance(arg, TensMul):
a.extend(arg.args)
else:
a.append(arg)
return a
@staticmethod
def from_data(coeff, components, free, dum, **kw_args):
tids = TIDS(components, free, dum)
return TensMul.from_TIDS(coeff, tids, **kw_args)
@staticmethod
def from_TIDS(coeff, tids, **kw_args):
return TensMul(coeff, *tids.get_tensors(), **kw_args)
@property
def free_args(self):
return sorted(x[0] for x in self.free)
@property
def components(self):
return self._tids.components[:]
@property
def free(self):
return self._tids.free[:]
@property
def coeff(self):
return self._coeff
@property
def dum(self):
return self._tids.dum[:]
@property
def rank(self):
return len(self.free)
@property
def types(self):
return self._types[:]
def equals(self, other):
if other == 0:
return self.coeff == 0
other = sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return self._coeff == other
def _get_compar_comp(self):
t = self.canon_bp()
r = (get_coeff(t), tuple(t.components),
tuple(sorted(t.free)), tuple(sorted(t.dum)))
return r
return _get_compar_comp(self) == _get_compar_comp(other)
def get_indices(self):
"""
Returns the list of indices of the tensor
The indices are listed in the order in which they appear in the
component tensors.
The dummy indices are given a name which does not collide with
the names of the free indices.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0 m1 m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p q', [Lorentz], [[1]])
>>> t = p(m1)*g(m0, m2)
>>> t.get_indices()
[m1, m0, m2]
"""
return self._tids.get_indices()
def split(self):
"""
Returns a list of tensors, whose product is ``self``
Dummy indices contracted among different tensor components
become free indices with the same name as the one used to
represent the dummy indices.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> a, b, c, d = tensor_indices('a b c d', Lorentz)
>>> A, B = tensorhead('A B', [Lorentz]*2, [[1]*2])
>>> t = A(a, b)*B(-b, c)
>>> t
A(a, L_0)*B(-L_0, c)
>>> t.split()
[A(a, L_0), B(-L_0, c)]
"""
if not self.args:
return [self]
splitp = []
res = 1
for arg in self.args:
if isinstance(arg, Tensor):
splitp.append(res*arg)
res = 1
else:
res *= arg
return splitp
def __add__(self, other):
return TensAdd(self, other)
def __radd__(self, other):
return TensAdd(other, self)
def __sub__(self, other):
return TensAdd(self, -other)
def __rsub__(self, other):
return TensAdd(other, -self)
def __mul__(self, other):
"""
Multiply two tensors using Einstein summation convention.
If the two tensors have an index in common, one contravariant
and the other covariant, in their product the indices are summed
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0 m1 m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p q', [Lorentz], [[1]])
>>> t1 = p(m0)
>>> t2 = q(-m0)
>>> t1*t2
p(L_0)*q(-L_0)
"""
other = sympify(other)
if not isinstance(other, TensExpr):
coeff = self.coeff*other
tmul = TensMul.from_TIDS(coeff, self._tids, is_canon_bp=self._is_canon_bp)
return tmul
if isinstance(other, TensAdd):
return TensAdd(*[self*x for x in other.args])
new_tids = self._tids*other._tids
coeff = self.coeff*other.coeff
tmul = TensMul.from_TIDS(coeff, new_tids)
return tmul
def __rmul__(self, other):
other = sympify(other)
coeff = other*self._coeff
tmul = TensMul.from_TIDS(coeff, self._tids)
return tmul
def __truediv__(self, other):
other = sympify(other)
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
coeff = self._coeff/other
tmul = TensMul.from_TIDS(coeff, self._tids, is_canon_bp=self._is_canon_bp)
return tmul
def __getitem__(self, item):
return self.data[item]
def sorted_components(self):
"""
Returns a tensor with sorted components
calling the corresponding method in a ``TIDS`` object.
"""
new_tids, sign = self._tids.sorted_components()
coeff = -self.coeff if sign == -1 else self.coeff
t = TensMul.from_TIDS(coeff, new_tids)
return t
def perm2tensor(self, g, canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, canon_bp)
def canon_bp(self):
"""
Canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0 m1 m2', Lorentz)
>>> A = tensorhead('A', [Lorentz]*2, [[2]])
>>> t = A(m0, -m1)*A(m1, -m0)
>>> t.canon_bp()
-A(L_0, L_1)*A(-L_0, -L_1)
>>> t = A(m0, -m1)*A(m1, -m2)*A(m2, -m0)
>>> t.canon_bp()
0
"""
if self._is_canon_bp:
return self
if not self.components:
return self
t = self.sorted_components()
g, dummies, msym, v = t._tids.canon_args()
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return Integer(0)
tmul = t.perm2tensor(can, True)
return tmul
def contract_delta(self, delta):
t = self.contract_metric(delta)
return t
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``
Parameters
==========
g : metric
See Also
========
TensorIndexType
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> m0, m1, m2 = tensor_indices('m0 m1 m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p q', [Lorentz], [[1]])
>>> t = p(m0)*q(m1)*g(-m0, -m1)
>>> t.canon_bp()
metric(L_0, L_1)*p(-L_0)*q(-L_1)
>>> t.contract_metric(g).canon_bp()
p(L_0)*q(-L_0)
"""
tids, sign = get_tids(self).contract_metric(g)
res = TensMul.from_TIDS(sign*self.coeff, tids)
return res
def substitute_indices(self, *index_tuples):
return substitute_indices(self, *index_tuples)
def fun_eval(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
        ``index_tuples`` list of tuples ``(old_index, new_index)``
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i j k l', Lorentz)
>>> A, B = tensorhead('A B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j)
>>> t
A(i, L_0)*B(-L_0, -j)
>>> t.fun_eval((i, k), (-j, l))
A(k, L_0)*B(-L_0, l)
"""
free = self.free
free1 = []
for j, ipos, cpos in free:
# search j in index_tuples
for i, v in index_tuples:
if i == j:
free1.append((v, ipos, cpos))
break
else:
free1.append((j, ipos, cpos))
return TensMul.from_data(self.coeff, self.components, free1, self.dum)
def __call__(self, *indices):
"""Returns tensor product with ordered free indices replaced by ``indices``
Examples
========
>>> D = Symbol('D')
>>> Lorentz = TensorIndexType('Lorentz', dim=D, dummy_fmt='L')
>>> i0, i1, i2, i3, i4 = tensor_indices('i0:5', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensorhead('p q', [Lorentz], [[1]])
>>> t = p(i0)*q(i1)*q(-i1)
>>> t(i1)
p(i1)*q(L_0)*q(-L_0)
"""
free_args = self.free_args
indices = list(indices)
if [x._tensortype for x in indices] != [x._tensortype for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.fun_eval(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len({i if i.is_up else -i for i in indices}) != len(indices):
return t.func(*t.args)
return t
def _print(self):
args = self.args
def get_str(arg):
return str(arg) if arg.is_Atom or isinstance(arg, TensExpr) else f'({arg!s})'
if not args:
# no arguments is equivalent to "1", i.e. TensMul().
# If tensors are constructed correctly, this should never occur.
return '1'
if self.coeff == -1:
# expressions like "-A(a)"
return '-'+'*'.join([get_str(arg) for arg in args[1:]])
# prints expressions like "A(a)", "3*A(a)", "(1+x)*A(a)"
return '*'.join([get_str(arg) for arg in self.args])
@property
def data(self):
dat = _tensor_data_substitution_dict[self]
if dat is not None:
return self.coeff * dat
def __iter__(self):
if self.data is None:
raise ValueError('No iteration on abstract tensors')
return self.data.flatten().__iter__()
def canon_bp(p):
"""Butler-Portugal canonicalization."""
if isinstance(p, TensExpr):
return p.canon_bp()
return p
def tensor_mul(*a):
"""Product of tensors."""
if not a:
return TensMul.from_data(Integer(1), [], [], [])
t = a[0]
for tx in a[1:]:
t = t*tx
return t
def riemann_cyclic_replace(t_r):
"""Replace Riemann tensor with an equivalent expression.
``R(m,n,p,q) -> 2/3*R(m,n,p,q) - 1/3*R(m,q,n,p) + 1/3*R(m,p,n,q)``
"""
free = sorted(t_r.free, key=lambda x: x[1])
m, n, p, q = (x[0] for x in free)
t0 = Rational(2, 3)*t_r
t1 = - Rational(1, 3)*t_r.substitute_indices((m, m), (n, q), (p, n), (q, p))
t2 = Rational(1, 3)*t_r.substitute_indices((m, m), (n, p), (p, n), (q, q))
t3 = t0 + t1 + t2
return t3
def riemann_cyclic(t2):
"""
Replace each Riemann tensor with an equivalent expression
satisfying the cyclic identity.
This trick is discussed in the reference guide to Cadabra.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i j k l', Lorentz)
>>> R = tensorhead('R', [Lorentz]*4, [[2, 2]])
>>> t = R(i, j, k, l)*(R(-i, -j, -k, -l) - 2*R(-i, -k, -j, -l))
>>> riemann_cyclic(t)
0
"""
if isinstance(t2, (TensMul, Tensor)):
args = [t2]
else:
args = t2.args
a1 = [x.split() for x in args]
a2 = [[riemann_cyclic_replace(tx) for tx in y] for y in a1]
a3 = [tensor_mul(*v) for v in a2]
t3 = TensAdd(*a3)
if not t3:
return t3
else:
return canon_bp(t3)
def get_indices(t):
if not isinstance(t, TensExpr):
return ()
return t.get_indices()
def get_tids(t):
if isinstance(t, TensExpr):
return t._tids
return TIDS([], [], [])
def get_coeff(t):
if isinstance(t, Tensor):
return Integer(1)
if isinstance(t, TensMul):
return t.coeff
if isinstance(t, TensExpr):
raise ValueError('no coefficient associated to this tensor expression')
return t
def contract_metric(t, g):
if isinstance(t, TensExpr):
return t.contract_metric(g)
return t
def perm2tensor(t, g, canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
if not isinstance(t, TensExpr):
return t
new_tids = get_tids(t).perm2tensor(g, canon_bp)
coeff = get_coeff(t)
if g[-1] != len(g) - 1:
coeff = -coeff
res = TensMul.from_TIDS(coeff, new_tids, is_canon_bp=canon_bp)
return res
def substitute_indices(t, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
    ``index_tuples`` list of tuples ``(old_index, new_index)``
    Note: this method will neither raise nor lower the indices; it will just replace their symbols.
Examples
========
>>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
>>> i, j, k, l = tensor_indices('i j k l', Lorentz)
>>> A, B = tensorhead('A B', [Lorentz]*2, [[1]*2])
>>> t = A(i, k)*B(-k, -j)
>>> t
A(i, L_0)*B(-L_0, -j)
>>> t.substitute_indices((i, j), (j, k))
A(j, L_0)*B(-L_0, -k)
"""
if not isinstance(t, TensExpr):
return t
free = t.free
free1 = []
for j, ipos, cpos in free:
for i, v in index_tuples:
if i._name == j._name and i._tensortype == j._tensortype:
if i._is_up == j._is_up:
free1.append((v, ipos, cpos))
else:
free1.append((-v, ipos, cpos))
break
else:
free1.append((j, ipos, cpos))
t = TensMul.from_data(t.coeff, t.components, free1, t.dum)
return t
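# Quick comparison of the two substitution helpers defined in this module (a
# sketch in the doctest style used above; the shown outputs are indicative):
#
#     >>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
#     >>> i, j = tensor_indices('i j', Lorentz)
#     >>> A = tensorhead('A', [Lorentz], [[1]])
#     >>> A(-i).substitute_indices((i, j))  # matches by name, keeps the valence
#     A(-j)
#     >>> A(-i).fun_eval((i, j))            # matches the exact index; -i is not i
#     A(-i)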
|
{
"content_hash": "8864d833f0b2cffe539e44ab456ab229",
"timestamp": "",
"source": "github",
"line_count": 3740,
"max_line_length": 137,
"avg_line_length": 31.94946524064171,
"alnum_prop": 0.5394297478471182,
"repo_name": "diofant/diofant",
"id": "aa1a74533f9827e64044c0ecd940eb6e5d2b846e",
"size": "119491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diofant/tensor/tensor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9063539"
}
],
"symlink_target": ""
}
|
""" Simulate Burgers' Equation
Simple advection-diffusion equation.
"""
from phi.flow import *
velocity = CenteredGrid(Noise(vector=2), extrapolation.PERIODIC, x=64, y=64, bounds=Box(x=200, y=100)) * 2
# @jit_compile # for PyTorch, TensorFlow and Jax
def burgers_step(v, dt=1.):
v = diffuse.explicit(v, 0.1, dt=dt)
v = advect.semi_lagrangian(v, v, dt=dt)
return v
for _ in view(play=False, framerate=10, namespace=globals()).range():
velocity = burgers_step(velocity)
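# Headless variant (a sketch): without the interactive `view(...)` loop above,
# the same simulation can be advanced for a fixed number of steps, e.g.
#
#     for _ in range(100):
#         velocity = burgers_step(velocity)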
|
{
"content_hash": "f95ad9652f2f9a2f68916d09753ad43a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 106,
"avg_line_length": 27.27777777777778,
"alnum_prop": 0.6863543788187373,
"repo_name": "tum-pbs/PhiFlow",
"id": "a9f0a7e3ef6151f24c2cd7fa308c70c40e58fadd",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/burgers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "86714"
},
{
"name": "Python",
"bytes": "1413858"
}
],
"symlink_target": ""
}
|
from ..lexer import Token
from ..model import TestCase, Keyword, For, If, Try, While
class Parser:
"""Base class for parsers."""
def __init__(self, model):
self.model = model
def handles(self, statement):
raise NotImplementedError
def parse(self, statement):
raise NotImplementedError
class BlockParser(Parser):
unhandled_tokens = Token.HEADER_TOKENS | frozenset((Token.TESTCASE_NAME,
Token.KEYWORD_NAME))
def __init__(self, model):
Parser.__init__(self, model)
self.nested_parsers = {
Token.FOR: ForParser,
Token.IF: IfParser,
Token.INLINE_IF: IfParser,
Token.TRY: TryParser,
Token.WHILE: WhileParser
}
def handles(self, statement):
return statement.type not in self.unhandled_tokens
def parse(self, statement):
parser_class = self.nested_parsers.get(statement.type)
if parser_class:
parser = parser_class(statement)
self.model.body.append(parser.model)
return parser
self.model.body.append(statement)
return None
class TestCaseParser(BlockParser):
def __init__(self, header):
BlockParser.__init__(self, TestCase(header))
class KeywordParser(BlockParser):
def __init__(self, header):
BlockParser.__init__(self, Keyword(header))
class NestedBlockParser(BlockParser):
def handles(self, statement):
return BlockParser.handles(self, statement) and \
not getattr(self.model, 'end', False)
def parse(self, statement):
if statement.type == Token.END:
self.model.end = statement
return None
return BlockParser.parse(self, statement)
class ForParser(NestedBlockParser):
def __init__(self, header):
NestedBlockParser.__init__(self, For(header))
class IfParser(NestedBlockParser):
def __init__(self, header, handle_end=True):
super().__init__(If(header))
self.handle_end = handle_end
def parse(self, statement):
if statement.type in (Token.ELSE_IF, Token.ELSE):
parser = IfParser(statement, handle_end=False)
self.model.orelse = parser.model
return parser
return NestedBlockParser.parse(self, statement)
def handles(self, statement):
if statement.type == Token.END and not self.handle_end:
return False
return super().handles(statement)
class TryParser(NestedBlockParser):
def __init__(self, header, handle_end=True):
super().__init__(Try(header))
self.handle_end = handle_end
def parse(self, statement):
if statement.type in (Token.EXCEPT, Token.ELSE, Token.FINALLY):
parser = TryParser(statement, handle_end=False)
self.model.next = parser.model
return parser
return super().parse(statement)
def handles(self, statement):
if statement.type == Token.END and not self.handle_end:
return False
return super().handles(statement)
class WhileParser(NestedBlockParser):
def __init__(self, header):
super().__init__(While(header))
|
{
"content_hash": "791a0877773bf9247fe41ad624aa7159",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 76,
"avg_line_length": 27.846153846153847,
"alnum_prop": 0.612645794966237,
"repo_name": "robotframework/robotframework",
"id": "82a2fb85b4fa7827d676dcb174cafde12562a243",
"size": "3902",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/robot/parsing/parser/blockparsers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44632"
},
{
"name": "HTML",
"bytes": "86871"
},
{
"name": "JavaScript",
"bytes": "162950"
},
{
"name": "Python",
"bytes": "2764220"
},
{
"name": "RobotFramework",
"bytes": "1260097"
}
],
"symlink_target": ""
}
|
import base64
import datetime
import subprocess
import sys
import threading
import time
from pathlib import Path
from xml.etree import ElementTree as ET
import click
import requests
from prettytable import PrettyTable
DATE = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H')
TMP_PATH = Path('./tmp').resolve()
if not TMP_PATH.exists():
print('Create tmp dir. path: {}'.format(str(TMP_PATH)))
TMP_PATH.mkdir(parents=True)
OUTPUT_PATH = Path('./output').resolve()
if not OUTPUT_PATH.exists():
print('Create output dir. path: {}'.format(str(OUTPUT_PATH)))
    OUTPUT_PATH.mkdir(parents=True)
PLAYERFILE_PATH = Path(TMP_PATH, 'player.{}.swf'.format(DATE))
KEYFILE_PATH = Path(TMP_PATH, 'authkey.{}.jpg'.format(DATE))
PLAYLISTFILE_PATH = Path(TMP_PATH, 'playlist.{}.m3u8'.format(DATE))
# Adapted from http://stackoverflow.com/questions/4995733/how-to-create-a-spinning-command-line-cursor-using-python
class Spinner:
busy = False
delay = 0.5
@staticmethod
def spinning_cursor():
while 1:
for cursor in '|/-\\':
yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay):
self.delay = delay
def spinner_task(self):
while self.busy:
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def start(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def stop(self):
self.busy = False
time.sleep(self.delay)
class Response(object):
def __init__(self, *args, **kwargs):
for k, v in kwargs.items():
self.__setattr__(k, v)
class Radipy(object):
player_url = 'http://radiko.jp/apps/js/flash/myplayer-release.swf'
fms1_url = 'https://radiko.jp/v2/api/auth1_fms'
fms2_url = 'https://radiko.jp/v2/api/auth2_fms'
LANG = 'ja_JP.utf8'
auth_response = Response()
auth_success_response = Response()
def __init__(self, station_id, ft):
self.station_id = station_id
self.ft = ft
self.partialkey = ''
self.stream_url = ''
self.area_id = ''
self.title = ''
@staticmethod
def clear():
        subprocess.call('rm -v {}/*.jpg'.format(TMP_PATH), shell=True)
        subprocess.call('rm -v {}/*.swf'.format(TMP_PATH), shell=True)
def authenticate(self):
self._get_playerfile()
self._get_keyfile()
self._get_auth1()
self._generate_partialkey()
self._get_auth2()
print('-' * 20)
print('authentication success.')
def get_channels(self):
self.authenticate()
self._get_area_id()
self._get_area_channels()
def get_programs(self):
self.authenticate()
self._get_area_id()
date = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d')
datetime_api_url = 'http://radiko.jp/v3/program/date/{}/{}.xml'.format(date[:8], self.area_id)
res = requests.get(url=datetime_api_url)
channels_xml = res.content
tree = ET.fromstring(channels_xml)
station = tree.find('.//station[@id="{}"]'.format(self.station_id))
progs = station.findall('.//prog')
for prog in progs:
title = prog.find('.//title').text
ft = prog.attrib['ft']
print(ft, title)
def create(self):
self.authenticate()
self._get_area_id()
self._get_stream_url()
spinner = Spinner()
print("Now Downloading...")
spinner.start()
if self._create_aac():
print('finish!!')
else:
print('failed!!')
spinner.stop()
def _get_playerfile(self):
if PLAYERFILE_PATH.exists():
print('playerFile already exists.')
else:
print('create playerFile...')
res = requests.get(self.player_url)
if res.status_code == 200:
with PLAYERFILE_PATH.open('wb') as file:
file.write(res.content)
if not PLAYERFILE_PATH.exists():
print('playerfile is not created.')
exit()
def _get_keyfile(self):
if KEYFILE_PATH.exists():
print('keyfile already exists.')
else:
print('create KeyFile...')
subprocess.call('swfextract -b 12 {} -o {}'.format(PLAYERFILE_PATH, KEYFILE_PATH), shell=True)
if not KEYFILE_PATH.exists():
print('keyfile is not created. confirm swfextract is installed.')
exit()
def _get_auth1(self):
print('access auth1_fms...')
headers = {
'Host': 'radiko.jp',
'pragma': 'no-cache',
'X-Radiko-App': 'pc_ts',
'X-Radiko-App-Version': '4.0.0',
'X-Radiko-User': 'test-stream',
'X-Radiko-Device': 'pc'
}
res = requests.post(url=self.fms1_url, headers=headers)
self.auth_response.body = res.text
self.auth_response.headers = res.headers
self.auth_response.authtoken = self.auth_response.headers['x-radiko-authtoken']
self.auth_response.offset = int(self.auth_response.headers['x-radiko-keyoffset'])
self.auth_response.length = int(self.auth_response.headers['x-radiko-keylength'])
def _generate_partialkey(self):
print('generate particleKey...')
with KEYFILE_PATH.open('rb+') as file:
file.seek(self.auth_response.offset)
data = file.read(self.auth_response.length)
self.partialkey = base64.b64encode(data)
def _get_auth2(self):
print('access auth2_fms...')
        headers = {
'pragma': 'no-cache',
'X-Radiko-App': 'pc_ts',
'X-Radiko-App-Version': '4.0.0',
'X-Radiko-User': 'test-stream',
'X-Radiko-Device': 'pc',
'X-Radiko-Authtoken': self.auth_response.authtoken,
'X-Radiko-Partialkey': self.partialkey,
}
res = requests.post(url=self.fms2_url, headers=headers)
self.auth_success_response.body = res.text
self.auth_success_response.headers = res.headers
def _get_area_id(self):
area = self.auth_success_response.body.strip().split(',')
self.area_id = area[0]
print('area_id: {}'.format(self.area_id))
def _get_area_channels(self):
area_api_url = "http://radiko.jp/v3/station/list/{}.xml".format(self.area_id)
res = requests.get(url=area_api_url)
channels_xml = res.content
tree = ET.fromstring(channels_xml)
stations = tree.findall('.//station')
table = PrettyTable(['id', '名前'])
table.align['station_id'] = 'l'
table.align['station_name'] = 'l'
table.padding_width = 2
for station in stations:
row = []
for child in station.iter():
if child.tag in ('id', 'name'):
row.append(child.text)
table.add_row(row)
print(table)
def _get_stream_url(self):
try:
datetime_api_url = 'http://radiko.jp/v3/program/date/{}/{}.xml'.format(self.ft[:8], self.area_id)
res = requests.get(url=datetime_api_url)
channels_xml = res.content
tree = ET.fromstring(channels_xml)
station = tree.find('.//station[@id="{}"]'.format(self.station_id))
prog = station.find('.//prog[@ft="{}"]'.format(self.ft))
to = prog.attrib['to']
        # If the program runs past midnight, look it up in the previous day's schedule
except AttributeError:
datetime_api_url = 'http://radiko.jp/v3/program/date/{}/{}.xml'.format(int(self.ft[:8]) - 1, self.area_id)
res = requests.get(url=datetime_api_url)
channels_xml = res.content
tree = ET.fromstring(channels_xml)
station = tree.find('.//station[@id="{}"]'.format(self.station_id))
prog = station.find('.//prog[@ft="{}"]'.format(self.ft))
to = prog.attrib['to']
self.title = prog.find('.//title').text.replace(' ', '_').replace(' ', '_')
table = PrettyTable(['title'])
table.add_row([self.title])
table.padding_width = 2
print(table)
self.stream_url = 'https://radiko.jp/v2/api/ts/playlist.m3u8?l=15&station_id={}&ft={}&to={}'.format(
self.station_id,
self.ft,
to
)
def _create_aac(self):
try:
program_dir = Path(OUTPUT_PATH, self.title)
if not program_dir.exists():
print('create program dir: {}'.format(program_dir))
program_dir.mkdir()
aac_file = Path(program_dir, '{}_{}.aac'.format(self.title, self.ft[:8]))
cmd = ('ffmpeg '
'-loglevel fatal '
'-n -headers "X-Radiko-AuthToken: {}" '
'-i "{}" '
'-vn -acodec copy "{}"'.format(
self.auth_response.authtoken,
self.stream_url,
aac_file
))
subprocess.call(cmd, shell=True)
print('create aac file: {}'.format(aac_file))
return True
except Exception:
return False
@click.command(help='Radipy is CLI radiko Downloader written by python3.')
@click.option('-a', '--area', is_flag=True, help='print station id & name in your area')
@click.option('-ls', is_flag=True, help='print program titles & start time. using with -id option')
@click.option('-id', type=str, help='set station id')
@click.option('-ft', type=str, help='set start datetime str formated by yyyyMMddHHmm e.g. 201804171830')
@click.option('--clear', is_flag=True, help='clear authkey and player in tmp dir')
def main(area, id, ft, ls, clear):
if clear:
Radipy.clear()
elif area:
radipy = Radipy(0, 0)
radipy.get_channels()
elif id and ft:
radipy = Radipy(station_id=id, ft=ft)
radipy.create()
elif id and ls:
radipy = Radipy(station_id=id, ft=0)
radipy.get_programs()
if __name__ == '__main__':
main()
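# Example invocations (a sketch; the station id and timestamp are placeholders,
# in the formats described by the option help texts above):
#
#   python radipy.py --area                    # list station ids in your area
#   python radipy.py -id TBS -ls               # list today's programs of a station
#   python radipy.py -id TBS -ft 201804171830  # download one program as .aac
#   python radipy.py --clear                   # remove cached player/authkey files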
|
{
"content_hash": "2e09970fe3b30320b541e7d68edeb142",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 118,
"avg_line_length": 35.26027397260274,
"alnum_prop": 0.5618686868686869,
"repo_name": "sin-tanaka/radipy",
"id": "59f9bb1ecbc68a0d41286a7ce98de52d31268dd3",
"size": "10350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radipy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10350"
}
],
"symlink_target": ""
}
|
from django.http import JsonResponse
class JSONResponseMixin:
"""
A mixin that can be used to render a JSON response.
"""
def render_to_json_response(self, context, **response_kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
return JsonResponse(
self.get_data(context),
**response_kwargs
)
def get_data(self, context):
"""
Returns an object that will be serialized as JSON by json.dumps().
"""
# Note: This is *EXTREMELY* naive; in reality, you'll need
# to do much more complex handling to ensure that arbitrary
# objects -- such as Django model instances or querysets
# -- can be serialized as JSON.
return context
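# Example usage (a sketch; ``JSONView`` is a hypothetical name, not part of this
# module): combine the mixin with a class-based view and route rendering
# through the JSON response.
#
#     from django.views.generic import TemplateView
#
#     class JSONView(JSONResponseMixin, TemplateView):
#         def render_to_response(self, context, **response_kwargs):
#             return self.render_to_json_response(context, **response_kwargs)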
|
{
"content_hash": "3bed7702c8f04052cb73a3e6e076fb7c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 76,
"avg_line_length": 32.2,
"alnum_prop": 0.6062111801242236,
"repo_name": "razisayyed/django-ads",
"id": "ebdbb36a6893fec6d498d971a85cdb124b27bf7b",
"size": "805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ads/mixins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "235"
},
{
"name": "JavaScript",
"bytes": "2224"
},
{
"name": "Python",
"bytes": "32645"
}
],
"symlink_target": ""
}
|
from artefact.connectors.adwords.api.reports import Reports
from googleads.errors import AdWordsReportBadRequestError
import click
import colorama
import json
import copy
import datetime
import csv
from os.path import join
@click.command()
@click.option('--report-fields', help='Report Fields to get', required=False)
@click.option('--report-type', help='CLICK_PERFORMANCE_REPORT OR CAMPAIGN_PERFORMANCE_REPORT', required=True)
@click.option('--output', help='Output directory', required=True)
@click.option('--date-range', help='Date range', required=True)
@click.option('--credentials', help='Credentials file', type=click.File('rb'), required=True)
@click.option('--customers', help='Customers file', type=click.File('rb'), required=True)
def run(customers, credentials, date_range, output, report_type, report_fields=None):
report_fields = report_fields_splitlines(report_fields)
credentials = json.load(credentials)
for customer_id in customers:
customer_id = customer_id.rstrip()
date_range_gen = parse_date(date_range)
credentials['client_customer_id'] = customer_id
reports = Reports(credentials)
try:
for day in date_range_gen:
click_report = reports.retrieve_report(report_type, 'CUSTOM_DATE', date=day, report_fields=report_fields)
output_report(customer_id, day, click_report, report_type, output)
except AdWordsReportBadRequestError as e:
print "Encountered error: " + repr(e)
def report_fields_splitlines(report_fields_doc):
report_fields = None
if report_fields_doc is not None:
with open(report_fields_doc) as f:
report_fields = f.read().splitlines()
return report_fields
def parse_date(date_range_str):
date = map(int, date_range_str.split("-"))
start_date = datetime.date(*date[:3])
end_date = datetime.date(*date[3:])
return date_range_generator(start_date, end_date)
def date_range_generator(start_date, end_date):
delta = datetime.timedelta(1)
current = start_date
while current <= end_date:
yield current
current = current + delta
def output_report(customer_id, day, click_report, report_type, output):
filename = '_'.join([customer_id, day.strftime('%Y%m%d'), report_type]) + ".csv"
path = join(output, filename)
with open(path, 'wb') as handle:
writer = csv.DictWriter(handle, click_report.headers, restval='NULL')
writer.writeheader()
for row in click_report.rows:
writer.writerow(row)
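# Example invocation (a sketch; paths and values are placeholders). Note that
# --date-range is two dates joined together, e.g. "2018-1-1-2018-1-7": the
# string is split on "-" above, the first three numbers giving the start date
# and the last three the end date.
#
#   python adwords_report_batch_downloader.py \
#       --report-type CLICK_PERFORMANCE_REPORT \
#       --date-range 2018-1-1-2018-1-7 \
#       --output ./reports \
#       --credentials credentials.json \
#       --customers customers.txt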
|
{
"content_hash": "d749cc0cb7124350a8ef533b18c86436",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 121,
"avg_line_length": 35.19178082191781,
"alnum_prop": 0.6843129622421176,
"repo_name": "sittingbull/artefact.connectors",
"id": "e4f21544c9247e8b966bbcd9d6e519b6cc0a691e",
"size": "2592",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "artefact/tools/adwords/adwords_report_batch_downloader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "226"
},
{
"name": "Python",
"bytes": "54131"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import copy
import six
from keystone.common import kvs
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import log
from keystone.openstack.common import timeutils
from keystone import token
from keystone.token import provider
CONF = config.CONF
LOG = log.getLogger(__name__)
class Token(token.Driver):
"""KeyValueStore backend for tokens.
This is the base implementation for any/all key-value-stores (e.g.
memcached) for the Token backend. It is recommended to only use the base
in-memory implementation for testing purposes.
"""
revocation_key = 'revocation-list'
kvs_backend = 'openstack.kvs.Memory'
def __init__(self, backing_store=None, **kwargs):
super(Token, self).__init__()
self._store = kvs.get_key_value_store('token-driver')
if backing_store is not None:
self.kvs_backend = backing_store
self._store.configure(backing_store=self.kvs_backend, **kwargs)
if self.__class__ == Token:
# NOTE(morganfainberg): Only warn if the base KVS implementation
# is instantiated.
LOG.warn(_('It is recommended to only use the base '
'key-value-store implementation for the token driver '
'for testing purposes. '
'Please use keystone.token.backends.memcache.Token '
'or keystone.token.backends.sql.Token instead.'))
def _prefix_token_id(self, token_id):
return 'token-%s' % token_id.encode('utf-8')
def _prefix_user_id(self, user_id):
return 'usertokens-%s' % user_id.encode('utf-8')
def _get_key_or_default(self, key, default=None):
try:
return self._store.get(key)
except exception.NotFound:
return default
def _get_key(self, key):
return self._store.get(key)
def _set_key(self, key, value, lock=None):
self._store.set(key, value, lock)
def _delete_key(self, key):
return self._store.delete(key)
def get_token(self, token_id):
ptk = self._prefix_token_id(token_id)
try:
token_ref = self._get_key(ptk)
except exception.NotFound:
raise exception.TokenNotFound(token_id=token_id)
return token_ref
def create_token(self, token_id, data):
"""Create a token by id and data.
It is assumed the caller has performed data validation on the "data"
parameter.
"""
data_copy = copy.deepcopy(data)
ptk = self._prefix_token_id(token_id)
if not data_copy.get('expires'):
data_copy['expires'] = provider.default_expire_time()
if not data_copy.get('user_id'):
data_copy['user_id'] = data_copy['user']['id']
# NOTE(morganfainberg): for ease of manipulating the data without
# concern about the backend, always store the value(s) in the
# index as the isotime (string) version so this is where the string is
# built.
expires_str = timeutils.isotime(data_copy['expires'], subsecond=True)
self._set_key(ptk, data_copy)
user_id = data['user']['id']
user_key = self._prefix_user_id(user_id)
self._update_user_token_list(user_key, token_id, expires_str)
if CONF.trust.enabled and data.get('trust_id'):
# NOTE(morganfainberg): If trusts are enabled and this is a trust
# scoped token, we add the token to the trustee list as well. This
# allows password changes of the trustee to also expire the token.
# There is no harm in placing the token in multiple lists, as
# _list_tokens is smart enough to handle almost any case of
# valid/invalid/expired for a given token.
token_data = data_copy['token_data']
if data_copy['token_version'] == token.provider.V2:
trustee_user_id = token_data['access']['trust'][
'trustee_user_id']
elif data_copy['token_version'] == token.provider.V3:
trustee_user_id = token_data['OS-TRUST:trust'][
'trustee_user_id']
else:
raise token.provider.UnsupportedTokenVersionException(
_('Unknown token version %s') %
data_copy.get('token_version'))
trustee_key = self._prefix_user_id(trustee_user_id)
self._update_user_token_list(trustee_key, token_id, expires_str)
return data_copy
def _get_user_token_list_with_expiry(self, user_key):
"""Return a list of tuples in the format (token_id, token_expiry) for
the user_key.
"""
return self._get_key_or_default(user_key, default=[])
def _get_user_token_list(self, user_key):
"""Return a list of token_ids for the user_key."""
token_list = self._get_user_token_list_with_expiry(user_key)
# Each element is a tuple of (token_id, token_expiry). Most code does
# not care about the expiry, it is stripped out and only a
# list of token_ids are returned.
return [t[0] for t in token_list]
def _update_user_token_list(self, user_key, token_id, expires_isotime_str):
current_time = self._get_current_time()
revoked_token_list = set([t['id'] for t in
self.list_revoked_tokens()])
with self._store.get_lock(user_key) as lock:
filtered_list = []
token_list = self._get_user_token_list_with_expiry(user_key)
for item in token_list:
try:
item_id, expires = self._format_token_index_item(item)
except (ValueError, TypeError):
# NOTE(morganfainberg): Skip on expected errors
# possibilities from the `_format_token_index_item` method.
continue
if expires < current_time:
LOG.debug(('Token `%(token_id)s` is expired, removing '
'from `%(user_key)s`.'),
{'token_id': item_id, 'user_key': user_key})
continue
if item_id in revoked_token_list:
                    # NOTE(morganfainberg): If the token has been revoked, it
                    # can safely be removed from this list. This helps to keep
                    # the user_token_list reasonably small.
LOG.debug(('Token `%(token_id)s` is revoked, removing '
'from `%(user_key)s`.'),
{'token_id': item_id, 'user_key': user_key})
continue
filtered_list.append(item)
filtered_list.append((token_id, expires_isotime_str))
self._set_key(user_key, filtered_list, lock)
return filtered_list
def _get_current_time(self):
return timeutils.normalize_time(timeutils.utcnow())
def _add_to_revocation_list(self, data, lock):
filtered_list = []
revoked_token_data = {}
current_time = self._get_current_time()
expires = data['expires']
if isinstance(expires, six.string_types):
expires = timeutils.parse_isotime(expires)
expires = timeutils.normalize_time(expires)
if expires < current_time:
LOG.warning(_('Token `%s` is expired, not adding to the '
'revocation list.'), data['id'])
return
revoked_token_data['expires'] = timeutils.isotime(expires,
subsecond=True)
revoked_token_data['id'] = data['id']
token_list = self._get_key_or_default(self.revocation_key, default=[])
if not isinstance(token_list, list):
# NOTE(morganfainberg): In the case that the revocation list is not
# in a format we understand, reinitialize it. This is an attempt to
# not allow the revocation list to be completely broken if
# somehow the key is changed outside of keystone (e.g. memcache
# that is shared by multiple applications). Logging occurs at error
# level so that the cloud administrators have some awareness that
# the revocation_list needed to be cleared out. In all, this should
            # be recoverable. Keystone cannot prevent external applications
            # from changing a key in some backends; however, it is possible to
            # gracefully handle and notify of this event.
LOG.error(_('Reinitializing revocation list due to error '
'in loading revocation list from backend. '
'Expected `list` type got `%(type)s`. Old '
'revocation list data: %(list)r'),
{'type': type(token_list), 'list': token_list})
token_list = []
# NOTE(morganfainberg): on revocation, cleanup the expired entries, try
# to keep the list of tokens revoked at the minimum.
for token_data in token_list:
try:
expires_at = timeutils.normalize_time(
timeutils.parse_isotime(token_data['expires']))
except ValueError:
LOG.warning(_('Removing `%s` from revocation list due to '
'invalid expires data in revocation list.'),
token_data.get('id', 'INVALID_TOKEN_DATA'))
continue
if expires_at > current_time:
filtered_list.append(token_data)
filtered_list.append(revoked_token_data)
self._set_key(self.revocation_key, filtered_list, lock)
def delete_token(self, token_id):
# Test for existence
with self._store.get_lock(self.revocation_key) as lock:
data = self.get_token(token_id)
ptk = self._prefix_token_id(token_id)
result = self._delete_key(ptk)
self._add_to_revocation_list(data, lock)
return result
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
return super(Token, self).delete_tokens(
user_id=user_id,
tenant_id=tenant_id,
trust_id=trust_id,
consumer_id=consumer_id,
)
def _format_token_index_item(self, item):
try:
token_id, expires = item
except (TypeError, ValueError):
LOG.debug(('Invalid token entry expected tuple of '
'`(<token_id>, <expires>)` got: `%(item)r`'),
dict(item=item))
raise
try:
expires = timeutils.normalize_time(
timeutils.parse_isotime(expires))
except ValueError:
LOG.debug(('Invalid expires time on token `%(token_id)s`:'
' %(expires)r'),
dict(token_id=token_id, expires=expires))
raise
return token_id, expires
def _token_match_tenant(self, token_ref, tenant_id):
if token_ref.get('tenant'):
return token_ref['tenant'].get('id') == tenant_id
return False
def _token_match_trust(self, token_ref, trust_id):
if not token_ref.get('trust_id'):
return False
return token_ref['trust_id'] == trust_id
def _token_match_consumer(self, token_ref, consumer_id):
try:
oauth = token_ref['token_data']['token']['OS-OAUTH1']
return oauth.get('consumer_id') == consumer_id
except KeyError:
return False
def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
# This function is used to generate the list of tokens that should be
# revoked when revoking by token identifiers. This approach will be
# deprecated soon, probably in the Juno release. Setting revoke_by_id
# to False indicates that this kind of recording should not be
# performed. In order to test the revocation events, tokens shouldn't
# be deleted from the backends. This check ensures that tokens are
# still recorded.
if not CONF.token.revoke_by_id:
return []
tokens = []
user_key = self._prefix_user_id(user_id)
token_list = self._get_user_token_list_with_expiry(user_key)
current_time = self._get_current_time()
for item in token_list:
try:
token_id, expires = self._format_token_index_item(item)
except (TypeError, ValueError):
# NOTE(morganfainberg): Skip on expected error possibilities
# from the `_format_token_index_item` method.
continue
if expires < current_time:
continue
try:
token_ref = self.get_token(token_id)
except exception.TokenNotFound:
# NOTE(morganfainberg): Token doesn't exist, skip it.
continue
if token_ref:
if tenant_id is not None:
if not self._token_match_tenant(token_ref, tenant_id):
continue
if trust_id is not None:
if not self._token_match_trust(token_ref, trust_id):
continue
if consumer_id is not None:
if not self._token_match_consumer(token_ref, consumer_id):
continue
tokens.append(token_id)
return tokens
def list_revoked_tokens(self):
revoked_token_list = self._get_key_or_default(self.revocation_key,
default=[])
if isinstance(revoked_token_list, list):
return revoked_token_list
return []
def flush_expired_tokens(self):
"""Archive or delete tokens that have expired."""
raise exception.NotImplemented()
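# Hedged usage sketch (not part of the original driver): as the class
# docstring notes, the in-memory backend is meant for tests. A minimal flow,
# assuming keystone configuration has already been loaded, looks like:
#
#     driver = Token()
#     driver.create_token('abc123', {'id': 'abc123',
#                                    'user': {'id': 'user-1'},
#                                    'expires': None})
#     assert driver.get_token('abc123')['user_id'] == 'user-1'
#     driver.delete_token('abc123')  # also records it in the revocation list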
|
{
"content_hash": "0194151c155ee828c69002876f039c7f",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 79,
"avg_line_length": 42.008849557522126,
"alnum_prop": 0.568639842707675,
"repo_name": "reeshupatel/demo",
"id": "b60c4c385b3144a3707878b90f02863e1400a5b2",
"size": "14860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/token/backends/kvs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2886403"
},
{
"name": "Shell",
"bytes": "10635"
}
],
"symlink_target": ""
}
|
from openstack import exceptions
from heat.common import exception
from heat.common.i18n import _
from heat.engine.clients.os import openstacksdk as sdk_plugin
from heat.engine import constraints
CLIENT_NAME = 'senlin'
class SenlinClientPlugin(sdk_plugin.OpenStackSDKPlugin):
exceptions_module = exceptions
def _create(self, version=None):
client = super(SenlinClientPlugin, self)._create(version=version)
return client.clustering
def _get_additional_create_args(self, version):
return {
'clustering_api_version': version or '1'
}
def generate_spec(self, spec_type, spec_props):
spec = {'properties': spec_props}
spec['type'], spec['version'] = spec_type.split('-')
return spec
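    # Illustrative example (added; not in the original plugin): the spec type
    # string is "<name>-<version>", so
    #     generate_spec('os.nova.server-1.0', {'flavor': 'm1.small'})
    # returns {'type': 'os.nova.server', 'version': '1.0',
    #          'properties': {'flavor': 'm1.small'}}.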
def check_action_status(self, action_id):
action = self.client().get_action(action_id)
if action.status == 'SUCCEEDED':
return True
elif action.status == 'FAILED':
raise exception.ResourceInError(
status_reason=action.status_reason,
resource_status=action.status,
)
return False
def get_profile_id(self, profile_name):
profile = self.client().get_profile(profile_name)
return profile.id
def get_cluster_id(self, cluster_name):
cluster = self.client().get_cluster(cluster_name)
return cluster.id
def get_policy_id(self, policy_name):
policy = self.client().get_policy(policy_name)
return policy.id
def is_bad_request(self, ex):
return (isinstance(ex, exceptions.HttpException) and
ex.status_code == 400)
def execute_actions(self, actions):
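        # Each element of `actions` is expected (reading the code below) to be
        # a dict of the shape:
        #     {'func': <name of a clustering client method>, 'params': {...},
        #      'action_id': None, 'done': False}
        # and the actions are executed strictly one at a time.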
all_executed = True
for action in actions:
if action['done']:
continue
all_executed = False
if action['action_id'] is None:
func = getattr(self.client(), action['func'])
ret = func(**action['params'])
if isinstance(ret, dict):
action['action_id'] = ret['action']
else:
action['action_id'] = ret.location.split('/')[-1]
else:
ret = self.check_action_status(action['action_id'])
action['done'] = ret
# Execute these actions one by one.
break
return all_executed
class ProfileConstraint(constraints.BaseCustomConstraint):
# If name is not unique, will raise exceptions.HttpException
expected_exceptions = (exceptions.HttpException,)
def validate_with_client(self, client, profile):
client.client(CLIENT_NAME).get_profile(profile)
class ClusterConstraint(constraints.BaseCustomConstraint):
# If name is not unique, will raise exceptions.HttpException
expected_exceptions = (exceptions.HttpException,)
def validate_with_client(self, client, value):
client.client(CLIENT_NAME).get_cluster(value)
class PolicyConstraint(constraints.BaseCustomConstraint):
# If name is not unique, will raise exceptions.HttpException
expected_exceptions = (exceptions.HttpException,)
def validate_with_client(self, client, value):
client.client(CLIENT_NAME).get_policy(value)
class ProfileTypeConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.StackValidationFailed,)
def validate_with_client(self, client, value):
conn = client.client(CLIENT_NAME)
type_list = conn.profile_types()
names = [pt.name for pt in type_list]
if value not in names:
not_found_message = (
_("Unable to find senlin profile type '%(pt)s', "
"available profile types are %(pts)s.") %
{'pt': value, 'pts': names}
)
raise exception.StackValidationFailed(message=not_found_message)
class PolicyTypeConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.StackValidationFailed,)
def validate_with_client(self, client, value):
conn = client.client(CLIENT_NAME)
type_list = conn.policy_types()
names = [pt.name for pt in type_list]
if value not in names:
not_found_message = (
_("Unable to find senlin policy type '%(pt)s', "
"available policy types are %(pts)s.") %
{'pt': value, 'pts': names}
)
raise exception.StackValidationFailed(message=not_found_message)
|
{
"content_hash": "f9bd74346743828600c9e05148763c89",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 76,
"avg_line_length": 34.43181818181818,
"alnum_prop": 0.622002200220022,
"repo_name": "noironetworks/heat",
"id": "408cabe296a2aef6afac6e43811b680115a3f238",
"size": "5120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/clients/os/senlin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8804896"
},
{
"name": "Shell",
"bytes": "64533"
}
],
"symlink_target": ""
}
|
import ctypes
class Z3Exception(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
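# The classes below are thin ctypes.c_void_p wrappers around opaque Z3 C-API
# handles: each stores its handle in _as_parameter_ so instances can be passed
# directly to ctypes foreign functions, and from_param simply returns the
# object unchanged.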
class ContextObj(ctypes.c_void_p):
def __init__(self, context):
self._as_parameter_ = context
def from_param(obj):
return obj
class Config(ctypes.c_void_p):
def __init__(self, config):
self._as_parameter_ = config
def from_param(obj):
return obj
class Symbol(ctypes.c_void_p):
def __init__(self, symbol):
self._as_parameter_ = symbol
def from_param(obj):
return obj
class Sort(ctypes.c_void_p):
def __init__(self, sort):
self._as_parameter_ = sort
def from_param(obj):
return obj
class FuncDecl(ctypes.c_void_p):
def __init__(self, decl):
self._as_parameter_ = decl
def from_param(obj):
return obj
class Ast(ctypes.c_void_p):
def __init__(self, ast):
self._as_parameter_ = ast
def from_param(obj):
return obj
class Pattern(ctypes.c_void_p):
def __init__(self, pattern):
self._as_parameter_ = pattern
def from_param(obj):
return obj
class Model(ctypes.c_void_p):
def __init__(self, model):
self._as_parameter_ = model
def from_param(obj):
return obj
class Literals(ctypes.c_void_p):
def __init__(self, literals):
self._as_parameter_ = literals
def from_param(obj):
return obj
class Constructor(ctypes.c_void_p):
def __init__(self, constructor):
self._as_parameter_ = constructor
def from_param(obj):
return obj
class ConstructorList(ctypes.c_void_p):
def __init__(self, constructor_list):
self._as_parameter_ = constructor_list
def from_param(obj):
return obj
class GoalObj(ctypes.c_void_p):
def __init__(self, goal):
self._as_parameter_ = goal
def from_param(obj):
return obj
class TacticObj(ctypes.c_void_p):
def __init__(self, tactic):
self._as_parameter_ = tactic
def from_param(obj):
return obj
class ProbeObj(ctypes.c_void_p):
def __init__(self, probe):
self._as_parameter_ = probe
def from_param(obj):
return obj
class ApplyResultObj(ctypes.c_void_p):
def __init__(self, obj):
self._as_parameter_ = obj
def from_param(obj):
return obj
class StatsObj(ctypes.c_void_p):
def __init__(self, statistics):
self._as_parameter_ = statistics
def from_param(obj):
return obj
class SolverObj(ctypes.c_void_p):
def __init__(self, solver):
self._as_parameter_ = solver
def from_param(obj):
return obj
class SolverCallbackObj(ctypes.c_void_p):
def __init__(self, solver):
self._as_parameter_ = solver
def from_param(obj):
return obj
class FixedpointObj(ctypes.c_void_p):
def __init__(self, fixedpoint):
self._as_parameter_ = fixedpoint
def from_param(obj):
return obj
class OptimizeObj(ctypes.c_void_p):
def __init__(self, optimize):
self._as_parameter_ = optimize
def from_param(obj):
return obj
class ModelObj(ctypes.c_void_p):
def __init__(self, model):
self._as_parameter_ = model
def from_param(obj):
return obj
class AstVectorObj(ctypes.c_void_p):
def __init__(self, vector):
self._as_parameter_ = vector
def from_param(obj):
return obj
class AstMapObj(ctypes.c_void_p):
def __init__(self, ast_map):
self._as_parameter_ = ast_map
def from_param(obj):
return obj
class Params(ctypes.c_void_p):
def __init__(self, params):
self._as_parameter_ = params
def from_param(obj):
return obj
class ParamDescrs(ctypes.c_void_p):
def __init__(self, paramdescrs):
self._as_parameter_ = paramdescrs
def from_param(obj):
return obj
class FuncInterpObj(ctypes.c_void_p):
def __init__(self, f):
self._as_parameter_ = f
def from_param(obj):
return obj
class FuncEntryObj(ctypes.c_void_p):
def __init__(self, e):
self._as_parameter_ = e
def from_param(obj):
return obj
class RCFNumObj(ctypes.c_void_p):
def __init__(self, e):
self._as_parameter_ = e
def from_param(obj):
return obj
|
{
"content_hash": "38e2a903b43e6448a2d1ab750a9d5f38",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 46,
"avg_line_length": 18.781115879828327,
"alnum_prop": 0.5877513711151737,
"repo_name": "HJLebbink/asm-dude",
"id": "6c93c0beea2dfa612f3bb3af682634d0a366e3c3",
"size": "4575",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "VS/CSHARP/asm-sim-lib/libs/z3-4.8.12-x64-win/bin/python/z3/z3types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "164163"
},
{
"name": "Batchfile",
"bytes": "410"
},
{
"name": "C",
"bytes": "1311140"
},
{
"name": "C#",
"bytes": "2554588"
},
{
"name": "C++",
"bytes": "644077"
},
{
"name": "Go",
"bytes": "1779"
},
{
"name": "Python",
"bytes": "4274689"
},
{
"name": "Shell",
"bytes": "2764"
}
],
"symlink_target": ""
}
|
import math
import os
import folium
import sqlalchemy as sa
import sqlalchemy.orm as orm
import uszipcode
class PopDensityFinder:
"""Returns zipcode population figures, as inhabitants per square mile."""
def __init__(self, db_fspec='/tmp/simple_db.sqlite'): # DB from uszipcode
self.engine = sa.create_engine(f'sqlite:///{db_fspec}')
meta = sa.MetaData(bind=self.engine)
self.zip_tbl = sa.Table('simple_zipcode', meta, autoload=True)
def get_pops(self, ul, lr, min_area=1):
c = self.zip_tbl.c
# We require that the ZIP's bounding box fit entirely between ul & lr.
q = (orm.sessionmaker(bind=self.engine)()
.query(c.population_density,
c.major_city,
c.zipcode,
c.lat,
c.lng)
.filter(lr[0] <= c.bounds_south) # lower right coord
.filter(c.bounds_north < ul[0]) # upper left coord
.filter(ul[1] <= c.bounds_west)
.filter(c.bounds_east < lr[1])
.filter(c.land_area_in_sqmi >= min_area)
.filter(c.population_density > 0)
.order_by(c.population_density.asc()))
return q.all()
def _clip(n, hi=1e6):
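    # Clamp the density to a floor of 200 and compress anything above `hi`
    # (default 1e6) by a factor of 5, so outliers do not dominate the circle
    # radii drawn in pop_map(); e.g. _clip(50) == 200 and _clip(2e6) == 1.2e6.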
n = max(200, n)
return n if n < hi else hi + (n - hi) / 5
def pop_map(fspec='~/Desktop/map.html'):
map_ = folium.Map(
location=(37.5, -122.5),
tiles='Stamen Terrain',
)
# map_.add_child(folium.LatLngPopup())
for pop_dens, name, zipcode, lat, lng in PopDensityFinder().get_pops(
(38, -123), (36, -121)):
folium.Circle(
radius=10 * math.sqrt(_clip(pop_dens)),
location=[lat, lng],
popup=f'{name} {zipcode}, {round(pop_dens / 1e3, 1)} k',
color='crimson',
fill=False,
).add_to(map_)
map_.save(os.path.expanduser(fspec))
def menlo():
search = uszipcode.SearchEngine(simple_zipcode=False)
result = search.by_zipcode('94025')
assert f'{result.major_city}, CA' == result.post_office_city
if __name__ == '__main__':
menlo()
pop_map()
|
{
"content_hash": "fec2e00cb54062e1193d2c370efac299",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 78,
"avg_line_length": 30.685714285714287,
"alnum_prop": 0.5581936685288641,
"repo_name": "jhanley634/testing-tools",
"id": "3776ab0823313191388b3f22a3a16a3b568adfcd",
"size": "3257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problem/pop_map/pop_map.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6462"
},
{
"name": "C++",
"bytes": "1183"
},
{
"name": "Java",
"bytes": "1280"
},
{
"name": "Julia",
"bytes": "12786"
},
{
"name": "Jupyter Notebook",
"bytes": "20233"
},
{
"name": "Makefile",
"bytes": "23635"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "443787"
},
{
"name": "R",
"bytes": "2161"
},
{
"name": "Rust",
"bytes": "3199"
},
{
"name": "Shell",
"bytes": "5724"
},
{
"name": "TeX",
"bytes": "129"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(name="modulo",
version="0.0.1",
install_requires=["quark==0.0.1"],
py_modules=['modulo'],
packages=['modulo', 'modulo_md'])
|
{
"content_hash": "1f285e91f46711f84e0f48780ed0ef38",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 40,
"avg_line_length": 26.285714285714285,
"alnum_prop": 0.5978260869565217,
"repo_name": "bozzzzo/quark",
"id": "574f1eec156a49047262b7a76497a8f82a13a67f",
"size": "217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quarkc/test/emit/expected/py/modulo/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "496221"
},
{
"name": "JavaScript",
"bytes": "466971"
},
{
"name": "Python",
"bytes": "590150"
},
{
"name": "Shell",
"bytes": "1328"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import sys
import os
import re
import mimetypes
from copy import copy
from importlib import import_module
from io import BytesIO
from django.apps import apps
from django.conf import settings
from django.core import urlresolvers
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import (request_started, request_finished,
got_request_exception)
from django.db import close_old_connections
from django.http import SimpleCookie, HttpRequest, QueryDict
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry, SimpleLazyObject
from django.utils.encoding import force_bytes, force_str
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils import six
from django.utils.six.moves.urllib.parse import unquote, urlparse, urlsplit
from django.test.utils import ContextList
__all__ = ('Client', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')
class FakePayload(object):
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in Real Life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after he's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
def closing_iterator_wrapper(iterable, close):
try:
for item in iterable:
yield item
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
"""
    An HTTP Handler that can be used for testing purposes. Uses the WSGI
interface to compose requests, but returns the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super(ClientHandler, self).__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Attach the originating request to the response so that it could be
# later retrieved.
response.wsgi_request = request
# We're emulating a WSGI server; we must call the close method
# on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
store.setdefault('context', ContextList()).append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, six.string_types) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend([to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
]])
else:
lines.extend([to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
]])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
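# Hedged illustration (not part of the original module): encode_multipart()
# turns a plain dict into a multipart/form-data body, e.g.
#     body = encode_multipart(BOUNDARY, {'name': 'value'})
# and the result is what Client.post() sends when content_type is
# MULTIPART_CONTENT.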
def encode_file(boundary, key, file):
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
if hasattr(file, 'content_type'):
content_type = file.content_type
else:
content_type = mimetypes.guess_type(file.name)[0]
if content_type is None:
content_type = 'application/octet-stream'
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, os.path.basename(file.name))),
to_bytes('Content-Type: %s' % content_type),
b'',
file.read()
]
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': str('/'),
'REMOTE_ADDR': str('127.0.0.1'),
'REQUEST_METHOD': str('GET'),
'SCRIPT_NAME': str(''),
'SERVER_NAME': str('testserver'),
'SERVER_PORT': str('80'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
path = force_str(parsed[2])
# If there are parameters, add them
if parsed[3]:
path += str(";") + force_str(parsed[3])
path = unquote(path)
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
path = path.encode('utf-8').decode('iso-8859-1')
return path
def get(self, path, data=None, secure=False, **extra):
"Construct a GET request."
r = {
'QUERY_STRING': urlencode(data or {}, doseq=True),
}
r.update(extra)
return self.generic('GET', path, secure=secure, **r)
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"Construct a POST request."
post_data = self._encode_data(data or {}, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"Construct a HEAD request."
r = {
'QUERY_STRING': urlencode(data or {}, doseq=True),
}
r.update(extra)
return self.generic('HEAD', path, secure=secure, **r)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PUT request."
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PATCH request."
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a DELETE request."
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Constructs an arbitrary HTTP request."""
parsed = urlparse(path)
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': str(method),
'SERVER_PORT': str('443') if secure else str('80'),
'wsgi.url_scheme': str('https') if secure else str('http'),
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': str(content_type),
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
query_string = force_bytes(parsed[4])
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
query_string = query_string.decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if apps.is_installed('django.contrib.sessions'):
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
else:
s = engine.SessionStore()
s.save()
self.cookies[settings.SESSION_COOKIE_NAME] = s.session_key
return s
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
got_request_exception.connect(self.store_exc_info, dispatch_uid="request-exception")
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
six.reraise(*exc_info)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
# Attach the ResolverMatch instance to the response
response.resolver_match = SimpleLazyObject(
lambda: urlresolvers.resolve(request['PATH_INFO']))
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid="request-exception")
def get(self, path, data=None, follow=False, secure=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PATCH.
"""
response = super(Client, self).patch(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
        Sets the Client to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
from django.contrib.auth import authenticate, login
user = authenticate(**credentials)
if (user and user.is_active and
apps.is_installed('django.contrib.sessions')):
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
return True
else:
return False
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
from django.contrib.auth import get_user, logout
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
if self.session:
request.session = self.session
request.user = get_user(request)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _handle_redirects(self, response, **extra):
"Follows any redirects by requesting responses from the server using GET."
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((url, response.status_code))
url = urlsplit(url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
response.redirect_chain = redirect_chain
# Prevent loops
if response.redirect_chain[-1] in response.redirect_chain[0:-1]:
break
return response
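# Hedged usage sketch (not part of the original module): outside of a Django
# TestCase the Client can be driven directly, assuming settings are configured
# and '/hello/' resolves to a view:
#
#     client = Client()
#     response = client.get('/hello/', {'q': 'term'}, follow=True)
#     assert response.status_code == 200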
|
{
"content_hash": "e2fb1829de978ad53aa902623e6dd972",
"timestamp": "",
"source": "github",
"line_count": 635,
"max_line_length": 112,
"avg_line_length": 38.22834645669291,
"alnum_prop": 0.5859526261585993,
"repo_name": "lecaoquochung/ddnb.django",
"id": "1952af27eb062e6276dd5e6e0f03b6536eab9b8f",
"size": "24275",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "django/test/client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53353"
},
{
"name": "JavaScript",
"bytes": "102434"
},
{
"name": "Python",
"bytes": "9796233"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
}
|
from .model import ExtractionModel # noqa
from .relation import Relation, merge_relations # noqa
|
{
"content_hash": "815918ae95eb3833781af2d07699971b",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 55,
"avg_line_length": 49.5,
"alnum_prop": 0.7878787878787878,
"repo_name": "freewilll/abridger",
"id": "813697f1ae9a398c69dc8d986d8a922c9f52ca82",
"size": "99",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/abridger/extraction_model/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7736"
},
{
"name": "Makefile",
"bytes": "1356"
},
{
"name": "Python",
"bytes": "218290"
}
],
"symlink_target": ""
}
|
from pyramid.traversal import find_root
from snovault import (
calculated_property,
collection,
load_schema,
)
from .base import (
ALLOW_SUBMITTER_ADD,
Item,
paths_filtered_by_status,
SharedItem
)
from .dataset import Dataset
from .shared_calculated_properties import (
CalculatedBiosampleSlims,
CalculatedBiosampleSynonyms,
CalculatedAssaySynonyms,
CalculatedAssayTermID,
CalculatedVisualize
)
# importing biosample function to allow calculation of experiment biosample property
from .biosample import (
construct_biosample_summary,
generate_summary_dictionary
)
from .assay_data import assay_terms
@collection(
name='experiments',
unique_key='accession',
properties={
'title': 'Experiments',
'description': 'Listing of Experiments',
})
class Experiment(Dataset,
CalculatedBiosampleSlims,
CalculatedBiosampleSynonyms,
CalculatedAssaySynonyms,
CalculatedAssayTermID,
CalculatedVisualize):
item_type = 'experiment'
schema = load_schema('encoded:schemas/experiment.json')
embedded = Dataset.embedded + [
'files.platform',
'files.analysis_step_version.analysis_step',
'files.analysis_step_version.analysis_step.pipelines',
'related_series',
'replicates.antibody',
'replicates.library',
'assay_pipeline',
'replicates.library.biosample.submitted_by',
'replicates.library.biosample.source',
'replicates.library.biosample.applied_modifications',
'replicates.library.biosample.organism',
'replicates.library.biosample.donor',
'replicates.library.biosample.donor.organism',
'replicates.library.biosample.part_of',
'replicates.library.biosample.part_of.donor',
'replicates.library.biosample.part_of.treatments',
'replicates.library.biosample.treatments',
'replicates.library.treatments',
'possible_controls',
'target.organism',
'references',
]
audit_inherit = [
'original_files',
'original_files.replicate',
'original_files.platform',
'target',
'files.analysis_step_version.analysis_step.pipelines',
'revoked_files',
'revoked_files.replicate',
'submitted_by',
'lab',
'award',
'documents',
'replicates.antibody.characterizations',
'replicates.antibody.targets',
'replicates.library',
'replicates.library.documents',
'replicates.library.biosample',
'replicates.library.biosample.organism',
'replicates.library.biosample.treatments',
'replicates.library.biosample.donor.organism',
'replicates.library.biosample.donor',
'replicates.library.biosample.treatments',
'replicates.library.biosample.originated_from',
'replicates.library.biosample.part_of',
'replicates.library.biosample.pooled_from',
'replicates.library.spikeins_used',
'replicates.library.treatments',
'target.organism',
]
rev = Dataset.rev.copy()
rev.update({
'replicates': ('Replicate', 'experiment'),
'related_series': ('Series', 'related_datasets'),
'superseded_by': ('Experiment', 'supersedes')
})
@calculated_property(schema={
"title": "Replicates",
"type": "array",
"items": {
"type": ['string', 'object'],
"linkFrom": "Replicate.experiment",
},
})
def replicates(self, request, replicates):
return paths_filtered_by_status(request, replicates)
@calculated_property(schema={
"title": "Biosample summary",
"type": "string",
})
def biosample_summary(self,
request,
replicates=None):
drop_age_sex_flag = False
dictionaries_of_phrases = []
biosample_accessions = set()
if replicates is not None:
for rep in replicates:
replicateObject = request.embed(rep, '@@object')
if replicateObject['status'] == 'deleted':
continue
if 'library' in replicateObject:
libraryObject = request.embed(replicateObject['library'], '@@object')
if libraryObject['status'] == 'deleted':
continue
if 'biosample' in libraryObject:
biosampleObject = request.embed(libraryObject['biosample'], '@@object')
if biosampleObject['status'] == 'deleted':
continue
if biosampleObject['accession'] not in biosample_accessions:
biosample_accessions.add(biosampleObject['accession'])
if biosampleObject.get('biosample_type') in [
'stem cell',
'in vitro differentiated cells']:
drop_age_sex_flag = True
organismObject = None
if 'organism' in biosampleObject:
organismObject = request.embed(biosampleObject['organism'],
'@@object')
donorObject = None
if 'donor' in biosampleObject:
donorObject = request.embed(biosampleObject['donor'], '@@object')
treatment_objects_list = None
treatments = biosampleObject.get('treatments')
if treatments is not None and len(treatments) > 0:
treatment_objects_list = []
for t in treatments:
treatment_objects_list.append(request.embed(t, '@@object'))
part_of_object = None
if 'part_of' in biosampleObject:
part_of_object = request.embed(biosampleObject['part_of'],
'@@object')
originated_from_object = None
if 'originated_from' in biosampleObject:
originated_from_object = request.embed(biosampleObject['originated_from'],
'@@object')
modifications_list = None
genetic_modifications = biosampleObject.get('applied_modifications')
if genetic_modifications:
modifications_list = []
for gm in genetic_modifications:
gm_object = request.embed(gm, '@@object')
modification_dict = {'category': gm_object.get('category')}
if gm_object.get('modified_site_by_target_id'):
modification_dict['target'] = request.embed(
gm_object.get('modified_site_by_target_id'),
'@@object')['label']
if gm_object.get('introduced_tags_array'):
modification_dict['tags'] = []
for tag in gm_object.get('introduced_tags_array'):
tag_dict = {'location': tag['location']}
if tag.get('promoter_used'):
tag_dict['promoter'] = request.embed(
tag.get('promoter_used'),
                                                    '@@object').get('label')
modification_dict['tags'].append(tag_dict)
modifications_list.append((gm_object['method'], modification_dict))
dictionary_to_add = generate_summary_dictionary(
organismObject,
donorObject,
biosampleObject.get('age'),
biosampleObject.get('age_units'),
biosampleObject.get('life_stage'),
biosampleObject.get('sex'),
biosampleObject.get('biosample_term_name'),
biosampleObject.get('biosample_type'),
biosampleObject.get('starting_amount'),
biosampleObject.get('starting_amount_units'),
biosampleObject.get('depleted_in_term_name'),
biosampleObject.get('phase'),
biosampleObject.get('subcellular_fraction_term_name'),
biosampleObject.get('synchronization'),
biosampleObject.get('post_synchronization_time'),
biosampleObject.get('post_synchronization_time_units'),
biosampleObject.get('post_treatment_time'),
biosampleObject.get('post_treatment_time_units'),
treatment_objects_list,
part_of_object,
originated_from_object,
modifications_list,
True)
dictionaries_of_phrases.append(dictionary_to_add)
if drop_age_sex_flag is True:
sentence_parts = [
'strain_background',
'experiment_term_phrase',
'phase',
'fractionated',
'synchronization',
'modifications_list',
'originated_from',
'treatments_phrase',
'depleted_in'
]
else:
sentence_parts = [
'strain_background',
'experiment_term_phrase',
'phase',
'fractionated',
'sex_stage_age',
'synchronization',
'modifications_list',
'originated_from',
'treatments_phrase',
'depleted_in'
]
if len(dictionaries_of_phrases) > 0:
return construct_biosample_summary(dictionaries_of_phrases, sentence_parts)
@calculated_property(condition='assay_term_name', schema={
"title": "Assay type",
"type": "array",
"items": {
"type": "string",
},
})
def assay_slims(self, registry, assay_term_name):
assay_term_id = assay_terms.get(assay_term_name, None)
if assay_term_id in registry['ontology']:
return registry['ontology'][assay_term_id]['assay']
return []
@calculated_property(condition='assay_term_name', schema={
"title": "Assay title",
"type": "string",
})
def assay_title(self, request, registry, assay_term_name,
replicates=None, target=None):
        # This is the preferred name in generate_ontology.py, if it exists
assay_term_id = assay_terms.get(assay_term_name, None)
if assay_term_id in registry['ontology']:
preferred_name = registry['ontology'][assay_term_id].get('preferred_name',
assay_term_name)
if preferred_name == 'RNA-seq' and replicates is not None:
for rep in replicates:
replicateObject = request.embed(rep, '@@object')
if replicateObject['status'] == 'deleted':
continue
if 'library' in replicateObject:
preferred_name = 'total RNA-seq'
libraryObject = request.embed(replicateObject['library'], '@@object')
if 'size_range' in libraryObject and \
libraryObject['size_range'] == '<200':
preferred_name = 'small RNA-seq'
break
elif 'depleted_in_term_name' in libraryObject and \
'polyadenylated mRNA' in libraryObject['depleted_in_term_name']:
preferred_name = 'polyA depleted RNA-seq'
break
elif 'nucleic_acid_term_name' in libraryObject and \
libraryObject['nucleic_acid_term_name'] == 'polyadenylated mRNA':
preferred_name = 'polyA RNA-seq'
break
return preferred_name or assay_term_name
return assay_term_name
@calculated_property(condition='assay_term_name', schema={
"title": "Assay category",
"type": "array",
"items": {
"type": "string",
},
})
def category_slims(self, registry, assay_term_name):
assay_term_id = assay_terms.get(assay_term_name, None)
if assay_term_id in registry['ontology']:
return registry['ontology'][assay_term_id]['category']
return []
@calculated_property(condition='assay_term_name', schema={
"title": "Assay type",
"type": "array",
"items": {
"type": "string",
},
})
def type_slims(self, registry, assay_term_name):
assay_term_id = assay_terms.get(assay_term_name, None)
if assay_term_id in registry['ontology']:
return registry['ontology'][assay_term_id]['types']
return []
@calculated_property(condition='assay_term_name', schema={
"title": "Assay objective",
"type": "array",
"items": {
"type": "string",
},
})
def objective_slims(self, registry, assay_term_name):
assay_term_id = assay_terms.get(assay_term_name, None)
if assay_term_id in registry['ontology']:
return registry['ontology'][assay_term_id]['objectives']
return []
@calculated_property(schema={
"title": "Related series",
"type": "array",
"items": {
"type": ['string', 'object'],
"linkFrom": "Series.related_datasets",
},
"notSubmittable": True,
})
def related_series(self, request, related_series):
return paths_filtered_by_status(request, related_series)
@calculated_property(schema={
"title": "Superseded by",
"type": "array",
"items": {
"type": ['string', 'object'],
"linkFrom": "Experiment.supersedes",
},
"notSubmittable": True,
})
def superseded_by(self, request, superseded_by):
return paths_filtered_by_status(request, superseded_by)
@calculated_property(schema={
"title": "Replication type",
"description": "Calculated field that indicates the replication model",
"type": "string"
})
def replication_type(self, request, replicates=None, assay_term_name=None):
        # Compare the biosamples to see whether, for humans, they come from
        # the same donor and, for model organisms, whether they are
        # sex-matched and age-matched
biosample_dict = {}
biosample_donor_list = []
biosample_number_list = []
for rep in replicates:
replicateObject = request.embed(rep, '@@object')
if replicateObject['status'] == 'deleted':
continue
if 'library' in replicateObject:
libraryObject = request.embed(replicateObject['library'], '@@object')
if 'biosample' in libraryObject:
biosampleObject = request.embed(libraryObject['biosample'], '@@object')
biosample_dict[biosampleObject['accession']] = biosampleObject
biosample_donor_list.append(biosampleObject.get('donor'))
biosample_number_list.append(replicateObject.get('biological_replicate_number'))
biosample_species = biosampleObject.get('organism')
biosample_type = biosampleObject.get('biosample_type')
else:
                    # special treatment for "RNA Bind-n-Seq": these will be
                    # called unreplicated until we change our mind
if assay_term_name == 'RNA Bind-n-Seq':
return 'unreplicated'
# If I have a library without a biosample,
# I cannot make a call about replicate structure
return None
else:
# REPLICATES WITH NO LIBRARIES WILL BE CAUGHT BY AUDIT (TICKET 3268)
# If I have a replicate without a library,
# I cannot make a call about the replicate structure
return None
# exclude ENCODE2
if (len(set(biosample_number_list)) < 2):
return 'unreplicated'
if biosample_type == 'cell line':
return 'isogenic'
# Since we are not looking for model organisms here, we likely need audits
if biosample_species != '/organisms/human/':
if len(set(biosample_donor_list)) == 1:
return 'isogenic'
else:
return 'anisogenic'
if len(set(biosample_donor_list)) == 0:
return None
if len(set(biosample_donor_list)) == 1:
if None in biosample_donor_list:
return None
else:
return 'isogenic'
return 'anisogenic'
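    # Illustrative outcomes of replication_type above (added note): a single
    # biological replicate number -> 'unreplicated'; any cell line, or several
    # replicates from one donor -> 'isogenic'; several human (or model
    # organism) donors -> 'anisogenic'.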
matrix = {
'y': {
'facets': [
'replicates.library.biosample.donor.organism.scientific_name',
'biosample_type',
'organ_slims',
'award.project',
'assembly',
'internal_status',
'audit_category', # Added for auditmatrix
'lab.title'
],
'group_by': ['biosample_type', 'biosample_term_name'],
'label': 'Biosample',
},
'x': {
'facets': [
'assay_title',
'assay_slims',
'target.investigated_as',
'month_released',
'files.file_type',
],
'group_by': 'assay_title',
'label': 'Assay',
},
}
@collection(
name='replicates',
acl=ALLOW_SUBMITTER_ADD,
properties={
'title': 'Replicates',
'description': 'Listing of Replicates',
})
class Replicate(Item):
item_type = 'replicate'
schema = load_schema('encoded:schemas/replicate.json')
embedded = [
'antibody',
'experiment',
'library',
'library.biosample',
'library.biosample.donor',
'library.biosample.donor.organism',
]
def unique_keys(self, properties):
keys = super(Replicate, self).unique_keys(properties)
value = u'{experiment}/{biological_replicate_number}/{technical_replicate_number}'.format(
**properties)
keys.setdefault('replicate:experiment_biological_technical', []).append(value)
return keys
def __ac_local_roles__(self):
properties = self.upgrade_properties()
root = find_root(self)
experiment = root.get_by_uuid(properties['experiment'])
return experiment.__ac_local_roles__()
|
{
"content_hash": "2d01d40e06e12d76af07bfe5f32d9363",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 106,
"avg_line_length": 41.553941908713696,
"alnum_prop": 0.5102601228219082,
"repo_name": "T2DREAM/t2dream-portal",
"id": "64e4033a6abbf14d92557473973f1b6776d960e7",
"size": "20029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/encoded/types/experiment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AngelScript",
"bytes": "741"
},
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "Gherkin",
"bytes": "16776"
},
{
"name": "HTML",
"bytes": "373076"
},
{
"name": "JavaScript",
"bytes": "1320205"
},
{
"name": "Makefile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "1567328"
},
{
"name": "SCSS",
"bytes": "336182"
},
{
"name": "Shell",
"bytes": "4199"
}
],
"symlink_target": ""
}
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
from .. import backend as K
try:
from PIL import Image as pil_image
except ImportError:
pil_image = None
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.pi / 180 * np.random.uniform(-rg, rg)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('zoom_range should be a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Apply the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=0,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
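# Illustrative sketch (not part of the original module): the helpers above
# compose 3x3 homogeneous matrices which are then applied with scipy.ndimage.
# Assuming a channels-first dummy image, a centered rotation could be applied
# like this:
#
#     import numpy as np
#     dummy = np.random.random((3, 64, 64))   # (channels, rows, cols)
#     theta = np.pi / 8
#     rotation = np.array([[np.cos(theta), -np.sin(theta), 0],
#                          [np.sin(theta), np.cos(theta), 0],
#                          [0, 0, 1]])
#     centered = transform_matrix_offset_center(rotation, 64, 64)
#     rotated = apply_transform(dummy, centered, channel_axis=0)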
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
scale: Whether to rescale image values
to be within [0, 255].
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format.
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def load_img(path, grayscale=False, target_size=None):
"""Loads an image into PIL format.
# Arguments
path: Path to image file
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size:
hw_tuple = (target_size[1], target_size[0])
if img.size != hw_tuple:
img = img.resize(hw_tuple)
return img
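# Illustrative sketch (not part of the original module): a typical round trip
# between a file on disk and a Numpy tensor, assuming a hypothetical image
# file 'cat.jpg':
#
#     img = load_img('cat.jpg', target_size=(128, 128))
#     x = img_to_array(img)      # (128, 128, 3) with the 'channels_last' format
#     img2 = array_to_img(x)     # back to a PIL Image instance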
def list_pictures(directory, ext='jpg|jpeg|bmp|png'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
class ImageDataGenerator(object):
"""Generate minibatches of image data with real-time data augmentation.
# Arguments
featurewise_center: set input mean to 0 over the dataset.
samplewise_center: set each sample mean to 0.
featurewise_std_normalization: divide inputs by std of the dataset.
samplewise_std_normalization: divide each input by its std.
zca_whitening: apply ZCA whitening.
rotation_range: degrees (0 to 180).
width_shift_range: fraction of total width.
height_shift_range: fraction of total height.
shear_range: shear intensity (shear angle in radians).
        zoom_range: amount of zoom. If scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range.
        channel_shift_range: shift range for each channel.
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'.
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally.
vertical_flip: whether to randomly flip images vertically.
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied on each input.
The function will run before any other modification on it.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
(the depth) is at index 1, in 'channels_last' mode it is at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format should be "channels_last" (channel after row and '
'column) or "channels_first" (channel before row and column). '
'Received arg: ', data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('zoom_range should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
def flow(self, x, y=None, batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='jpeg'):
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format)
def flow_from_directory(self, directory,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='jpeg',
follow_links=False):
return DirectoryIterator(
directory, self,
target_size=target_size, color_mode=color_mode,
classes=classes, class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links)
def standardize(self, x):
"""Apply the normalization configuration to a batch of inputs.
# Arguments
x: batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
# x is a single image, so it doesn't have image number at index 0
img_channel_axis = self.channel_axis - 1
if self.samplewise_center:
x -= np.mean(x, axis=img_channel_axis, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, axis=img_channel_axis, keepdims=True) + 1e-7)
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
                              '`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + 1e-7)
else:
warnings.warn('This ImageDataGenerator specifies '
                              '`featurewise_std_normalization`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (x.size))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
else:
warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x):
"""Randomly augment a single image tensor.
# Arguments
x: 3D tensor, single image.
# Returns
A randomly transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
# use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_range:
theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
else:
theta = 0
if self.height_shift_range:
tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.random.uniform(-self.shear_range, self.shear_range)
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
return x
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Fits internal statistics to some sample data.
Required for featurewise_center, featurewise_std_normalization
and zca_whitening.
# Arguments
x: Numpy array, the data to fit on. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Whether to fit on randomly augmented samples
rounds: If `augment`,
how many augmentation passes to do over the data
seed: random seed.
# Raises
ValueError: in case of invalid input `x`.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
raise ValueError(
'Expected input to be images (as Numpy array) '
'following the data format convention "' + self.data_format + '" '
'(channels on axis ' + str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
'However, it was passed an array with shape ' + str(x.shape) +
' (' + str(x.shape[self.channel_axis]) + ' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
self.principal_components = np.dot(np.dot(u, np.diag(1. / np.sqrt(s + 10e-7))), u.T)
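# Illustrative sketch (not part of the original module): minimal use of the
# generator on in-memory data, assuming hypothetical arrays x_train (rank 4)
# and y_train and a compiled Keras model `model`:
#
#     datagen = ImageDataGenerator(rotation_range=20,
#                                  width_shift_range=0.1,
#                                  horizontal_flip=True)
#     datagen.fit(x_train)   # only required for featurewise stats / ZCA
#     for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
#         model.train_on_batch(x_batch, y_batch)
#         break              # flow() loops indefinitely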
class Iterator(object):
"""Abstract base class for image data iterators.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_generator = self._flow_index(n, batch_size, shuffle, seed)
def reset(self):
self.batch_index = 0
def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if seed is not None:
np.random.seed(seed + self.total_batches_seen)
if self.batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (self.batch_index * batch_size) % n
if n > current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = n - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
"""
def __init__(self, x, y, image_data_generator,
batch_size=32, shuffle=False, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='jpeg'):
if y is not None and len(x) != len(y):
raise ValueError('X (images tensor) and y (labels) '
'should have the same length. '
'Found: X.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
raise ValueError('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' + str(self.x.shape) +
' (' + str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0], batch_size, shuffle, seed)
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
batch_x = np.zeros(tuple([current_batch_size] + list(self.x.shape)[1:]), dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
if self.y is None:
return batch_x
batch_y = self.y[index_array]
return batch_x, batch_y
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
        classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256), color_mode='rgb',
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='jpeg',
follow_links=False):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse',
'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp'}
# first, count the number of samples and classes
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_class = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])
for subdir in classes:
subpath = os.path.join(directory, subdir)
for root, _, files in _recursive_list(subpath):
for fname in files:
is_valid = False
for extension in white_list_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.samples += 1
print('Found %d images belonging to %d classes.' % (self.samples, self.num_class))
# second, build an index of the images in the different class subfolders
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for subdir in classes:
subpath = os.path.join(directory, subdir)
for root, _, files in _recursive_list(subpath):
for fname in files:
is_valid = False
for extension in white_list_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.classes[i] = self.class_indices[subdir]
i += 1
# add filename relative to directory
absolute_path = os.path.join(root, fname)
self.filenames.append(os.path.relpath(absolute_path, directory))
super(DirectoryIterator, self).__init__(self.samples, batch_size, shuffle, seed)
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
batch_x = np.zeros((current_batch_size,) + self.image_shape, dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros((len(batch_x), self.num_class), dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
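# Illustrative sketch (not part of the original module): reading images from a
# class-per-subdirectory layout, assuming a hypothetical 'data/train' folder
# with e.g. 'cats/' and 'dogs/' subdirectories:
#
#     datagen = ImageDataGenerator(rescale=1. / 255)
#     train_gen = datagen.flow_from_directory('data/train',
#                                             target_size=(150, 150),
#                                             batch_size=32,
#                                             class_mode='binary')
#     x_batch, y_batch = next(train_gen)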
|
{
"content_hash": "b20bbadfa938bf76f86ea716d1ad71eb",
"timestamp": "",
"source": "github",
"line_count": 991,
"max_line_length": 115,
"avg_line_length": 41.50655903128153,
"alnum_prop": 0.5536916830768482,
"repo_name": "baojianzhou/DLReadingGroup",
"id": "889827cb4b23f4c1ffed837c170419d752f3b20a",
"size": "41133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras/keras/preprocessing/image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "697"
},
{
"name": "Python",
"bytes": "1681423"
}
],
"symlink_target": ""
}
|
import hashlib
import json
import logging
import os
import os.path
import time
import uuid
import warnings
from collections import deque
from io import SEEK_END, BytesIO
from typing import (
Callable,
Dict,
Iterable,
List,
MutableMapping,
Optional,
Tuple,
Type,
Union,
)
from unittest.mock import Mock
import attr
from typing_extensions import Deque
from zope.interface import implementer
from twisted.internet import address, threads, udp
from twisted.internet._resolver import SimpleResolverComplexifier
from twisted.internet.defer import Deferred, fail, maybeDeferred, succeed
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import (
IAddress,
IConsumer,
IHostnameResolver,
IProtocol,
IPullProducer,
IPushProducer,
IReactorPluggableNameResolver,
IReactorTime,
IResolverSimple,
ITransport,
)
from twisted.python.failure import Failure
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
from twisted.web.http_headers import Headers
from twisted.web.resource import IResource
from twisted.web.server import Request, Site
from synapse.config.database import DatabaseConnectionConfig
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseRequest
from synapse.logging.context import ContextResourceUsage
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.storage.engines import PostgresEngine, create_engine
from synapse.types import JsonDict
from synapse.util import Clock
from tests.utils import (
LEAVE_DB,
POSTGRES_BASE_DB,
POSTGRES_HOST,
POSTGRES_PASSWORD,
POSTGRES_PORT,
POSTGRES_USER,
SQLITE_PERSIST_DB,
USE_POSTGRES_FOR_TESTS,
MockClock,
default_config,
)
logger = logging.getLogger(__name__)
# the type of thing that can be passed into `make_request` in the headers list
CustomHeaderType = Tuple[Union[str, bytes], Union[str, bytes]]
class TimedOutException(Exception):
"""
A web query timed out.
"""
@implementer(IConsumer)
@attr.s(auto_attribs=True)
class FakeChannel:
"""
A fake Twisted Web Channel (the part that interfaces with the
wire).
"""
site: Union[Site, "FakeSite"]
_reactor: MemoryReactorClock
result: dict = attr.Factory(dict)
_ip: str = "127.0.0.1"
_producer: Optional[Union[IPullProducer, IPushProducer]] = None
resource_usage: Optional[ContextResourceUsage] = None
_request: Optional[Request] = None
@property
def request(self) -> Request:
assert self._request is not None
return self._request
@request.setter
def request(self, request: Request) -> None:
assert self._request is None
self._request = request
@property
def json_body(self) -> JsonDict:
body = json.loads(self.text_body)
assert isinstance(body, dict)
return body
@property
def json_list(self) -> List[JsonDict]:
body = json.loads(self.text_body)
assert isinstance(body, list)
return body
@property
def text_body(self) -> str:
"""The body of the result, utf-8-decoded.
Raises an exception if the request has not yet completed.
"""
        if not self.is_finished():
raise Exception("Request not yet completed")
return self.result["body"].decode("utf8")
def is_finished(self) -> bool:
"""check if the response has been completely received"""
return self.result.get("done", False)
@property
def code(self) -> int:
if not self.result:
raise Exception("No result yet.")
return int(self.result["code"])
@property
def headers(self) -> Headers:
if not self.result:
raise Exception("No result yet.")
h = Headers()
for i in self.result["headers"]:
h.addRawHeader(*i)
return h
def writeHeaders(self, version, code, reason, headers):
self.result["version"] = version
self.result["code"] = code
self.result["reason"] = reason
self.result["headers"] = headers
def write(self, content: bytes) -> None:
assert isinstance(content, bytes), "Should be bytes! " + repr(content)
if "body" not in self.result:
self.result["body"] = b""
self.result["body"] += content
# Type ignore: mypy doesn't like the fact that producer isn't an IProducer.
def registerProducer( # type: ignore[override]
self,
producer: Union[IPullProducer, IPushProducer],
streaming: bool,
) -> None:
self._producer = producer
self.producerStreaming = streaming
def _produce() -> None:
if self._producer:
self._producer.resumeProducing()
self._reactor.callLater(0.1, _produce)
if not streaming:
self._reactor.callLater(0.0, _produce)
def unregisterProducer(self) -> None:
if self._producer is None:
return
self._producer = None
def requestDone(self, _self: Request) -> None:
self.result["done"] = True
if isinstance(_self, SynapseRequest):
assert _self.logcontext is not None
self.resource_usage = _self.logcontext.get_resource_usage()
def getPeer(self) -> IAddress:
# We give an address so that getClientAddress/getClientIP returns a non null entry,
# causing us to record the MAU
return address.IPv4Address("TCP", self._ip, 3423)
def getHost(self) -> IAddress:
# this is called by Request.__init__ to configure Request.host.
return address.IPv4Address("TCP", "127.0.0.1", 8888)
def isSecure(self) -> bool:
return False
@property
def transport(self) -> "FakeChannel":
return self
def await_result(self, timeout_ms: int = 1000) -> None:
"""
Wait until the request is finished.
"""
end_time = self._reactor.seconds() + timeout_ms / 1000.0
self._reactor.run()
while not self.is_finished():
# If there's a producer, tell it to resume producing so we get content
if self._producer:
self._producer.resumeProducing()
if self._reactor.seconds() > end_time:
raise TimedOutException("Timed out waiting for request to finish.")
self._reactor.advance(0.1)
def extract_cookies(self, cookies: MutableMapping[str, str]) -> None:
"""Process the contents of any Set-Cookie headers in the response
        Any cookies found are added to the given dict.
"""
headers = self.headers.getRawHeaders("Set-Cookie")
if not headers:
return
for h in headers:
parts = h.split(";")
k, v = parts[0].split("=", maxsplit=1)
cookies[k] = v
class FakeSite:
"""
A fake Twisted Web Site, with mocks of the extra things that
Synapse adds.
"""
server_version_string = b"1"
site_tag = "test"
access_logger = logging.getLogger("synapse.access.http.fake")
def __init__(
self,
resource: IResource,
reactor: IReactorTime,
experimental_cors_msc3886: bool = False,
):
"""
Args:
resource: the resource to be used for rendering all requests
"""
self._resource = resource
self.reactor = reactor
self.experimental_cors_msc3886 = experimental_cors_msc3886
def getResourceFor(self, request):
return self._resource
def make_request(
reactor,
site: Union[Site, FakeSite],
method: Union[bytes, str],
path: Union[bytes, str],
content: Union[bytes, str, JsonDict] = b"",
access_token: Optional[str] = None,
request: Type[Request] = SynapseRequest,
shorthand: bool = True,
federation_auth_origin: Optional[bytes] = None,
content_is_form: bool = False,
await_result: bool = True,
custom_headers: Optional[Iterable[CustomHeaderType]] = None,
client_ip: str = "127.0.0.1",
) -> FakeChannel:
"""
Make a web request using the given method, path and content, and render it
Returns the fake Channel object which records the response to the request.
Args:
reactor:
site: The twisted Site to use to render the request
method: The HTTP request method ("verb").
path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces and such).
content: The body of the request. JSON-encoded, if a str of bytes.
access_token: The access token to add as authorization for the request.
request: The request class to create.
shorthand: Whether to try and be helpful and prefix the given URL
with the usual REST API path, if it doesn't contain it.
federation_auth_origin: if set to not-None, we will add a fake
            Authorization header pretending to be the given server name.
content_is_form: Whether the content is URL encoded form data. Adds the
'Content-Type': 'application/x-www-form-urlencoded' header.
await_result: whether to wait for the request to complete rendering. If true,
            will pump the reactor until the renderer tells the channel the request
is finished.
custom_headers: (name, value) pairs to add as request headers
client_ip: The IP to use as the requesting IP. Useful for testing
ratelimiting.
Returns:
channel
"""
if not isinstance(method, bytes):
method = method.encode("ascii")
if not isinstance(path, bytes):
path = path.encode("ascii")
# Decorate it to be the full path, if we're using shorthand
if (
shorthand
and not path.startswith(b"/_matrix")
and not path.startswith(b"/_synapse")
):
if path.startswith(b"/"):
path = path[1:]
path = b"/_matrix/client/r0/" + path
if not path.startswith(b"/"):
path = b"/" + path
if isinstance(content, dict):
content = json.dumps(content).encode("utf8")
if isinstance(content, str):
content = content.encode("utf8")
channel = FakeChannel(site, reactor, ip=client_ip)
req = request(channel, site)
channel.request = req
req.content = BytesIO(content)
# Twisted expects to be at the end of the content when parsing the request.
req.content.seek(0, SEEK_END)
    # Old versions of Twisted (<20.3.0) have issues with parsing x-www-form-urlencoded
# bodies if the Content-Length header is missing
req.requestHeaders.addRawHeader(
b"Content-Length", str(len(content)).encode("ascii")
)
if access_token:
req.requestHeaders.addRawHeader(
b"Authorization", b"Bearer " + access_token.encode("ascii")
)
if federation_auth_origin is not None:
req.requestHeaders.addRawHeader(
b"Authorization",
b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,),
)
if content:
if content_is_form:
req.requestHeaders.addRawHeader(
b"Content-Type", b"application/x-www-form-urlencoded"
)
else:
# Assume the body is JSON
req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
if custom_headers:
for k, v in custom_headers:
req.requestHeaders.addRawHeader(k, v)
req.parseCookies()
req.requestReceived(method, path, b"1.1")
if await_result:
channel.await_result()
return channel
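# Illustrative sketch (not part of this module): inside a test, a request is
# typically rendered against a resource wrapped in a FakeSite. `reactor` and
# `resource` below are assumed to come from the test harness:
#
#     channel = make_request(
#         reactor,
#         FakeSite(resource, reactor),
#         "GET",
#         "/_matrix/client/r0/account/whoami",
#         access_token="hypothetical_token",
#     )
#     assert channel.code == 200
#     body = channel.json_body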
@implementer(IReactorPluggableNameResolver)
class ThreadedMemoryReactorClock(MemoryReactorClock):
"""
A MemoryReactorClock that supports callFromThread.
"""
def __init__(self):
self.threadpool = ThreadPool(self)
self._tcp_callbacks: Dict[Tuple[str, int], Callable] = {}
self._udp = []
self.lookups: Dict[str, str] = {}
self._thread_callbacks: Deque[Callable[[], None]] = deque()
lookups = self.lookups
@implementer(IResolverSimple)
class FakeResolver:
def getHostByName(self, name, timeout=None):
if name not in lookups:
return fail(DNSLookupError("OH NO: unknown %s" % (name,)))
return succeed(lookups[name])
self.nameResolver = SimpleResolverComplexifier(FakeResolver())
super().__init__()
def installNameResolver(self, resolver: IHostnameResolver) -> IHostnameResolver:
raise NotImplementedError()
def listenUDP(self, port, protocol, interface="", maxPacketSize=8196):
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
self._udp.append(p)
return p
def callFromThread(self, callback, *args, **kwargs):
"""
Make the callback fire in the next reactor iteration.
"""
cb = lambda: callback(*args, **kwargs)
# it's not safe to call callLater() here, so we append the callback to a
# separate queue.
self._thread_callbacks.append(cb)
def getThreadPool(self):
return self.threadpool
def add_tcp_client_callback(self, host: str, port: int, callback: Callable):
"""Add a callback that will be invoked when we receive a connection
attempt to the given IP/port using `connectTCP`.
Note that the callback gets run before we return the connection to the
client, which means callbacks cannot block while waiting for writes.
"""
self._tcp_callbacks[(host, port)] = callback
def connectTCP(self, host: str, port: int, factory, timeout=30, bindAddress=None):
"""Fake L{IReactorTCP.connectTCP}."""
conn = super().connectTCP(
host, port, factory, timeout=timeout, bindAddress=None
)
callback = self._tcp_callbacks.get((host, port))
if callback:
callback()
return conn
def advance(self, amount):
# first advance our reactor's time, and run any "callLater" callbacks that
# makes ready
super().advance(amount)
# now run any "callFromThread" callbacks
while True:
try:
callback = self._thread_callbacks.popleft()
except IndexError:
break
callback()
# check for more "callLater" callbacks added by the thread callback
# This isn't required in a regular reactor, but it ends up meaning that
# our database queries can complete in a single call to `advance` [1] which
# simplifies tests.
#
# [1]: we replace the threadpool backing the db connection pool with a
# mock ThreadPool which doesn't really use threads; but we still use
# reactor.callFromThread to feed results back from the db functions to the
# main thread.
super().advance(0)
class ThreadPool:
"""
Threadless thread pool.
"""
def __init__(self, reactor):
self._reactor = reactor
def start(self):
pass
def stop(self):
pass
def callInThreadWithCallback(self, onResult, function, *args, **kwargs):
def _(res):
if isinstance(res, Failure):
onResult(False, res)
else:
onResult(True, res)
d = Deferred()
d.addCallback(lambda x: function(*args, **kwargs))
d.addBoth(_)
self._reactor.callLater(0, d.callback, True)
return d
def _make_test_homeserver_synchronous(server: HomeServer) -> None:
"""
Make the given test homeserver's database interactions synchronous.
"""
clock = server.get_clock()
for database in server.get_datastores().databases:
pool = database._db_pool
def runWithConnection(func, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runWithConnection,
func,
*args,
**kwargs,
)
def runInteraction(interaction, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runInteraction,
interaction,
*args,
**kwargs,
)
pool.runWithConnection = runWithConnection
pool.runInteraction = runInteraction
# Replace the thread pool with a threadless 'thread' pool
pool.threadpool = ThreadPool(clock._reactor)
pool.running = True
# We've just changed the Databases to run DB transactions on the same
# thread, so we need to disable the dedicated thread behaviour.
server.get_datastores().main.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = False
def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]:
clock = ThreadedMemoryReactorClock()
hs_clock = Clock(clock)
return clock, hs_clock
@implementer(ITransport)
@attr.s(cmp=False)
class FakeTransport:
"""
A twisted.internet.interfaces.ITransport implementation which sends all its data
straight into an IProtocol object: it exists to connect two IProtocols together.
To use it, instantiate it with the receiving IProtocol, and then pass it to the
sending IProtocol's makeConnection method:
server = HTTPChannel()
client.makeConnection(FakeTransport(server, self.reactor))
If you want bidirectional communication, you'll need two instances.
"""
other = attr.ib()
"""The Protocol object which will receive any data written to this transport.
:type: twisted.internet.interfaces.IProtocol
"""
_reactor = attr.ib()
"""Test reactor
:type: twisted.internet.interfaces.IReactorTime
"""
_protocol = attr.ib(default=None)
"""The Protocol which is producing data for this transport. Optional, but if set
will get called back for connectionLost() notifications etc.
"""
_peer_address: Optional[IAddress] = attr.ib(default=None)
"""The value to be returned by getPeer"""
_host_address: Optional[IAddress] = attr.ib(default=None)
"""The value to be returned by getHost"""
disconnecting = False
disconnected = False
connected = True
buffer = attr.ib(default=b"")
producer = attr.ib(default=None)
autoflush = attr.ib(default=True)
def getPeer(self) -> Optional[IAddress]:
return self._peer_address
def getHost(self) -> Optional[IAddress]:
return self._host_address
def loseConnection(self, reason=None):
if not self.disconnecting:
logger.info("FakeTransport: loseConnection(%s)", reason)
self.disconnecting = True
if self._protocol:
self._protocol.connectionLost(reason)
# if we still have data to write, delay until that is done
if self.buffer:
logger.info(
"FakeTransport: Delaying disconnect until buffer is flushed"
)
else:
self.connected = False
self.disconnected = True
def abortConnection(self):
logger.info("FakeTransport: abortConnection()")
if not self.disconnecting:
self.disconnecting = True
if self._protocol:
self._protocol.connectionLost(None)
self.disconnected = True
def pauseProducing(self):
if not self.producer:
return
self.producer.pauseProducing()
def resumeProducing(self):
if not self.producer:
return
self.producer.resumeProducing()
def unregisterProducer(self):
if not self.producer:
return
self.producer = None
def registerProducer(self, producer, streaming):
self.producer = producer
self.producerStreaming = streaming
def _produce():
if not self.producer:
# we've been unregistered
return
# some implementations of IProducer (for example, FileSender)
# don't return a deferred.
d = maybeDeferred(self.producer.resumeProducing)
d.addCallback(lambda x: self._reactor.callLater(0.1, _produce))
if not streaming:
self._reactor.callLater(0.0, _produce)
def write(self, byt):
if self.disconnecting:
raise Exception("Writing to disconnecting FakeTransport")
self.buffer = self.buffer + byt
# always actually do the write asynchronously. Some protocols (notably the
# TLSMemoryBIOProtocol) get very confused if a read comes back while they are
# still doing a write. Doing a callLater here breaks the cycle.
if self.autoflush:
self._reactor.callLater(0.0, self.flush)
def writeSequence(self, seq):
for x in seq:
self.write(x)
def flush(self, maxbytes=None):
if not self.buffer:
# nothing to do. Don't write empty buffers: it upsets the
# TLSMemoryBIOProtocol
return
if self.disconnected:
return
if maxbytes is not None:
to_write = self.buffer[:maxbytes]
else:
to_write = self.buffer
logger.info("%s->%s: %s", self._protocol, self.other, to_write)
try:
self.other.dataReceived(to_write)
except Exception as e:
logger.exception("Exception writing to protocol: %s", e)
return
self.buffer = self.buffer[len(to_write) :]
if self.buffer and self.autoflush:
self._reactor.callLater(0.0, self.flush)
if not self.buffer and self.disconnecting:
logger.info("FakeTransport: Buffer now empty, completing disconnect")
self.disconnected = True
def connect_client(
reactor: ThreadedMemoryReactorClock, client_id: int
) -> Tuple[IProtocol, AccumulatingProtocol]:
"""
Connect a client to a fake TCP transport.
Args:
reactor
factory: The connecting factory to build.
"""
factory = reactor.tcpClients.pop(client_id)[2]
client = factory.buildProtocol(None)
server = AccumulatingProtocol()
server.makeConnection(FakeTransport(client, reactor))
client.makeConnection(FakeTransport(server, reactor))
return client, server
class TestHomeServer(HomeServer):
DATASTORE_CLASS = DataStore
def setup_test_homeserver(
cleanup_func,
name="test",
config=None,
reactor=None,
homeserver_to_use: Type[HomeServer] = TestHomeServer,
**kwargs,
):
"""
Setup a homeserver suitable for running tests against. Keyword arguments
are passed to the Homeserver constructor.
If no datastore is supplied, one is created and given to the homeserver.
Args:
cleanup_func : The function used to register a cleanup routine for
after the test.
Calling this method directly is deprecated: you should instead derive from
HomeserverTestCase.
"""
if reactor is None:
from twisted.internet import reactor
if config is None:
config = default_config(name, parse=True)
config.caches.resize_all_caches()
config.ldap_enabled = False
if "clock" not in kwargs:
kwargs["clock"] = MockClock()
if USE_POSTGRES_FOR_TESTS:
test_db = "synapse_test_%s" % uuid.uuid4().hex
database_config = {
"name": "psycopg2",
"args": {
"database": test_db,
"host": POSTGRES_HOST,
"password": POSTGRES_PASSWORD,
"user": POSTGRES_USER,
"port": POSTGRES_PORT,
"cp_min": 1,
"cp_max": 5,
},
}
else:
if SQLITE_PERSIST_DB:
# The current working directory is in _trial_temp, so this gets created within that directory.
test_db_location = os.path.abspath("test.db")
logger.debug("Will persist db to %s", test_db_location)
# Ensure each test gets a clean database.
try:
os.remove(test_db_location)
except FileNotFoundError:
pass
else:
logger.debug("Removed existing DB at %s", test_db_location)
else:
test_db_location = ":memory:"
database_config = {
"name": "sqlite3",
"args": {"database": test_db_location, "cp_min": 1, "cp_max": 1},
}
if "db_txn_limit" in kwargs:
database_config["txn_limit"] = kwargs["db_txn_limit"]
database = DatabaseConnectionConfig("master", database_config)
config.database.databases = [database]
db_engine = create_engine(database.config)
# Create the database before we actually try and connect to it, based off
# the template database we generate in setupdb()
if isinstance(db_engine, PostgresEngine):
db_conn = db_engine.module.connect(
database=POSTGRES_BASE_DB,
user=POSTGRES_USER,
host=POSTGRES_HOST,
port=POSTGRES_PORT,
password=POSTGRES_PASSWORD,
)
db_conn.autocommit = True
cur = db_conn.cursor()
cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
cur.execute(
"CREATE DATABASE %s WITH TEMPLATE %s;" % (test_db, POSTGRES_BASE_DB)
)
cur.close()
db_conn.close()
hs = homeserver_to_use(
name,
config=config,
version_string="Synapse/tests",
reactor=reactor,
)
# Install @cache_in_self attributes
for key, val in kwargs.items():
setattr(hs, "_" + key, val)
# Mock TLS
hs.tls_server_context_factory = Mock()
hs.setup()
if homeserver_to_use == TestHomeServer:
hs.setup_background_tasks()
if isinstance(db_engine, PostgresEngine):
database = hs.get_datastores().databases[0]
# We need to do cleanup on PostgreSQL
def cleanup():
import psycopg2
# Close all the db pools
database._db_pool.close()
dropped = False
# Drop the test database
db_conn = db_engine.module.connect(
database=POSTGRES_BASE_DB,
user=POSTGRES_USER,
host=POSTGRES_HOST,
port=POSTGRES_PORT,
password=POSTGRES_PASSWORD,
)
db_conn.autocommit = True
cur = db_conn.cursor()
# Try a few times to drop the DB. Some things may hold on to the
# database for a few more seconds due to flakiness, preventing
# us from dropping it when the test is over. If we can't drop
# it, warn and move on.
for _ in range(5):
try:
cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
db_conn.commit()
dropped = True
except psycopg2.OperationalError as e:
warnings.warn(
"Couldn't drop old db: " + str(e), category=UserWarning
)
time.sleep(0.5)
cur.close()
db_conn.close()
if not dropped:
warnings.warn("Failed to drop old DB.", category=UserWarning)
if not LEAVE_DB:
# Register the cleanup hook
cleanup_func(cleanup)
# bcrypt is far too slow to be doing in unit tests
# Need to let the HS build an auth handler and then mess with it
# because AuthHandler's constructor requires the HS, so we can't make one
# beforehand and pass it in to the HS's constructor (chicken / egg)
async def hash(p):
return hashlib.md5(p.encode("utf8")).hexdigest()
hs.get_auth_handler().hash = hash
async def validate_hash(p, h):
return hashlib.md5(p.encode("utf8")).hexdigest() == h
hs.get_auth_handler().validate_hash = validate_hash
# Make the threadpool and database transactions synchronous for testing.
_make_test_homeserver_synchronous(hs)
# Load any configured modules into the homeserver
module_api = hs.get_module_api()
for module, config in hs.config.modules.loaded_modules:
module(config=config, api=module_api)
load_legacy_spam_checkers(hs)
load_legacy_third_party_event_rules(hs)
load_legacy_presence_router(hs)
load_legacy_password_auth_providers(hs)
return hs
|
{
"content_hash": "5b3bd749f7f52d9cb42b0c17efcd8a57",
"timestamp": "",
"source": "github",
"line_count": 929,
"max_line_length": 106,
"avg_line_length": 31.293864370290635,
"alnum_prop": 0.6170886075949367,
"repo_name": "matrix-org/synapse",
"id": "b1730fcc8dd5f398941487fba7debfec3cd4a9ab",
"size": "29673",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7229"
},
{
"name": "Dockerfile",
"bytes": "9316"
},
{
"name": "Gherkin",
"bytes": "441"
},
{
"name": "HTML",
"bytes": "66000"
},
{
"name": "JavaScript",
"bytes": "15635"
},
{
"name": "Jinja",
"bytes": "7687"
},
{
"name": "Lua",
"bytes": "241"
},
{
"name": "Perl",
"bytes": "28191"
},
{
"name": "Python",
"bytes": "10632037"
},
{
"name": "Rust",
"bytes": "57034"
},
{
"name": "Shell",
"bytes": "53124"
}
],
"symlink_target": ""
}
|
import acos_client.errors as ae
import base
class Action(base.BaseV30):
def write_memory(self, **kwargs):
payload = {
"memory": {
"primary": True
}
}
try:
self._post("/write/memory/", payload, **kwargs)
except ae.AxapiJsonFormatError:
# Workaround regression in 4.1.0 backwards compat
self._post("/write/memory/", "", **kwargs)
def activate_and_write(self, partition, **kwargs):
self.write_memory()
|
{
"content_hash": "0305d2db361251049e45cf6db6632c59",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 61,
"avg_line_length": 25.142857142857142,
"alnum_prop": 0.5435606060606061,
"repo_name": "dougwig/acos-client",
"id": "44fb7b4040e1003c01a66d7e2a3898d2a7e23b2f",
"size": "1151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acos_client/v30/action.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "240705"
}
],
"symlink_target": ""
}
|
"""
System services
===============
This module provides low-level tools for managing system services,
using the ``service`` command. It supports both `upstart`_ services
and traditional SysV-style ``/etc/init.d/`` scripts.
.. _upstart: http://upstart.ubuntu.com/
"""
from __future__ import with_statement
from fabric.api import *
def is_running(service):
"""
Check if a service is running.
::
import fabtools
if fabtools.service.is_running('foo'):
print "Service foo is running!"
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = sudo('service %(service)s status' % locals())
return res.succeeded
def start(service):
"""
Start a service.
::
import fabtools
# Start service if it is not running
if not fabtools.service.is_running('foo'):
fabtools.service.start('foo')
"""
sudo('service %(service)s start' % locals())
def stop(service):
"""
Stop a service.
::
import fabtools
# Stop service if it is running
if fabtools.service.is_running('foo'):
fabtools.service.stop('foo')
"""
sudo('service %(service)s stop' % locals())
def restart(service):
"""
Restart a service.
::
import fabtools
# Start service, or restart it if it is already running
if fabtools.service.is_running('foo'):
fabtools.service.restart('foo')
else:
fabtools.service.start('foo')
"""
sudo('service %(service)s restart' % locals())
def reload(service):
"""
Reload a service.
::
import fabtools
# Reload service
fabtools.service.reload('foo')
.. warning::
The service needs to support the ``reload`` operation.
"""
sudo('service %(service)s reload' % locals())
def force_reload(service):
"""
Force reload a service.
::
import fabtools
# Force reload service
fabtools.service.force_reload('foo')
.. warning::
The service needs to support the ``force-reload`` operation.
"""
sudo('service %(service)s force-reload' % locals())
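# Illustrative helper (not part of the original module): the primitives above
# compose into an "ensure running" operation. The service name 'foo' used in
# the docstring is a placeholder.
def ensure_running(service):
    """
    Start a service only if it is not already running.

    ::

        import fabtools

        fabtools.service.ensure_running('foo')
    """
    if not is_running(service):
        start(service)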
|
{
"content_hash": "1967d43e5b95cb342abac856652c7b26",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 83,
"avg_line_length": 19.778761061946902,
"alnum_prop": 0.5834451901565996,
"repo_name": "pahaz/fabtools",
"id": "38165504baee2a4cce3575f0eea2909c5b8f3df5",
"size": "2235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabtools/service.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "140349"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tfbench import model_config
from ray.experimental.sgd.model import Model
import ray.experimental.tf_utils as ray_tf_utils
class MockDataset():
name = "synthetic"
class TFBenchModel(Model):
def __init__(self, batch=64, use_cpus=False):
image_shape = [batch, 224, 224, 3]
labels_shape = [batch]
# Synthetic image should be within [0, 255].
images = tf.truncated_normal(
image_shape,
dtype=tf.float32,
mean=127,
stddev=60,
name="synthetic_images")
# Minor hack to avoid H2D copy when using synthetic data
inputs = tf.contrib.framework.local_variable(
images, name="gpu_cached_images")
labels = tf.random_uniform(
labels_shape,
minval=0,
maxval=999,
dtype=tf.int32,
name="synthetic_labels")
model = model_config.get_model_config("resnet101", MockDataset())
logits, aux = model.build_network(
inputs, data_format=use_cpus and "NHWC" or "NCHW")
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
# Implement model interface
self.loss = tf.reduce_mean(loss, name="xentropy-loss")
self.optimizer = tf.train.GradientDescentOptimizer(1e-6)
self.variables = ray_tf_utils.TensorFlowVariables(
self.loss, tf.get_default_session())
def get_loss(self):
return self.loss
def get_optimizer(self):
return self.optimizer
def get_feed_dict(self):
return {}
def get_weights(self):
return self.variables.get_flat()
def set_weights(self, weights):
self.variables.set_flat(weights)
|
{
"content_hash": "7559f7d629df2c560c5a6d97eda09bb8",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 73,
"avg_line_length": 28.71212121212121,
"alnum_prop": 0.61688654353562,
"repo_name": "atumanov/ray",
"id": "99900c43bb051aad3424d6d8fb2be672d7efbfb7",
"size": "1895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/experimental/sgd/tfbench/test_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20715"
},
{
"name": "C++",
"bytes": "1036803"
},
{
"name": "CSS",
"bytes": "9262"
},
{
"name": "Dockerfile",
"bytes": "3411"
},
{
"name": "HTML",
"bytes": "32704"
},
{
"name": "Java",
"bytes": "517715"
},
{
"name": "JavaScript",
"bytes": "8178"
},
{
"name": "Jupyter Notebook",
"bytes": "1610"
},
{
"name": "Python",
"bytes": "3081422"
},
{
"name": "Ruby",
"bytes": "956"
},
{
"name": "Shell",
"bytes": "76928"
},
{
"name": "Smarty",
"bytes": "955"
}
],
"symlink_target": ""
}
|
import ast
import os
import shutil
from typing import Any, Dict, List, Optional, Union
from spython.main import Client
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
class SingularityOperator(BaseOperator):
"""
Execute a command inside a Singularity container
Singularity has more seamless connection to the host than Docker, so
no special binds are needed to ensure binding content in the user $HOME
and temporary directories. If the user needs custom binds, this can
be done with --volumes
:param image: Singularity image or URI from which to create the container.
:type image: str
:param auto_remove: Delete the container when the process exits.
The default is False.
:type auto_remove: bool
:param command: Command to be run in the container. (templated)
:type command: str or list
:param start_command: Start command to pass to the container instance.
:type start_command: str or list
:param environment: Environment variables to set in the container. (templated)
:type environment: dict
    :param working_dir: Set a working directory for the instance
        (similar to the ``-w`` switch of the docker client).
    :type working_dir: str
:param force_pull: Pull the image on every run. Default is False.
:type force_pull: bool
:param volumes: List of volumes to mount into the container, e.g.
``['/host/path:/container/path', '/host/path2:/container/path2']``.
:type volumes: Optional[List[str]]
:param options: Other flags (list) to provide to the instance start.
:type options: list
"""
template_fields = (
'command',
'environment',
)
template_ext = (
'.sh',
'.bash',
)
template_fields_renderers = {"command": "bash", "environment": "json"}
def __init__( # pylint: disable=too-many-arguments
self,
*,
image: str,
command: Union[str, ast.AST],
start_command: Optional[Union[str, List[str]]] = None,
environment: Optional[Dict[str, Any]] = None,
pull_folder: Optional[str] = None,
working_dir: Optional[str] = None,
force_pull: Optional[bool] = False,
volumes: Optional[List[str]] = None,
options: Optional[List[str]] = None,
auto_remove: Optional[bool] = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.auto_remove = auto_remove
self.command = command
self.start_command = start_command
self.environment = environment or {}
self.force_pull = force_pull
self.image = image
self.instance = None
self.options = options or []
self.pull_folder = pull_folder
self.volumes = volumes or []
self.working_dir = working_dir
self.cli = None
self.container = None
def execute(self, context) -> None:
self.log.info('Preparing Singularity container %s', self.image)
self.cli = Client
if not self.command:
raise AirflowException('You must define a command.')
# Pull the container if asked, and ensure not a binary file
if self.force_pull and not os.path.exists(self.image):
self.log.info('Pulling container %s', self.image)
image = self.cli.pull( # type: ignore[attr-defined]
self.image, stream=True, pull_folder=self.pull_folder
)
# If we need to stream result for the user, returns lines
if isinstance(image, list):
lines = image.pop()
image = image[0]
for line in lines:
self.log.info(line)
# Update the image to be a filepath on the system
self.image = image
# Prepare list of binds
for bind in self.volumes:
self.options += ['--bind', bind]
# Does the user want a custom working directory?
if self.working_dir is not None:
self.options += ['--workdir', self.working_dir]
# Export environment before instance is run
for enkey, envar in self.environment.items():
self.log.debug('Exporting %s=%s', envar, enkey)
os.putenv(enkey, envar)
os.environ[enkey] = envar
# Create a container instance
self.log.debug('Options include: %s', self.options)
self.instance = self.cli.instance( # type: ignore[attr-defined]
self.image, options=self.options, args=self.start_command, start=False
)
self.instance.start() # type: ignore[attr-defined]
self.log.info(self.instance.cmd) # type: ignore[attr-defined]
self.log.info('Created instance %s from %s', self.instance, self.image)
self.log.info('Running command %s', self._get_command())
self.cli.quiet = True # type: ignore[attr-defined]
result = self.cli.execute( # type: ignore[attr-defined]
self.instance, self._get_command(), return_result=True
)
# Stop the instance
self.log.info('Stopping instance %s', self.instance)
self.instance.stop() # type: ignore[attr-defined]
if self.auto_remove is True:
if self.auto_remove and os.path.exists(self.image):
shutil.rmtree(self.image)
# If the container failed, raise the exception
if result['return_code'] != 0:
message = result['message']
raise AirflowException(f'Singularity failed: {message}')
self.log.info('Output from command %s', result['message'])
def _get_command(self) -> Optional[Any]:
if self.command is not None and self.command.strip().find('[') == 0: # type: ignore
commands = ast.literal_eval(self.command)
else:
commands = self.command
return commands
def on_kill(self) -> None:
if self.instance is not None:
self.log.info('Stopping Singularity instance')
self.instance.stop()
# If an image exists, clean it up
if self.auto_remove is True:
if self.auto_remove and os.path.exists(self.image):
shutil.rmtree(self.image)
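# Illustrative usage sketch (assumption, not part of this module): wiring the
# operator into a DAG. The dag_id, image URI and command below are placeholders.
#
#     from datetime import datetime
#     from airflow import DAG
#     from airflow.providers.singularity.operators.singularity import SingularityOperator
#
#     with DAG(dag_id='singularity_example',
#              start_date=datetime(2021, 1, 1),
#              schedule_interval=None) as dag:
#         run_in_container = SingularityOperator(
#             task_id='run_in_container',
#             image='docker://busybox:latest',
#             command='echo hello from the container',
#             auto_remove=True,
#         )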
|
{
"content_hash": "77aa8d628c6cae6c9f2d76adb504aa72",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 92,
"avg_line_length": 37.075581395348834,
"alnum_prop": 0.6139250431237259,
"repo_name": "sekikn/incubator-airflow",
"id": "a3c8c9676f3febd1ff3ca49014cf3a69f404a0b8",
"size": "7165",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/singularity/operators/singularity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
import six
from django.conf import settings
from django.utils.module_loading import import_string
from horizon import defaults
def import_object(name_or_object):
if isinstance(name_or_object, six.string_types):
return import_string(name_or_object)
return name_or_object
def import_setting(name):
"""Imports an object specified either directly or as a module path."""
value = getattr(settings, name, None)
return import_object(value)
# NOTE(amotoki):
# This is a copy from openstack_dashboard.utils.settings.get_dict_config().
# This copy is needed to look up defaults for horizon.defaults
# instead of openstack_dashboard.defaults.
# NOTE(amotoki): The limitation of this approach is that we cannot handle
# a case where default values in horizon are overridden by
# openstack_dashboard.defaults. This can be addressed by set_override()
# from oslo.config.
# TODO(amotoki): This copy might be cleanup if we can use oslo.config
# for horizon configurations.
def get_dict_config(name, key):
config = getattr(settings, name)
if key in config:
return config[key]
return getattr(defaults, name)[key]
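# Illustrative sketch (assumption, not from the original module): the lookup
# order is "deployment settings first, horizon.defaults second". The setting
# and key names below are placeholders.
#
#     value = get_dict_config('SOME_DICT_SETTING', 'some_key')
#     handler = import_setting('SOME_CLASS_PATH_SETTING')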
|
{
"content_hash": "41b858c0a1463d95625f65f6220150aa",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 32.885714285714286,
"alnum_prop": 0.7445699391833188,
"repo_name": "NeCTAR-RC/horizon",
"id": "6adffead1d94d3d27b10acbf64dcebdb70577451",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/nectar/train",
"path": "horizon/utils/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "598098"
},
{
"name": "JavaScript",
"bytes": "2474550"
},
{
"name": "Python",
"bytes": "5323984"
},
{
"name": "SCSS",
"bytes": "132603"
},
{
"name": "Shell",
"bytes": "7466"
}
],
"symlink_target": ""
}
|
"""
kaoru.commands.hello
~~~~~~~~
/hello command implementation
:copyright: (c) 2015 by Alejandro Ricoveri
:license: MIT, see LICENSE for more details.
"""
from telegram import Update
from .. import utils
from . import bot_command
_greetings = [
# English
'Affirmative Dave, I read you',
'Hello world!',
# Spanish
'Hola',
# Arabic
'أهلاً و سهلاً',
# Mandarin
'你好',
# Corsican
'Salute',
# French
'Salut', 'Bonjour!, est-ce que vous allez bien?',
# Danish
'Hej',
# German
'Hallo',
'Guten tag!',
# Italian
'Ciao',
# Japanese
'今日は',
# Klingon
'nuqneH',
# Farsi
'سلام',
# Turkish
'Merhaba',
]
# /hello command
@bot_command
def _cmd_handler(bot, update):
"""a rather simple ping command"""
    utils.echo_msg(bot, update, utils.select_rand_str(_greetings))
desc = 'See if I "live"' # This command's description
cmd_handler = _cmd_handler # command handler
cmd_str = 'hello' # command /string
|
{
"content_hash": "2060d33b9a63b117f1b58b49e80bef8b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 71,
"avg_line_length": 16.140845070422536,
"alnum_prop": 0.5942408376963351,
"repo_name": "axltxl/kaoru",
"id": "8b26a4624512e48f56f23e8d673c6a5c0fe15084",
"size": "1196",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kaoru/commands/hello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47901"
}
],
"symlink_target": ""
}
|
import os
import pytest
from bokeh.io import output_file
from .screenshot import Screenshot
def pytest_addoption(parser):
parser.addoption(
"--set-new-base-screenshot", dest="set_new_base_screenshot", action="store_true", default=False, help="Use to set a new screenshot for imagediff testing. Be sure to only set for the tests you want by usign the -k pytest option to select your test."
)
@pytest.fixture
def selenium(selenium):
# Give items a chance to load
selenium.implicitly_wait(10)
selenium.set_window_size(width=1200, height=600)
return selenium
@pytest.fixture(scope='session')
def base_url(request, file_server):
return file_server.where_is('')
@pytest.fixture
def output_file_url(request, file_server):
filename = request.function.__name__ + '.html'
file_obj = request.fspath.dirpath().join(filename)
file_path = file_obj.strpath
output_file(file_path, mode='inline')
def tearDown():
if file_obj.isfile():
file_obj.remove()
request.addfinalizer(tearDown)
return file_server.where_is(file_path)
def pytest_generate_tests(metafunc):
# hasattr(metafunc.function, "foo") is like doing item.get_marker("foo")
# This is ugly, but unfortunately there's not currently a better interface
# https://github.com/pytest-dev/pytest/issues/1425
if hasattr(metafunc.function, "cross_browser"):
if metafunc.config.option.driver == "SauceLabs":
cross_browser_list = [
{
"browserName": "firefox",
"platform": "Linux",
"version": None
},
{
"browserName": "chrome",
"platform": "Linux",
"version": None
},
]
metafunc.fixturenames.append('test_browser')
metafunc.parametrize('test_browser', cross_browser_list, ids=["firefox", "chrome"])
@pytest.fixture()
def test_browser():
# If version is None, latest will be used
return {"browserName": "firefox", "platform": "Linux", "version": None}
@pytest.fixture()
def capabilities(capabilities, test_browser):
capabilities["browserName"] = test_browser["browserName"]
capabilities["platform"] = test_browser["platform"]
if test_browser["version"]:
capabilities["version"] = test_browser["version"]
return capabilities
@pytest.fixture(scope="session")
def session_capabilities(session_capabilities):
session_capabilities["tunnel-identifier"] = os.environ.get("TRAVIS_JOB_NUMBER")
return session_capabilities
@pytest.fixture
def screenshot(request):
# Screenshot tests can only be run under the following circumstances:
# - driver: SauceLabs
# - capabilities: browserName: firefox
# - capabilities: platform: linux
# This helps ensure that screenshots are comparable.
if request.config.option.driver != 'SauceLabs':
pytest.skip('Screenshot tests can only be run with --driver=SauceLabs')
capabilities = request.getfuncargvalue('capabilities')
if capabilities['browserName'] != 'firefox':
        pytest.skip('Screenshot tests can only be run with browserName firefox. Capabilities are: %s' % capabilities)
if capabilities['platform'] != 'Linux':
pytest.skip('Screenshot tests can only be run with platform linux. Capabilities are: %s' % capabilities)
if request.config.option.set_new_base_screenshot:
screenshot = Screenshot(request=request, set_new_base=True)
else:
screenshot = Screenshot(request=request, set_new_base=False)
return screenshot
#
# Hook into the pytest report to add the screenshot diff
#
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
outcome = yield
# Only run through this at the end of the test
if call.when != 'call':
return
# Don't continue if this isn't a screenshot test
if 'screenshot' not in item.fixturenames:
return
# Don't add screenshots if we can't create a screenshot
try:
screenshot = Screenshot(item=item)
except AssertionError:
return
report = outcome.get_result()
xfail = hasattr(report, 'wasxfail')
failure = (report.skipped and xfail) or (report.failed and not xfail)
pytest_html = item.config.pluginmanager.getplugin('html')
# Don't add screenshots if test passed
if failure:
diff = pytest_html.extras.image(screenshot.get_diff_as_base64(), '')
base = pytest_html.extras.image(screenshot.get_base_as_base64(), '')
test = pytest_html.extras.image(screenshot.get_current_as_base64(), '')
# Override existing extra and logs for screenshot tests (keeps output manageable)
report.extra = [test, diff, base]
report.longrepr = ''
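# Illustrative sketch (assumption, not part of this plugin): a test module that
# consumes the fixtures defined above. The plot and expected title are placeholders.
#
#     from bokeh.plotting import figure, save
#
#     def test_renders_a_plot(output_file_url, selenium):
#         plot = figure()
#         plot.circle([1, 2, 3], [1, 2, 3])
#         save(plot)                      # writes to the file behind output_file_url
#         selenium.get(output_file_url)
#         assert selenium.title == "Bokeh Plot"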
|
{
"content_hash": "22f13e3c06923b7158555fee52b33263",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 256,
"avg_line_length": 33.41379310344828,
"alnum_prop": 0.6619195046439629,
"repo_name": "msarahan/bokeh",
"id": "d2fdb25c6c09688c648307bed052a84b4a468f3e",
"size": "4845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/integration_tests_plugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "409581"
},
{
"name": "CoffeeScript",
"bytes": "734298"
},
{
"name": "HTML",
"bytes": "27676"
},
{
"name": "JavaScript",
"bytes": "8811"
},
{
"name": "Jupyter Notebook",
"bytes": "3981"
},
{
"name": "Makefile",
"bytes": "5842"
},
{
"name": "Python",
"bytes": "1779736"
},
{
"name": "Shell",
"bytes": "17605"
}
],
"symlink_target": ""
}
|
"""
Module's constants for the modes of operation supported with AES:
:var MODE_ECB: :ref:`Electronic Code Book (ECB) <ecb_mode>`
:var MODE_CBC: :ref:`Cipher-Block Chaining (CBC) <cbc_mode>`
:var MODE_CFB: :ref:`Cipher FeedBack (CFB) <cfb_mode>`
:var MODE_OFB: :ref:`Output FeedBack (OFB) <ofb_mode>`
:var MODE_CTR: :ref:`CounTer Mode (CTR) <ctr_mode>`
:var MODE_OPENPGP: :ref:`OpenPGP Mode <openpgp_mode>`
:var MODE_CCM: :ref:`Counter with CBC-MAC (CCM) Mode <ccm_mode>`
:var MODE_EAX: :ref:`EAX Mode <eax_mode>`
:var MODE_GCM: :ref:`Galois Counter Mode (GCM) <gcm_mode>`
:var MODE_SIV: :ref:`Synthetic Initialization Vector (SIV) <siv_mode>`
:var MODE_OCB: :ref:`Offset Code Book (OCB) <ocb_mode>`
"""
import sys
from Cryptodome.Cipher import _create_cipher
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
c_size_t, c_uint8_ptr)
from Cryptodome.Util import _cpu_features
from Cryptodome.Random import get_random_bytes
_cproto = """
int AES_start_operation(const uint8_t key[],
size_t key_len,
void **pResult);
int AES_encrypt(const void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int AES_decrypt(const void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int AES_stop_operation(void *state);
"""
# Load portable AES
_raw_aes_lib = load_pycryptodome_raw_lib("Cryptodome.Cipher._raw_aes",
_cproto)
# Try to load AES with AES NI instructions
try:
_raw_aesni_lib = None
if _cpu_features.have_aes_ni():
_raw_aesni_lib = load_pycryptodome_raw_lib("Cryptodome.Cipher._raw_aesni",
_cproto.replace("AES",
"AESNI"))
# _raw_aesni may not have been compiled in
except OSError:
pass
def _create_base_cipher(dict_parameters):
"""This method instantiates and returns a handle to a low-level
base cipher. It will absorb named parameters in the process."""
use_aesni = dict_parameters.pop("use_aesni", True)
try:
key = dict_parameters.pop("key")
except KeyError:
raise TypeError("Missing 'key' parameter")
if len(key) not in key_size:
raise ValueError("Incorrect AES key length (%d bytes)" % len(key))
if use_aesni and _raw_aesni_lib:
start_operation = _raw_aesni_lib.AESNI_start_operation
stop_operation = _raw_aesni_lib.AESNI_stop_operation
else:
start_operation = _raw_aes_lib.AES_start_operation
stop_operation = _raw_aes_lib.AES_stop_operation
cipher = VoidPointer()
result = start_operation(c_uint8_ptr(key),
c_size_t(len(key)),
cipher.address_of())
if result:
raise ValueError("Error %X while instantiating the AES cipher"
% result)
return SmartPointer(cipher.get(), stop_operation)
def _derive_Poly1305_key_pair(key, nonce):
"""Derive a tuple (r, s, nonce) for a Poly1305 MAC.
If nonce is ``None``, a new 16-byte nonce is generated.
"""
if len(key) != 32:
raise ValueError("Poly1305 with AES requires a 32-byte key")
if nonce is None:
nonce = get_random_bytes(16)
elif len(nonce) != 16:
raise ValueError("Poly1305 with AES requires a 16-byte nonce")
s = new(key[:16], MODE_ECB).encrypt(nonce)
return key[16:], s, nonce
def new(key, mode, *args, **kwargs):
"""Create a new AES cipher.
:param key:
The secret key to use in the symmetric cipher.
It must be 16, 24 or 32 bytes long (respectively for *AES-128*,
*AES-192* or *AES-256*).
For ``MODE_SIV`` only, it doubles to 32, 48, or 64 bytes.
:type key: bytes/bytearray/memoryview
:param mode:
The chaining mode to use for encryption or decryption.
If in doubt, use ``MODE_EAX``.
:type mode: One of the supported ``MODE_*`` constants
:Keyword Arguments:
* **iv** (*bytes*, *bytearray*, *memoryview*) --
(Only applicable for ``MODE_CBC``, ``MODE_CFB``, ``MODE_OFB``,
and ``MODE_OPENPGP`` modes).
The initialization vector to use for encryption or decryption.
For ``MODE_CBC``, ``MODE_CFB``, and ``MODE_OFB`` it must be 16 bytes long.
For ``MODE_OPENPGP`` mode only,
it must be 16 bytes long for encryption
and 18 bytes for decryption (in the latter case, it is
actually the *encrypted* IV which was prefixed to the ciphertext).
If not provided, a random byte string is generated (you must then
read its value with the :attr:`iv` attribute).
* **nonce** (*bytes*, *bytearray*, *memoryview*) --
(Only applicable for ``MODE_CCM``, ``MODE_EAX``, ``MODE_GCM``,
``MODE_SIV``, ``MODE_OCB``, and ``MODE_CTR``).
A value that must never be reused for any other encryption done
with this key (except possibly for ``MODE_SIV``, see below).
For ``MODE_EAX``, ``MODE_GCM`` and ``MODE_SIV`` there are no
restrictions on its length (recommended: **16** bytes).
For ``MODE_CCM``, its length must be in the range **[7..13]**.
Bear in mind that with CCM there is a trade-off between nonce
length and maximum message size. Recommendation: **11** bytes.
For ``MODE_OCB``, its length must be in the range **[1..15]**
(recommended: **15**).
For ``MODE_CTR``, its length must be in the range **[0..15]**
(recommended: **8**).
For ``MODE_SIV``, the nonce is optional, if it is not specified,
then no nonce is being used, which renders the encryption
deterministic.
            If not provided, for modes other than ``MODE_SIV``, a random
byte string of the recommended length is used (you must then
read its value with the :attr:`nonce` attribute).
* **segment_size** (*integer*) --
(Only ``MODE_CFB``).The number of **bits** the plaintext and ciphertext
are segmented in. It must be a multiple of 8.
If not specified, it will be assumed to be 8.
* **mac_len** : (*integer*) --
(Only ``MODE_EAX``, ``MODE_GCM``, ``MODE_OCB``, ``MODE_CCM``)
Length of the authentication tag, in bytes.
It must be even and in the range **[4..16]**.
The recommended value (and the default, if not specified) is **16**.
* **msg_len** : (*integer*) --
(Only ``MODE_CCM``). Length of the message to (de)cipher.
If not specified, ``encrypt`` must be called with the entire message.
Similarly, ``decrypt`` can only be called once.
* **assoc_len** : (*integer*) --
(Only ``MODE_CCM``). Length of the associated data.
If not specified, all associated data is buffered internally,
which may represent a problem for very large messages.
* **initial_value** : (*integer* or *bytes/bytearray/memoryview*) --
(Only ``MODE_CTR``).
The initial value for the counter. If not present, the cipher will
start counting from 0. The value is incremented by one for each block.
The counter number is encoded in big endian mode.
* **counter** : (*object*) --
Instance of ``Cryptodome.Util.Counter``, which allows full customization
of the counter block. This parameter is incompatible to both ``nonce``
and ``initial_value``.
* **use_aesni** : (*boolean*) --
Use Intel AES-NI hardware extensions (default: use if available).
:Return: an AES object, of the applicable mode.
"""
kwargs["add_aes_modes"] = True
return _create_cipher(sys.modules[__name__], key, mode, *args, **kwargs)
MODE_ECB = 1
MODE_CBC = 2
MODE_CFB = 3
MODE_OFB = 5
MODE_CTR = 6
MODE_OPENPGP = 7
MODE_CCM = 8
MODE_EAX = 9
MODE_SIV = 10
MODE_GCM = 11
MODE_OCB = 12
# Size of a data block (in bytes)
block_size = 16
# Size of a key (in bytes)
key_size = (16, 24, 32)
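# Illustrative usage sketch (not part of this module): authenticated encryption
# with EAX mode. The plaintext is a placeholder; encrypt_and_digest() and
# decrypt_and_verify() are provided by the EAX cipher object returned by new().
#
#     from Cryptodome.Cipher import AES
#     from Cryptodome.Random import get_random_bytes
#
#     secret = get_random_bytes(16)                       # AES-128 key
#     cipher = AES.new(secret, AES.MODE_EAX)
#     ciphertext, tag = cipher.encrypt_and_digest(b'attack at dawn')
#
#     decrypter = AES.new(secret, AES.MODE_EAX, nonce=cipher.nonce)
#     plaintext = decrypter.decrypt_and_verify(ciphertext, tag)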
|
{
"content_hash": "d1175a3bd546cd758c4870ff0294af3a",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 86,
"avg_line_length": 37.37117903930131,
"alnum_prop": 0.5758354755784062,
"repo_name": "cloudera/hue",
"id": "dd2671a694872e9bf8caad5221f3a0b89cf5afeb",
"size": "9569",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/pycryptodomex-3.9.7/lib/Cryptodome/Cipher/AES.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
"""Test script for the dbm.open function based on testdumbdbm.py"""
import os
import unittest
import glob
import test.support
# Skip tests if dbm module doesn't exist.
dbm = test.support.import_module('dbm')
_fname = test.support.TESTFN
#
# Iterates over every database module supported by dbm currently available,
# setting dbm to use each in turn, and yielding that module
#
def dbm_iterator():
for name in dbm._names:
try:
mod = __import__(name, fromlist=['open'])
except ImportError:
continue
dbm._modules[name] = mod
yield mod
#
# Clean up all scratch databases we might have created during testing
#
def delete_files():
# we don't know the precise name the underlying database uses
# so we use glob to locate all names
for f in glob.glob(_fname + "*"):
test.support.unlink(f)
class AnyDBMTestCase(unittest.TestCase):
_dict = {'0': b'',
'a': b'Python:',
'b': b'Programming',
'c': b'the',
'd': b'way',
'f': b'Guido',
'g': b'intended',
}
def init_db(self):
f = dbm.open(_fname, 'n')
for k in self._dict:
f[k.encode("ascii")] = self._dict[k]
f.close()
def keys_helper(self, f):
keys = sorted(k.decode("ascii") for k in f.keys())
dkeys = sorted(self._dict.keys())
self.assertEqual(keys, dkeys)
return keys
def test_error(self):
self.assert_(issubclass(self.module.error, IOError))
def test_anydbm_not_existing(self):
self.assertRaises(dbm.error, dbm.open, _fname)
def test_anydbm_creation(self):
f = dbm.open(_fname, 'c')
self.assertEqual(list(f.keys()), [])
for key in self._dict:
f[key.encode("ascii")] = self._dict[key]
self.read_helper(f)
f.close()
def test_anydbm_modification(self):
self.init_db()
f = dbm.open(_fname, 'c')
self._dict['g'] = f[b'g'] = b"indented"
self.read_helper(f)
f.close()
def test_anydbm_read(self):
self.init_db()
f = dbm.open(_fname, 'r')
self.read_helper(f)
f.close()
def test_anydbm_keys(self):
self.init_db()
f = dbm.open(_fname, 'r')
keys = self.keys_helper(f)
f.close()
def test_anydbm_access(self):
self.init_db()
f = dbm.open(_fname, 'r')
key = "a".encode("ascii")
assert(key in f)
assert(f[key] == b"Python:")
f.close()
def read_helper(self, f):
keys = self.keys_helper(f)
for key in self._dict:
self.assertEqual(self._dict[key], f[key.encode("ascii")])
def tearDown(self):
delete_files()
def setUp(self):
dbm._defaultmod = self.module
delete_files()
class WhichDBTestCase(unittest.TestCase):
# Actual test methods are added to namespace after class definition.
def __init__(self, *args):
unittest.TestCase.__init__(self, *args)
def test_whichdb(self):
for module in dbm_iterator():
# Check whether whichdb correctly guesses module name
# for databases opened with "module" module.
# Try with empty files first
name = module.__name__
if name == 'dbm.dumb':
continue # whichdb can't support dbm.dumb
test.support.unlink(_fname)
f = module.open(_fname, 'c')
f.close()
self.assertEqual(name, dbm.whichdb(_fname))
# Now add a key
f = module.open(_fname, 'w')
f[b"1"] = b"1"
# and test that we can find it
self.assertTrue(b"1" in f)
# and read it
self.assertTrue(f[b"1"] == b"1")
f.close()
self.assertEqual(name, dbm.whichdb(_fname))
def tearDown(self):
delete_files()
def setUp(self):
delete_files()
self.filename = test.support.TESTFN
self.d = dbm.open(self.filename, 'c')
self.d.close()
def test_keys(self):
self.d = dbm.open(self.filename, 'c')
self.assertEqual(self.d.keys(), [])
a = [(b'a', b'b'), (b'12345678910', b'019237410982340912840198242')]
for k, v in a:
self.d[k] = v
self.assertEqual(sorted(self.d.keys()), sorted(k for (k, v) in a))
for k, v in a:
self.assert_(k in self.d)
self.assertEqual(self.d[k], v)
self.assert_(b'xxx' not in self.d)
self.assertRaises(KeyError, lambda: self.d[b'xxx'])
self.d.close()
def test_main():
classes = [WhichDBTestCase]
for mod in dbm_iterator():
classes.append(type("TestCase-" + mod.__name__, (AnyDBMTestCase,),
{'module': mod}))
test.support.run_unittest(*classes)
if __name__ == "__main__":
test_main()
|
{
"content_hash": "210a5fc0aa674ea28d8cf75e7d3cf2cb",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 76,
"avg_line_length": 28.976608187134502,
"alnum_prop": 0.5459132189707366,
"repo_name": "MalloyPower/parsing-python",
"id": "35d12e43ba8b64a82998ef4b87586020a378a9aa",
"size": "4978",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.1/Lib/test/test_dbm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
import sys
import os
def encode_fib(n):
# Return string with Fibonacci encoding for n (n >= 1).
result = ""
if n >= 1:
a = 1
b = 1
c = a + b # next Fibonacci number
fibs = [b] # list of Fibonacci numbers, starting with F(2), each <= n
while n >= c:
fibs.append(c) # add next Fibonacci number to end of list
a = b
b = c
c = a + b
result = "1" # extra "1" at end
for fibnum in reversed(fibs):
if n >= fibnum:
n = n - fibnum
result = "1" + result
else:
result = "0" + result
return result
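# Worked examples (added for illustration, derived from the function above):
#   encode_fib(1) -> "11"    (1 = F(2))
#   encode_fib(2) -> "011"   (2 = F(3))
#   encode_fib(3) -> "0011"  (3 = F(4))
#   encode_fib(4) -> "1011"  (4 = F(2) + F(4))
# Every codeword ends in "11", which is what lets the decoder below detect the
# end of a codeword with endswith('11').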
def byteWriter(bitStr, outputFile):
global bitStream
bitStream += bitStr
while len(bitStream) > 8: # write byte(s) if there are more then 8 bits
byteStr = bitStream[:8]
bitStream = bitStream[8:]
outputFile.write(chr(int(byteStr, 2)))
def bitReader(n): # number of bits to read
global byteArr
global bitPosition
bitStr = ''
for i in range(n):
bitPosInByte = 7 - (bitPosition % 8)
bytePosition = int(bitPosition / 8)
byteVal = byteArr[bytePosition]
bitVal = int(byteVal / (2 ** bitPosInByte)) % 2
bitStr += str(bitVal)
bitPosition += 1 # prepare to read the next bit
return bitStr
# MAIN
if len(sys.argv) != 4:
print 'Usage: Fibonacci.py [e|d] [path]InputFileName [path]OutputFileName'
sys.exit()
mode = sys.argv[1] # encoding/decoding
inputFile = sys.argv[2]
outputFile = sys.argv[3]
# read the whole input file into a byte array
fileSize = os.path.getsize(inputFile)
fi = open(inputFile, 'rb')
# byteArr = map(ord, fi.read(fileSize))
byteArr = bytearray(fi.read(fileSize))
fi.close()
fileSize = len(byteArr)
print 'File size in bytes:', fileSize
print
if mode == 'e': # FILE ENCODING
# calculate the total number of each byte value in the file
freqList = [0] * 256
for b in byteArr:
freqList[b] += 1
# create a list of (frequency, byteValue, encodingBitStr) tuples
tupleList = []
for b in range(256):
if freqList[b] > 0:
tupleList.append((freqList[b], b, ''))
# sort the list according to the frequencies descending
tupleList = sorted(tupleList, key=lambda tup: tup[0], reverse = True)
# assign encoding bit strings to each byte value
for b in range(len(tupleList)):
tupleList[b] = (tupleList[b][0], tupleList[b][1], encode_fib(b + 1))
# print 'The list of (frequency, byteValue, encodingBitStr) tuples:'
# print tupleList
# print
# write the list of byte values as the compressed file header
bitStream = '' # global
fo = open(outputFile, 'wb')
fo.write(chr(len(tupleList) - 1)) # first write the number of byte values
for (freq, byteValue, encodingBitStr) in tupleList:
# convert the byteValue into 8-bit and send to be written into file
# bitStr = bin(byteValue)
# bitStr = bitStr[2:] # remove 0b
# bitStr = '0' * (8 - len(bitStr)) + bitStr # add 0's if needed for 8 bits
# byteWriter(bitStr, fo)
fo.write(chr(byteValue)) # this would do the same
# write 32-bit (input file size)-1 value
bitStr = bin(fileSize - 1)
bitStr = bitStr[2:] # remove 0b
bitStr = '0' * (32 - len(bitStr)) + bitStr # add 0's if needed for 32 bits
byteWriter(bitStr, fo)
# create a dictionary of byteValue : encodingBitStr pairs
dic = dict([(tup[1], tup[2]) for tup in tupleList])
# del tupleList
# print 'The dictionary of byteValue : encodingBitStr pairs:'
# print dic
# write the encoded data
for b in byteArr:
byteWriter(dic[b], fo)
byteWriter('0' * 8, fo) # to write the last remaining bits (if any)
fo.close()
elif mode == 'd': # FILE DECODING
bitPosition = 0 # global
n = int(bitReader(8), 2) + 1 # first read the number of byte values
# print 'Number of byte values:', n
dic = dict()
for i in range(n):
# read the byteValue
byteValue = int(bitReader(8), 2)
encodingBitStr = encode_fib(i + 1)
dic[encodingBitStr] = byteValue # add to the dictionary
# print 'The dictionary of encodingBitStr : byteValue pairs:'
# print dic
# print
# read 32-bit file size (number of encoded bytes) value
numBytes = long(bitReader(32), 2) + 1
print 'Number of bytes to decode:', numBytes
# read the encoded data, decode it, write into the output file
fo = open(outputFile, 'wb')
for b in range(numBytes):
# read bits until a decoding match is found
encodingBitStr = ''
while True:
encodingBitStr += bitReader(1)
# if encodingBitStr in dic:
if encodingBitStr.endswith('11'):
byteValue = dic[encodingBitStr]
fo.write(chr(byteValue))
break
fo.close()
|
{
"content_hash": "4f14a7c0ac0d4fb21ffb83ad4287fa5e",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 82,
"avg_line_length": 33.439189189189186,
"alnum_prop": 0.6013336027480299,
"repo_name": "ActiveState/code",
"id": "f9e8903edde31aadd610c380111fcf37d7fb9a72",
"size": "5087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/577554_Fibonacci_Data_Compression/recipe-577554.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
"""
The Django settings for TopicMaps in Production
"""
##########################################################################
## Imports
##########################################################################
import os
from .base import *
##########################################################################
## Production Settings
##########################################################################
## Debugging Settings
DEBUG = False
## Hosts
ALLOWED_HOSTS = ['ddl-topicmaps.herokuapp.com', 'topicmaps.districtdatalabs.com']
## Static files served by WhiteNoise
STATIC_ROOT = 'staticfiles'
|
{
"content_hash": "c9b6a8a4a10d23a2bc000cd7ce0f0f53",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 84,
"avg_line_length": 27.47826086956522,
"alnum_prop": 0.370253164556962,
"repo_name": "DistrictDataLabs/topicmaps",
"id": "4d0f4ace066bba1fa4c1bc45043dda07ed5d3092",
"size": "966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topicmaps/settings/production.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "990"
},
{
"name": "HTML",
"bytes": "50492"
},
{
"name": "JavaScript",
"bytes": "9466"
},
{
"name": "Makefile",
"bytes": "1936"
},
{
"name": "Python",
"bytes": "44419"
}
],
"symlink_target": ""
}
|
import matplotlib as mpl
import IPython
from IPython.core.magic import register_line_magic
@register_line_magic
def mpl_setup(line):
IPython.get_ipython().magic('pylab --no-import-all inline')
try:
import seaborn as sns
sns.set(style="darkgrid")
except ImportError:
from warnings import warn
warn('Seaborn not installed')
mpl.rcParams['font.sans-serif'] = 'Helvetica Neue, Helvetica, Avant Garde, Computer Modern Sans serif'
mpl.rcParams['font.family'] = 'sans-serif'
mpl.rcParams['savefig.bbox'] = 'tight'
mpl.rcParams['savefig.pad_inches'] = 0
mpl.rcParams['text.usetex'] = False
mpl.rcParams['lines.linewidth'] = 3
mpl.rcParams['font.size'] = 15
mpl.rcParams['axes.labelsize'] = 17
mpl.rcParams['axes.titlesize'] = 17
mpl.rcParams['xtick.labelsize'] = 15
mpl.rcParams['ytick.labelsize'] = 15
mpl.rcParams['legend.fontsize'] = 17
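# Illustrative usage (assumption, not part of the original file): importing the
# module inside an IPython/Jupyter session registers the line magic, which can
# then be invoked once per notebook.
#
#     import mpl_recipes.mpl_setup   # registers %mpl_setup
#     %mpl_setup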
|
{
"content_hash": "a9acd43fe3a5b361a80b377f0b0777da",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 106,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.674217907227616,
"repo_name": "zakandrewking/mpl_recipes",
"id": "bb5ceca714d56a7c501b1a270bd247d25b653f22",
"size": "927",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mpl_recipes/mpl_setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4106"
}
],
"symlink_target": ""
}
|
from flask import Blueprint
from flask import render_template
from ipydra import models
from ipydra import BASE_URL
bp = Blueprint('admin', __name__)
@bp.route('/')
def listing():
return render_template('admin.jinja.html',
BASE_URL=BASE_URL,
users=models.User.query.all())
|
{
"content_hash": "08efaa8edcec3b0dd80a29b03b76cba4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 57,
"avg_line_length": 25.692307692307693,
"alnum_prop": 0.6167664670658682,
"repo_name": "UnataInc/ipydra",
"id": "a5900dbf9bd71f2359e0e2e506aba56fe6db7ab0",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipydra/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2420"
},
{
"name": "Python",
"bytes": "7194"
}
],
"symlink_target": ""
}
|
import os
import re
import shutil
import subprocess
import sys
from bs4 import BeautifulSoup
import requests
def get_page(url):
return BeautifulSoup(requests.get("https://github.com" + url).text)
languages = []
languages_page = get_page("/languages")
list_items = languages_page.find(class_="all_languages").find("ul").find_all("li")
for list_item in list_items:
link = list_item.find("a")
if not link: continue
language_page = get_page(link["href"])
rank_text = language_page.find("h1").text
match = re.search("is the #(\d+) most popular", rank_text)
rank = 1 # Default to 1 to handle "the most".
if match:
rank = int(match.group(1))
# Erase the line.
print "\033[2K",
print "\r{}/{} {} is #{}".format(len(languages), len(list_items), link.text, rank),
sys.stdout.flush()
languages.append((rank, link.text))
# Erase the line.
print "\033[2K",
print "\r",
languages.sort()
for rank, language in languages:
print "#" + str(rank) + " " + language
|
{
"content_hash": "4638d4f1a522aa4d54efe8daba5e00e6",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 85,
"avg_line_length": 24.048780487804876,
"alnum_prop": 0.6683569979716024,
"repo_name": "munificent/github-language-ranking",
"id": "6aafaedb26a4225fd79bc35ecc5d1611965fa5d4",
"size": "986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "986"
}
],
"symlink_target": ""
}
|
"""Import utilities."""
from __future__ import absolute_import
def import_member(import_string):
"""Import one member of Python module by path.
>>> import os.path
>>> imported = import_member('os.path.supports_unicode_filenames')
>>> os.path.supports_unicode_filenames is imported
True
"""
module_name, factory_name = str(import_string).rsplit('.', 1)
module = __import__(module_name, globals(), locals(), [factory_name], 0)
return getattr(module, factory_name)
|
{
"content_hash": "5fadeeb7d78ab01e0f54c1d1506366ee",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 76,
"avg_line_length": 31.375,
"alnum_prop": 0.6633466135458167,
"repo_name": "benoitbryon/django-confit",
"id": "504da66117a6d08dd81c91829fe50dfaf49dd788",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_confit/utils/importlib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "792"
},
{
"name": "Makefile",
"bytes": "1593"
},
{
"name": "Python",
"bytes": "252838"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from model_mommy import mommy
from lxml import html
from apps.boards.models import Board
from apps.boards.views import BoardListView
from apps.core.tests.utils import LoggedTestCase
from apps.issues.models import Issue
from apps.teams.models import Team
class BoardDetailViewTest(LoggedTestCase):
urls = 'kanboard.urls'
def get_view(self):
return BoardListView.as_view()
def setUp(self):
super(BoardDetailViewTest, self).setUp()
self.team = Team.objects.create(name='test-team')
self.team.users.add(self.user)
self.board = mommy.make(Board, team=self.team)
response = self.view(self.request, pk=self.board.pk)
self.dom = html.fromstring(response.rendered_content)
def test_should_have_a_list_of_boards(self):
board = self.dom.cssselect('.table-responsive tbody tr a')[0]
self.assertEqual(board.text, self.board.name)
    def test_board_item_should_be_a_link_to_detailed_board(self):
board = self.dom.cssselect('.table-responsive tbody tr a')[0]
self.assertEqual(board.attrib['href'], reverse("board-detail", kwargs={"pk": self.board.id}))
def test_issue_should_have_icebox_panel_when_has_issue_without_board(self):
mommy.make(Issue)
response = self.view(self.request, pk=self.board.pk)
dom = html.fromstring(response.rendered_content)
title = dom.cssselect('.icebox .panel h3.panel-title')[0]
self.assertEqual(title.text, "ICEBOX")
    def test_issue_should_be_on_icebox_when_it_is_not_in_any_board(self):
issue = mommy.make(Issue)
response = self.view(self.request, pk=self.board.pk)
dom = html.fromstring(response.rendered_content)
panel = dom.cssselect('.icebox .panel .panel-body')[0]
title = panel.cssselect('.issue-item.badge')[0]
self.assertEqual(title.text.strip(), "#{0} {1}".format(issue.id, issue.name))
def test_redirect_if_not_logged(self):
self.assertRedirectIfNotLogged()
|
{
"content_hash": "e40f50e1984c8e374b3652c82a03e715",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 101,
"avg_line_length": 39.36538461538461,
"alnum_prop": 0.6873473375671715,
"repo_name": "petry/kanboard",
"id": "121354b5e0d38cd07ebadce96f3c5f7210ce70f2",
"size": "2047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/boards/tests/test_board_list_template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "159"
},
{
"name": "JavaScript",
"bytes": "401"
},
{
"name": "Python",
"bytes": "108940"
}
],
"symlink_target": ""
}
|
"""Weak reference support for Python.
This module is an implementation of PEP 205:
http://python.sourceforge.net/peps/pep-0205.html
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
import UserDict
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from exceptions import ReferenceError
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary"]
class WeakValueDictionary(UserDict.UserDict):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(self, *args, **kw):
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
del self.data[wr.key]
self._remove = remove
UserDict.UserDict.__init__(self, *args, **kw)
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError, key
else:
return o
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def has_key(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
# This should only happen
return default
else:
return o
def items(self):
L = []
for key, wr in self.data.items():
o = wr()
if o is not None:
L.append((key, o))
return L
def iteritems(self):
for wr in self.data.itervalues():
value = wr()
if value is not None:
yield wr.key, value
def iterkeys(self):
return self.data.iterkeys()
def __iter__(self):
return self.data.iterkeys()
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return self.data.itervalues()
def itervalues(self):
for wr in self.data.itervalues():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
while 1:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError, key
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return self.data.values()
def values(self):
L = []
for wr in self.data.values():
o = wr()
if o is not None:
L.append(o)
return L
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super(KeyedRef, self).__init__(ob, callback)
class WeakKeyDictionary(UserDict.UserDict):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
del self.data[k]
self._remove = remove
if dict is not None: self.update(dict)
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def has_key(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def items(self):
L = []
for key, value in self.data.items():
o = key()
if o is not None:
L.append((o, value))
return L
def iteritems(self):
for wr, value in self.data.iteritems():
key = wr()
if key is not None:
yield key, value
def iterkeyrefs(self):
"""Return an iterator that yields the weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return self.data.iterkeys()
def iterkeys(self):
for wr in self.data.iterkeys():
obj = wr()
if obj is not None:
yield obj
def __iter__(self):
return self.iterkeys()
def itervalues(self):
return self.data.itervalues()
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return self.data.keys()
def keys(self):
L = []
for wr in self.data.keys():
o = wr()
if o is not None:
L.append(o)
return L
def popitem(self):
while 1:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
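# Illustrative usage sketch (not part of the original module): entries in a
# WeakValueDictionary vanish once the last strong reference to the value goes
# away. On CPython collection is immediate via reference counting; on Jython
# or other collectors it may only happen after a garbage-collection pass.
if __name__ == '__main__':
    class _Demo(object):
        pass
    d = WeakValueDictionary()
    value = _Demo()
    d['entry'] = value
    print len(d.items())    # 1 while `value` is still strongly referenced
    del value
    print len(d.items())    # 0 once the referent has been collected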
|
{
"content_hash": "5d9c885e88b5c2ca1e97e85d09b3737c",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 76,
"avg_line_length": 28.385915492957746,
"alnum_prop": 0.5599880916939566,
"repo_name": "babble/babble",
"id": "4f6d757fe3bb4a8a2efa38f5d7170713ed4649da",
"size": "10077",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "include/jython/Lib/weakref.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3378"
},
{
"name": "Groovy",
"bytes": "16151"
},
{
"name": "Java",
"bytes": "7316421"
},
{
"name": "JavaScript",
"bytes": "644844"
},
{
"name": "Python",
"bytes": "10107943"
},
{
"name": "Ruby",
"bytes": "4961765"
},
{
"name": "Shell",
"bytes": "2575"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
"""Tests for Incremental PCA."""
import numpy as np
import pytest
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
from scipy import sparse
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
assert X_transformed.shape == (X.shape[0], 2)
np.testing.assert_allclose(
ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(),
rtol=1e-3,
)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
np.testing.assert_allclose(
np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-13
)
@pytest.mark.parametrize(
"matrix_class", [sparse.csc_matrix, sparse.csr_matrix, sparse.lil_matrix]
)
def test_incremental_pca_sparse(matrix_class):
# Incremental PCA on sparse arrays.
X = iris.data
pca = PCA(n_components=2)
pca.fit_transform(X)
X_sparse = matrix_class(X)
batch_size = X_sparse.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
X_transformed = ipca.fit_transform(X_sparse)
assert X_transformed.shape == (X_sparse.shape[0], 2)
np.testing.assert_allclose(
ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(),
rtol=1e-3,
)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X_sparse)
cov = ipca.get_covariance()
precision = ipca.get_precision()
np.testing.assert_allclose(
np.dot(cov, precision), np.eye(X_sparse.shape[1]), atol=1e-13
)
with pytest.raises(
TypeError,
match="IncrementalPCA.partial_fit does not support "
"sparse input. Either convert data to dense "
"or use IncrementalPCA.fit to do so in batches.",
):
ipca.partial_fit(X_sparse)
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * 0.1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1.0, 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= 0.00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = np.array([[0, 1, 0], [1, 0, 0]])
n_samples, n_features = X.shape
for n_components in [-1, 0, 0.99, 4]:
with pytest.raises(
ValueError,
match="n_components={} invalid"
" for n_features={}, need more rows than"
" columns for IncrementalPCA"
" processing".format(n_components, n_features),
):
IncrementalPCA(n_components, batch_size=10).fit(X)
# Tests that n_components is also <= n_samples.
n_components = 3
with pytest.raises(
ValueError,
match="n_components={} must be"
" less or equal to the batch number of"
" samples {}".format(n_components, n_samples),
):
IncrementalPCA(n_components=n_components).partial_fit(X)
def test_n_components_none():
# Ensures that n_components == None is handled correctly
rng = np.random.RandomState(1999)
for n_samples, n_features in [(50, 10), (10, 50)]:
X = rng.rand(n_samples, n_features)
ipca = IncrementalPCA(n_components=None)
# First partial_fit call, ipca.n_components_ is inferred from
# min(X.shape)
ipca.partial_fit(X)
assert ipca.n_components_ == min(X.shape)
# Second partial_fit call, ipca.n_components_ is inferred from
# ipca.components_ computed from the first partial_fit call
ipca.partial_fit(X)
assert ipca.n_components_ == ipca.components_.shape[0]
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
with pytest.raises(ValueError):
ipca.partial_fit(X2)
# Increasing number of components
ipca.set_params(n_components=15)
with pytest.raises(ValueError):
ipca.partial_fit(X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
with pytest.raises(ValueError):
ipca.partial_fit(X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_batch_rank():
# Test sample size in each batch is always larger or equal to n_components
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 90, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=20, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for components_i, components_j in zip(all_components[:-1], all_components[1:]):
assert_allclose_dense_sparse(components_i, components_j)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= 0.00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(
1000, 100, tail_strength=0.0, effective_rank=10, random_state=1999
)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(
pca.explained_variance_, ipca.explained_variance_, decimal=prec
)
assert_almost_equal(
pca.explained_variance_ratio_, ipca.explained_variance_ratio_, decimal=prec
)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_, decimal=prec)
def test_singular_values():
# Check that the IncrementalPCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 1000
n_features = 100
X = datasets.make_low_rank_matrix(
n_samples, n_features, tail_strength=0.0, effective_rank=10, random_state=rng
)
pca = PCA(n_components=10, svd_solver="full", random_state=rng).fit(X)
ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_ipca = ipca.transform(X)
assert_array_almost_equal(
np.sum(pca.singular_values_ ** 2.0), np.linalg.norm(X_pca, "fro") ** 2.0, 12
)
assert_array_almost_equal(
np.sum(ipca.singular_values_ ** 2.0), np.linalg.norm(X_ipca, "fro") ** 2.0, 2
)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(
pca.singular_values_, np.sqrt(np.sum(X_pca ** 2.0, axis=0)), 12
)
assert_array_almost_equal(
ipca.singular_values_, np.sqrt(np.sum(X_ipca ** 2.0, axis=0)), 2
)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = datasets.make_low_rank_matrix(
n_samples, n_features, tail_strength=0.0, effective_rank=3, random_state=rng
)
pca = PCA(n_components=3, svd_solver="full", random_state=rng)
ipca = IncrementalPCA(n_components=3, batch_size=100)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca ** 2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
ipca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(
1000, 10, tail_strength=0.0, effective_rank=2, random_state=1999
)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc, batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
def test_incremental_pca_partial_fit_float_division():
# Test to ensure float division is used in all versions of Python
# (non-regression test for issue #9489)
rng = np.random.RandomState(0)
A = rng.randn(5, 3) + 2
B = rng.randn(7, 3) + 5
pca = IncrementalPCA(n_components=2)
pca.partial_fit(A)
# Set n_samples_seen_ to be a floating point number instead of an int
pca.n_samples_seen_ = float(pca.n_samples_seen_)
pca.partial_fit(B)
singular_vals_float_samples_seen = pca.singular_values_
pca2 = IncrementalPCA(n_components=2)
pca2.partial_fit(A)
pca2.partial_fit(B)
singular_vals_int_samples_seen = pca2.singular_values_
np.testing.assert_allclose(
singular_vals_float_samples_seen, singular_vals_int_samples_seen
)
def test_incremental_pca_fit_overflow_error():
# Test for overflow error on Windows OS
# (non-regression test for issue #17693)
rng = np.random.RandomState(0)
A = rng.rand(500000, 2)
ipca = IncrementalPCA(n_components=2, batch_size=10000)
ipca.fit(A)
pca = PCA(n_components=2)
pca.fit(A)
np.testing.assert_allclose(ipca.singular_values_, pca.singular_values_)
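# Illustrative sketch (not one of the test cases above): the batched
# partial_fit pattern that these tests compare against a single full fit.
# Shapes and batch size here are arbitrary.
def _example_partial_fit_in_batches(batch_size=50):
    rng = np.random.RandomState(0)
    X = rng.randn(200, 10)
    ipca = IncrementalPCA(n_components=3)
    for start in range(0, X.shape[0], batch_size):
        ipca.partial_fit(X[start:start + batch_size])
    # transform uses the components accumulated across all batches
    return ipca.transform(X)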
|
{
"content_hash": "e0c64a5e013f41a75a5f9fdd84a4158c",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 87,
"avg_line_length": 34.262411347517734,
"alnum_prop": 0.6483819775063824,
"repo_name": "amueller/scikit-learn",
"id": "25096bbea5ad9b93eeb176874264c12064ed2c80",
"size": "14493",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sklearn/decomposition/tests/test_incremental_pca.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2232"
},
{
"name": "C",
"bytes": "41206"
},
{
"name": "C++",
"bytes": "146835"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "9958394"
},
{
"name": "Shell",
"bytes": "44588"
}
],
"symlink_target": ""
}
|
import base64
import six
from .. import utils
class ConfigApiMixin(object):
@utils.minimum_version('1.25')
def create_config(self, name, data, labels=None):
"""
Create a config
Args:
name (string): Name of the config
data (bytes): Config data to be stored
labels (dict): A mapping of labels to assign to the config
Returns (dict): ID of the newly created config
"""
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels
}
url = self._url('/configs/create')
return self._result(
self._post_json(url, data=body), True
)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def inspect_config(self, id):
"""
Retrieve config metadata
Args:
            id (string): Full ID of the config to inspect
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no config with that ID exists
"""
url = self._url('/configs/{0}', id)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def remove_config(self, id):
"""
Remove a config
Args:
id (string): Full ID of the config to remove
Returns (boolean): True if successful
Raises:
:py:class:`docker.errors.NotFound`
if no config with that ID exists
"""
url = self._url('/configs/{0}', id)
res = self._delete(url)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def configs(self, filters=None):
"""
List configs
Args:
filters (dict): A map of filters to process on the configs
list. Available filters: ``names``
Returns (list): A list of configs
"""
url = self._url('/configs')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
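def _example_config_roundtrip(client, name='example_config'):
    # Illustrative sketch only (not part of the docker-py API): shows how the
    # mixin methods above are typically chained through a low-level APIClient
    # instance. Assumes a reachable daemon running in swarm mode and that the
    # create call returns a dict keyed by 'ID', as the Docker API documents.
    created = client.create_config(name, b'example payload', labels={'env': 'dev'})
    metadata = client.inspect_config(created['ID'])
    client.remove_config(created['ID'])
    return metadata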
|
{
"content_hash": "105593ab774770ece0b0971581f87b3c",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 74,
"avg_line_length": 27.197802197802197,
"alnum_prop": 0.5111111111111111,
"repo_name": "vpetersson/docker-py",
"id": "b46b09c7c11a06ff92fa25b654f2ef511f13f57e",
"size": "2475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker/api/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3260"
},
{
"name": "Python",
"bytes": "868992"
},
{
"name": "Shell",
"bytes": "749"
}
],
"symlink_target": ""
}
|
import logging
import os
import random
import re
import time
from queue import Queue
from functools import wraps
from jinja2 import evalcontextfilter, Markup, escape
from werkzeug.wrappers import Response
from flask import Flask, redirect, request, render_template
from flask_restful import reqparse, Resource, Api
import sqlalchemy.exc
from sqlalchemy import Column, Integer, String
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
app = Flask(__name__)
api = Api(app)
postparser = reqparse.RequestParser()
postparser.add_argument('text', required=True, help="The text to save")
postparser.add_argument('secret', type=bool, help="Make a secret paste")
getparser = reqparse.RequestParser()
getparser.add_argument('page', type=int, default=1, help="Page of the result")
limiter_dict = {}
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
_text = 'DsU~CF6hjX2u5QpolMWaNmLr8keVqzR0_3tn7HdOyJbZ.TI1AgfExB4SP9GiwYcvK-'
_base = len(_text)
def number_to_text(number):
text = ""
if number == 0:
text += _text[0]
while number:
text += _text[number % _base]
number = number // _base
return text
def text_to_number(tekst):
number = 0
for i, character in enumerate(tekst):
number += _text.index(character) * _base ** i
return number
def check_limit(token):
if token in limiter_dict:
while len(limiter_dict[token].queue) > 0 and limiter_dict[token].queue[0] < time.time():
limiter_dict[token].get_nowait()
else:
limiter_dict[token] = Queue()
if len(limiter_dict[token].queue) <= 20:
limiter_dict[token].put_nowait(int(time.time() + 60 * 60))
return int(os.environ["GLOBAL_RATELIMIT"]) - len(limiter_dict[token].queue), limiter_dict[token].queue[0]
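# Note on check_limit (added for clarity): limiter_dict maps each client token
# (the remote address) to a queue of expiry timestamps one hour in the future.
# Expired entries are purged on every call, a new timestamp is queued while the
# window holds 20 or fewer entries, and the function returns the remaining
# allowance (computed against GLOBAL_RATELIMIT) together with the time at which
# the oldest entry expires. The hard-coded 20 and the GLOBAL_RATELIMIT
# environment variable are presumably meant to agree.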
def limit(f):
@wraps(f)
def wrapper(*args, **kwargs):
remaining, reset = check_limit(request.remote_addr)
if remaining < 0:
return ({
'status': 429,
'message': 'API limit reached'
}, 429,
{
'RateLimit-Limit': os.environ["GLOBAL_RATELIMIT"],
'RateLimit-Remaining': remaining,
'RateLimit-Reset': reset
})
resp = api.make_response(*f(*args, **kwargs))
resp.headers['RateLimit-Limit'] = os.environ["GLOBAL_RATELIMIT"]
resp.headers['RateLimit-Remaining'] = remaining
resp.headers['RateLimit-Reset'] = reset
return resp
return wrapper
engine = create_engine(os.environ["DB_URL"], echo=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
class NormalPaste(Base):
__tablename__ = "pastes"
id = Column(Integer, primary_key=True)
text = Column(String)
def __repr__(self):
return "/{} = {}".format(number_to_text(self.id), self.text)
class SecretPaste(Base):
__tablename__ = "secretpastes"
id = Column(String, primary_key=True)
text = Column(String)
def __repr__(self):
return "/{} = {}".format(self.id, self.text)
Base.metadata.create_all(bind=engine)
def get_paste(paste_id):
if paste_id.startswith('+'):
return get_secret_paste(paste_id)
pasteid = text_to_number(paste_id)
paste = NormalPaste.query.filter(NormalPaste.id == pasteid).first()
return paste.text
def add_paste(text):
newpaste = NormalPaste(text=text)
db_session.add(newpaste)
db_session.commit()
if number_to_text(newpaste.id) == 'pastes':
return -1
return newpaste.id
def get_secret_paste(paste_id):
paste = SecretPaste.query.filter(SecretPaste.id == paste_id).first()
return paste.text
def add_secret_paste(paste):
pasteid = ""
tries = 0
length = 5
while SecretPaste.query.filter(SecretPaste.id == pasteid).first() or pasteid == "":
if tries > 10:
length += 1
tries = 0
pasteid = '+' + ''.join([random.choice(_text) for _ in range(length)])
tries += 1
newpaste = SecretPaste(id=pasteid, text=paste)
db_session.add(newpaste)
db_session.commit()
return pasteid
@app.route('/')
def index():
return app.send_static_file('index.html')
@app.route('/<string:paste_id>')
def show_paste(paste_id):
try:
return render_template("showpaste.html", paste_content=get_paste(paste_id))
except Exception as e:
logger.exception(e)
return "404 Not found", 404
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
class Paste(Resource):
@limit
def get(self, paste_id):
try:
return {'status': 200, 'message': get_paste(paste_id)}, 200
except:
return {'status': 404, 'message': 'Not Found'}, 404
class PasteList(Resource):
@limit
def get(self):
try:
args = getparser.parse_args()
page = args['page'] - 1
print("page: {}".format(page))
pastes = NormalPaste.query.order_by(NormalPaste.id.desc()).limit(25).offset(25 * page)
allpastes = [number_to_text(paste.id) for paste in pastes]
except Exception as e:
logging.exception(e)
return {'status': 500, 'message': 'Internal Server Error'}, 500
else:
return {'status': 200, 'message': allpastes}, 200
@limit
def post(self):
args = postparser.parse_args()
if args['text'] == "":
return {'status': 400, 'message': "Paste text cannot be empty."}, 400
else:
logging.debug("Making new paste with content: {}".format(args['text']))
if args['secret']:
try:
pasteid = add_secret_paste(args['text'])
except Exception as e:
logging.exception(e)
return {'status': 500, 'message': 'Internal Server Error'}, 500
else:
return {'status': 200, 'message': pasteid}, 200
try:
pasteid = -1
while pasteid == -1:
pasteid = add_paste(args['text'])
except Exception as e:
logging.exception(e)
return {'status': 500, 'message': 'Internal Server Error'}, 500
else:
return {'status': 200, 'message': number_to_text(pasteid)}, 200
api.add_resource(Paste, '/pastes/<string:paste_id>')
api.add_resource(PasteList, '/pastes/')
if __name__ == '__main__':
app.run(debug=True)
|
{
"content_hash": "cd55999425877b60ee59678070bcf1c7",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 109,
"avg_line_length": 30.669642857142858,
"alnum_prop": 0.6,
"repo_name": "pingiun/textshorten",
"id": "10178edd2d2ffb603881655917e9e6fd542584b0",
"size": "6870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "textshorten.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "308"
},
{
"name": "HTML",
"bytes": "1990"
},
{
"name": "Python",
"bytes": "6870"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import boto
import sure # noqa
from moto import mock_ec2
@mock_ec2
def test_placement_groups():
pass
|
{
"content_hash": "846d8a0612fe6d28452e3212740453dc",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 39,
"avg_line_length": 15.9,
"alnum_prop": 0.6855345911949685,
"repo_name": "william-richard/moto",
"id": "bc389488b66b0abc258d7c87474401731696275d",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_ec2/test_placement_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "6637538"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from keras import backend as K
import numpy as np
from keras.utils.generic_utils import get_from_module
from six.moves import zip
from scipy.sparse import csr_matrix
from theano import sparse
import theano
def clip_norm(g, c, n):
if c > 0:
g = K.switch(n >= c, g * c / n, g)
return g
def kl_divergence(p, p_hat):
return p_hat - p + p * K.log(p / p_hat)
class Optimizer(object):
'''Abstract optimizer base class.
Note: this is the parent class of all optimizers, not an actual optimizer
that can be used for training models.
All Keras optimizers support the following keyword arguments:
clipnorm: float >= 0. Gradients will be clipped
when their L2 norm exceeds this value.
clipvalue: float >= 0. Gradients will be clipped
when their absolute value exceeds this value.
'''
def __init__(self, **kwargs):
allowed_kwargs = {'clipnorm', 'clipvalue'}
for k in kwargs:
if k not in allowed_kwargs:
raise Exception('Unexpected keyword argument '
'passed to optimizer: ' + str(k))
self.__dict__.update(kwargs)
self.updates = []
self.weights = []
def get_state(self):
return [K.get_value(u[0]) for u in self.updates]
def set_state(self, value_list):
assert len(self.updates) == len(value_list)
for u, v in zip(self.updates, value_list):
K.set_value(u[0], v)
def get_updates(self, params, constraints, loss):
raise NotImplementedError
def get_gradients(self, loss, params):
grads = K.gradients(loss, params)
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
if hasattr(self, 'clipvalue') and self.clipvalue > 0:
grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
return grads
def set_weights(self, weights):
'''Sets the weights of the optimizer, from Numpy arrays.
Should only be called after computing the gradients
(otherwise the optimizer has no weights).
# Arguments
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the optimizer (i.e. it should match the
output of `get_weights`).
'''
params = self.weights
if len(params) != len(weights):
raise Exception('Provided weight array does not match weights (' +
str(len(params)) + ' optimizer params vs. ' +
str(len(weights)) + ' provided weights)')
for p, w in zip(params, weights):
if K.get_value(p).shape != w.shape:
raise Exception('Optimizer weight shape ' +
str(K.get_value(p).shape) +
' not compatible with '
'provided weight shape ' + str(w.shape))
K.set_value(p, w)
def get_weights(self):
'''Returns the current weights of the optimizer,
as a list of numpy arrays.
'''
weights = []
for p in self.weights:
weights.append(K.get_value(p))
return weights
def get_config(self):
config = {'name': self.__class__.__name__}
if hasattr(self, 'clipnorm'):
config['clipnorm'] = self.clipnorm
if hasattr(self, 'clipvalue'):
config['clipvalue'] = self.clipvalue
return config
class SGD(Optimizer):
'''Stochastic gradient descent, with support for momentum,
learning rate decay, and Nesterov momentum.
# Arguments
lr: float >= 0. Learning rate.
momentum: float >= 0. Parameter updates momentum.
decay: float >= 0. Learning rate decay over each update.
nesterov: boolean. Whether to apply Nesterov momentum.
'''
def __init__(self, lr=0.01, momentum=0., decay=0.,
nesterov=False,
sparse=None,
**kwargs):
super(SGD, self).__init__(**kwargs)
self.__dict__.update(locals())
self.iterations = K.variable(0.)
self.lr = K.variable(lr)
self.momentum = K.variable(momentum)
self.decay = K.variable(decay)
def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        lr = self.lr * (1. / (1. + self.decay * self.iterations))
        self.updates = [(self.iterations, self.iterations + 1.)]
        # momentum buffers: sparse (CSR) parameters get a sparse shared
        # variable of matching shape, dense parameters the usual zero-filled
        # K.variable
        self.weights = []
        for p in params:
            if type(p.get_value()) is csr_matrix:
                m = csr_matrix(K.get_value(p).shape, dtype='float32')
                m = theano.shared(value=m, strict=False)
                self.weights.append(m)
            else:
                self.weights.append(K.variable(np.zeros(K.get_value(p).shape)))
        for p, g, m in zip(params, grads, self.weights):
            v = self.momentum * m - lr * g  # velocity
            self.updates.append((m, v))
            if self.nesterov:
                new_p = p + self.momentum * v - lr * g
            else:
                new_p = p + v
# apply constraints
if p in constraints:
c = constraints[p]
new_p = c(new_p)
self.updates.append((p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'momentum': float(K.get_value(self.momentum)),
'decay': float(K.get_value(self.decay)),
'nesterov': self.nesterov}
base_config = super(SGD, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class RMSprop(Optimizer):
'''RMSProp optimizer.
It is recommended to leave the parameters of this optimizer
at their default values
(except the learning rate, which can be freely tuned).
This optimizer is usually a good choice for recurrent
neural networks.
# Arguments
lr: float >= 0. Learning rate.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor.
'''
def __init__(self, lr=0.001, rho=0.9, epsilon=1e-8, **kwargs):
super(RMSprop, self).__init__(**kwargs)
self.__dict__.update(locals())
self.lr = K.variable(lr)
self.rho = K.variable(rho)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
# accumulators
self.weights = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
self.updates = []
for p, g, a in zip(params, grads, self.weights):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * K.square(g)
self.updates.append((a, new_a))
new_p = p - self.lr * g / (K.sqrt(new_a) + self.epsilon)
# apply constraints
if p in constraints:
c = constraints[p]
new_p = c(new_p)
self.updates.append((p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'rho': float(K.get_value(self.rho)),
'epsilon': self.epsilon}
base_config = super(RMSprop, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adagrad(Optimizer):
'''Adagrad optimizer.
It is recommended to leave the parameters of this optimizer
at their default values.
# Arguments
lr: float >= 0. Learning rate.
epsilon: float >= 0.
'''
def __init__(self, lr=0.01, epsilon=1e-8, **kwargs):
super(Adagrad, self).__init__(**kwargs)
self.__dict__.update(locals())
self.lr = K.variable(lr)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
# accumulators
self.weights = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
self.updates = []
for p, g, a in zip(params, grads, self.weights):
new_a = a + K.square(g) # update accumulator
self.updates.append((a, new_a))
new_p = p - self.lr * g / (K.sqrt(new_a) + self.epsilon)
# apply constraints
if p in constraints:
c = constraints[p]
new_p = c(new_p)
self.updates.append((p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'epsilon': self.epsilon}
base_config = super(Adagrad, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adadelta(Optimizer):
'''Adadelta optimizer.
It is recommended to leave the parameters of this optimizer
at their default values.
# Arguments
lr: float >= 0. Learning rate.
It is recommended to leave it at the default value.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor.
# References
- [Adadelta - an adaptive learning rate method](http://arxiv.org/abs/1212.5701)
'''
def __init__(self, lr=1.0, rho=0.95, epsilon=1e-8, **kwargs):
super(Adadelta, self).__init__(**kwargs)
self.__dict__.update(locals())
self.lr = K.variable(lr)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
delta_accumulators = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
self.weights = accumulators + delta_accumulators
self.updates = []
for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * K.square(g)
self.updates.append((a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
new_p = p - self.lr * update
# apply constraints
if p in constraints:
c = constraints[p]
new_p = c(new_p)
self.updates.append((p, new_p))
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * K.square(update)
self.updates.append((d_a, new_d_a))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'rho': self.rho,
'epsilon': self.epsilon}
base_config = super(Adadelta, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adam(Optimizer):
'''Adam optimizer.
Default parameters follow those provided in the original paper.
# Arguments
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor.
# References
- [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
'''
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
epsilon=1e-8, **kwargs):
super(Adam, self).__init__(**kwargs)
self.__dict__.update(locals())
self.iterations = K.variable(0)
self.lr = K.variable(lr)
self.beta_1 = K.variable(beta_1)
self.beta_2 = K.variable(beta_2)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
self.updates = [(self.iterations, self.iterations + 1)]
t = self.iterations + 1
lr_t = self.lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))
ms = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
vs = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
self.weights = ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append((m, m_t))
self.updates.append((v, v_t))
new_p = p_t
# apply constraints
if p in constraints:
c = constraints[p]
new_p = c(new_p)
self.updates.append((p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'epsilon': self.epsilon}
base_config = super(Adam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adamax(Optimizer):
'''Adamax optimizer from Adam paper's Section 7. It is a variant
of Adam based on the infinity norm.
Default parameters follow those provided in the paper.
# Arguments
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor.
# References
- [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
'''
def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999,
epsilon=1e-8, **kwargs):
super(Adamax, self).__init__(**kwargs)
self.__dict__.update(locals())
self.iterations = K.variable(0.)
self.lr = K.variable(lr)
self.beta_1 = K.variable(beta_1)
self.beta_2 = K.variable(beta_2)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
self.updates = [(self.iterations, self.iterations + 1)]
t = self.iterations + 1
lr_t = self.lr / (1. - K.pow(self.beta_1, t))
# zero init of 1st moment
ms = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
# zero init of exponentially weighted infinity norm
us = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
self.weights = ms + us
for p, g, m, u in zip(params, grads, ms, us):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
u_t = K.maximum(self.beta_2 * u, K.abs(g))
p_t = p - lr_t * m_t / (u_t + self.epsilon)
self.updates.append((m, m_t))
self.updates.append((u, u_t))
new_p = p_t
# apply constraints
if p in constraints:
c = constraints[p]
new_p = c(new_p)
self.updates.append((p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'epsilon': self.epsilon}
base_config = super(Adamax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# aliases
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
def get(identifier, kwargs=None):
return get_from_module(identifier, globals(), 'optimizer',
instantiate=True, kwargs=kwargs)
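# Illustrative usage sketch (not part of the original module): optimizers are
# constructed directly or resolved by name through get() above. The training
# loop that actually consumes get_updates() lives in the Keras model code, so
# only construction is shown; the get() call assumes the usual
# get_from_module(..., instantiate=True) behaviour of this Keras version.
if __name__ == '__main__':
    opt = SGD(lr=0.01, momentum=0.9, nesterov=True, clipnorm=1.0)
    print(opt.get_config())
    same_opt = get('sgd', kwargs={'lr': 0.01, 'momentum': 0.9})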
|
{
"content_hash": "c8d32e9e26b2cd86b594262bba6834e5",
"timestamp": "",
"source": "github",
"line_count": 471,
"max_line_length": 90,
"avg_line_length": 35.77070063694268,
"alnum_prop": 0.5459995251661919,
"repo_name": "KECB/learn",
"id": "9e87b5e1ac1a7225a8144b06767b1837c0749d21",
"size": "16848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "machine_learning/NN_code_release/myOptimizers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1067405"
},
{
"name": "Jupyter Notebook",
"bytes": "20598783"
},
{
"name": "Python",
"bytes": "253084"
},
{
"name": "R",
"bytes": "149418"
},
{
"name": "Roff",
"bytes": "27484"
},
{
"name": "Shell",
"bytes": "455509"
}
],
"symlink_target": ""
}
|
import transvoxel
import noise
from vector import *
from euclid import *
CHUNK_SIZE=8
SIZE = CHUNK_SIZE+3
SIZE_SQ = SIZE**2
SIZE_CB = SIZE**3
class PlanetChunk(transvoxel.Chunk):
def __init__(self, offset, lod, parent=None):
transvoxel.Chunk.__init__(self, offset, CHUNK_SIZE, lod)
self.parent = parent
self.children = []
self.center = vadds(offset, CHUNK_SIZE*lod/2)
self.empty = False
class PlanetVolume:
def __init__(self, radius, height, resolution, delegate=None):
self.radius = radius
self.height = height
self.resolution = resolution
self.delegate = delegate
self.res = float(radius + height) * 2.0 / resolution
self.normRad = radius / float(radius + height)
self.normHeight = height / float(radius + height)
self.deres = 1.0 / float(radius + height)
self.extractor = transvoxel.VoxelExtractor(self)
offset = -resolution/2
self.chunks = [
PlanetChunk((offset,offset,offset), resolution/CHUNK_SIZE)
]
self.parents = []
self._meshes = {}
self.build(self.chunks[0])
self.modelmatrix = Matrix4.new_scale(self.res, self.res, self.res)
def get(self, chunk, pos):
i, j, k = vdivs(vsub(pos, chunk.offset), chunk.lod)
i, j, k = i+1, j+1, k+1
return chunk.data[i*SIZE_SQ + j*SIZE + k] - 128
def _getVal(self, pos):
pos = vmuls(pos, 2.0 / self.resolution)
base = self.normRad - vlen(pos)
extra = noise.ridged_multifractal(4, pos[0], pos[1], pos[2], 2, 2, 1, 0.5, 1, 1) * self.normHeight
val = min(1, max(-1, base + extra))
return int(val * 127 + 128)
def _addMesh(self, k, m):
if self.delegate:
self.delegate.add(m)
self._meshes[k] = m
def _removeMesh(self, k):
if self.delegate:
self.delegate.remove(self._meshes[k])
del self._meshes[k]
def meshes(self):
return self._meshes.values()
def build(self, chunk):
self._buildData(chunk)
mesh = self.extractor.extract(chunk)
if len(mesh.indices):
self._addMesh(chunk, mesh)
else:
chunk.empty = True
del chunk.data
def _buildData(self, chunk):
chunk.data = bytearray(SIZE_CB)
for i in range(SIZE):
for j in range(SIZE):
for k in range(SIZE):
pos = vadd(chunk.offset, vmuls((i-1, j-1, k-1), chunk.lod))
chunk.data[i*SIZE_SQ + j*SIZE + k] = self._getVal(pos)
def split(self, chunk):
if chunk.lod <= 1 or len(chunk.children) > 1 or chunk.empty:
return
pos = chunk.offset
lod = chunk.lod/2
s = lod*CHUNK_SIZE
children = [
PlanetChunk(vadd(pos, (0,0,0)), lod, chunk),
PlanetChunk(vadd(pos, (s,0,0)), lod, chunk),
PlanetChunk(vadd(pos, (0,0,s)), lod, chunk),
PlanetChunk(vadd(pos, (s,0,s)), lod, chunk),
PlanetChunk(vadd(pos, (0,s,0)), lod, chunk),
PlanetChunk(vadd(pos, (s,s,0)), lod, chunk),
PlanetChunk(vadd(pos, (0,s,s)), lod, chunk),
PlanetChunk(vadd(pos, (s,s,s)), lod, chunk)
]
for c in children:
self.build(c)
self._removeMesh(chunk)
chunk.children = children
self.chunks += children
self.chunks.remove(chunk)
self.parents.append(chunk)
if chunk.parent != None:
self.parents.remove(chunk)
def merge(self, chunk):
if len(chunk.children) == 0:
return
self.build(chunk)
for c in chunk.children:
self._removeMesh(c)
            self.chunks.remove(c)
chunk.children = []
self.parents.remove(chunk)
self.chunks.append(chunk)
if chunk.parent != None:
self.parents.append(chunk)
def update(self, viewer):
merges, splits = {}, {}
for p in self.parents:
factor = self._splitFactor(p, viewer)
if factor > 1:
merges[factor] = p
if len(merges):
self.merge(merges[max(merges)])
for c in self.chunks:
factor = self._splitFactor(c, viewer)
if factor < 1:
splits[factor] = c
if len(splits):
self.split(splits[min(splits)])
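    # Split/merge heuristic (descriptive note added for clarity): _splitFactor
    # compares the viewer's distance from a chunk's centre against the chunk's
    # world-space size. A ratio below 1 means the viewer is close relative to
    # the chunk, so update() splits it into eight children; a parent whose
    # ratio climbs back above 1 is merged again.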
def _splitFactor(self, chunk, viewer):
dist = 0.8*vlen(vsub(vmuls(chunk.center, self.res), viewer))
size = chunk.lod*CHUNK_SIZE * self.res
return dist / float(size)
if __name__ == "__main__":
planet = PlanetVolume(90.0, 10.0, 256)
for i in range(8):
planet.update((90, 0, 0))
k = 1
for mesh in planet.meshes():
print 'o chunk%d' % (id(mesh))
for i in range(0, len(mesh.vertices), 3):
print 'v %f %f %f' % (mesh.vertices[i], mesh.vertices[i+1], mesh.vertices[i+2])
for i in range(0, len(mesh.indices), 3):
print 'f %i %i %i' % (mesh.indices[i]+k, mesh.indices[i+1]+k, mesh.indices[i+2]+k)
k += len(mesh.vertices)/3
|
{
"content_hash": "842cc211a2c5768eabff8179ce3ed30e",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 106,
"avg_line_length": 26.358585858585858,
"alnum_prop": 0.5407166123778502,
"repo_name": "swiftcoder/aurae",
"id": "08d681bdd4978c692217b9f33d557c1d6233e671",
"size": "5241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "planet.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "177881"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-uecookie9',
version='0.1',
packages=['uecookie9'],
include_package_data=True,
license='MIT License',
    description='Django app that shows a cookie notification to EU citizens.',
long_description=README,
url='https://github.com/9dev/django-uecookie9',
author='9dev',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
install_requires = [
'django >= 1.4',
'pygeoip >= 0.3.2',
],
)
|
{
"content_hash": "2c811881f7f9df6069513d146ca418ce",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 84,
"avg_line_length": 31.583333333333332,
"alnum_prop": 0.6218117854001759,
"repo_name": "9dev/django-uecookie9",
"id": "546bb33937bfdc6a08950fb04563bebe10dcfd14",
"size": "1137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4081"
},
{
"name": "Makefile",
"bytes": "1198"
},
{
"name": "Python",
"bytes": "6387"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
from .curriculum import Curriculum
from .fact import Fact, Competency
__author__ = 'e.kolpakov'
def get_available_facts(facts, known_facts):
"""
:param facts: frozenset[Fact]
:param known_facts: frozenset[Fact]
:rtype: frozenset[knowledge_representation.fact.Fact]
"""
# TODO: this resembles connected-component search.
# Might be a good idea to rewrite using connected-component algorithm
to_check, new_available = deepcopy(facts), deepcopy(known_facts)
available = deepcopy(known_facts)
first_iteration = True
while new_available or first_iteration:
first_iteration = False
available |= new_available
to_check -= new_available
new_available = set(fact_to_check for fact_to_check in to_check if fact_to_check.is_available(available))
return frozenset(available & facts - known_facts)
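# Note on the loop above (added for clarity): availability spreads in waves.
# Each pass moves every fact whose prerequisites are already in `available`
# out of `to_check`; newly available facts can unlock further facts on the
# next pass, and the loop stops once a pass adds nothing new. The result is
# restricted to `facts` and excludes the facts that were already known.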
|
{
"content_hash": "93752de9f23867e19308be9428893aea",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 113,
"avg_line_length": 32.035714285714285,
"alnum_prop": 0.7045707915273133,
"repo_name": "e-kolpakov/study-model",
"id": "9580b1306ce750395d876dff32311c69b937b6e9",
"size": "897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/knowledge_representation/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "120472"
}
],
"symlink_target": ""
}
|
import sys
import os
import re
import web
from dbclient import Beansdb
fs = Beansdb({"localhost:7900": range(16)}, 16)
class File:
def GET(self, path):
data = fs.get(path)
if data:
sys.stdout.write(data)
else:
web.notfound()
urls = (
"(/.*)", "File",
)
def runfcgi_multiprocess(func, addr=('localhost', 8000)):
import flup.server.fcgi as flups
return flups.WSGIServer(func, multithreaded=False,
multiprocess=True, bindAddress=addr).run()
web.wsgi.runfcgi = runfcgi_multiprocess
if __name__ == '__main__':
if hasattr(web, 'run'):
# web.py 0.2
web.run(urls, globals())
else:
app = web.application(urls, globals())
app.run()
|
{
"content_hash": "e4f810f09ab850b12fad01dddddc08a0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 59,
"avg_line_length": 21.514285714285716,
"alnum_prop": 0.5843293492695883,
"repo_name": "mashuai/beansdb",
"id": "246bef8758977099ab2c46f92dc836b57f4af407",
"size": "796",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/httpd.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "226272"
},
{
"name": "Go",
"bytes": "26504"
},
{
"name": "Makefile",
"bytes": "657"
},
{
"name": "Python",
"bytes": "21141"
},
{
"name": "Shell",
"bytes": "47757"
}
],
"symlink_target": ""
}
|
import inspect
import urllib.request
import json
import contextlib
import threading
import time
import pytest
from gemstone.util import first_completed, as_completed
from gemstone.core import MicroService, exposed_method
from gemstone.client.remote_service import RemoteService
from gemstone.client.structs import Result, MethodCall, BatchResult, AsyncMethodCall, Notification
class TestMicroservice(MicroService):
name = "test_service"
host = "127.0.0.1"
port = 9019
@exposed_method()
def sum(self, a, b):
return a + b
@exposed_method()
def divide(self, a, b):
return a / b
@pytest.fixture(scope="module")
def microservice_url():
service = TestMicroservice()
threading.Thread(target=service.start).start()
time.sleep(1) # wait for the service to start
yield service.accessible_at
service.get_io_loop().stop()
def test_client_simple_method_call(microservice_url):
client = RemoteService(microservice_url)
result = client.call_method("sum", params=[1, 2])
assert isinstance(result, Result)
assert result.result == 3
result = client.call_method("sum", params={"a": 1, "b": 2})
assert isinstance(result, Result)
assert result.result == 3
def test_client_simple_method_call_with_errors(microservice_url):
client = RemoteService(microservice_url)
# too few positional args
result = client.call_method("sum", params=[1])
assert isinstance(result, Result)
assert result.result is None
assert result.error["code"] == -32602
# too many positional args
result = client.call_method("sum", params=[1, 2, 3])
assert isinstance(result, Result)
assert result.result is None
assert result.error["code"] == -32602
# too few kw args
result = client.call_method("sum", params={"a": 1})
assert isinstance(result, Result)
assert result.result is None
assert result.error["code"] == -32602
# too many kw args
result = client.call_method("sum", params={"a": 1, "b": 2, "c": 3})
assert isinstance(result, Result)
assert result.result is None
assert result.error["code"] == -32602
# method not found
result = client.call_method("invalid", params={"a": 1, "b": 2, "c": 3})
assert isinstance(result, Result)
assert result.result is None
assert result.error["code"] == -32601
# internal error
result = client.call_method("sum", params=[None, 3])
assert isinstance(result, Result)
assert result.result is None
assert result.error["code"] == -32603
def test_client_simple_method_call_with_objects(microservice_url):
client = RemoteService(microservice_url)
req = MethodCall("sum", [1, 2])
result = client.call_method(req)
assert isinstance(result, Result)
assert result.result == 3
req = MethodCall("sum", {"a": 1, "b": 2})
result = client.call_method(req)
assert isinstance(result, Result)
assert result.result == 3
def test_client_batch_call(microservice_url):
client = RemoteService(microservice_url)
requests = [
MethodCall("sum", [1, 2]),
MethodCall("divide", [10, 5]),
MethodCall("sum", [10, -10]),
MethodCall("sum", ["hello", " world"]),
MethodCall("sum", [1, 2, 3]), # error
Notification("sum", [1, 2])
]
resp = client.call_batch(*requests)
assert len(resp) == 5
assert resp.get_response_for_call(requests[0]).result == 3
assert resp.get_response_for_call(requests[1]).result == 2.
assert resp.get_response_for_call(requests[2]).result == 0
assert resp.get_response_for_call(requests[3]).result == "hello world"
assert resp.get_response_for_call(requests[5]) is None # it was a notification
def test_client_async_call(microservice_url):
client = RemoteService(microservice_url)
async_call = client.call_method_async("sum", [1, 2])
assert isinstance(async_call, AsyncMethodCall)
async_call.result(wait=True)
assert async_call.finished()
assert async_call.result().result == 3
def test_client_async_as_completed(microservice_url):
client = RemoteService(microservice_url)
for ready_result in as_completed(
*[client.call_method_async("sum", [i, i + 1]) for i in range(10)]):
print(ready_result)
assert ready_result.finished()
def test_client_async_first_completed(microservice_url):
client = RemoteService(microservice_url)
res = first_completed(*[client.call_method_async("sum", [i, i + 1]) for i in range(10)])
assert isinstance(res, Result)
assert res.error is None
assert isinstance(res.result, int)
assert 1 <= res.result <= 19
|
{
"content_hash": "dc12172a98536c09ec704551a5616f67",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 98,
"avg_line_length": 29.78980891719745,
"alnum_prop": 0.6683771648492624,
"repo_name": "vladcalin/gemstone",
"id": "f3a870f732ff8fcd972acc971592245ecc141547",
"size": "4677",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_client_functional.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "32"
},
{
"name": "Python",
"bytes": "130541"
}
],
"symlink_target": ""
}
|
'''Generate necessary dump files'''
#options
size = 100
regenerate_graph = False
days = 1
force_layout = False
default = str(size)+'.dat'
###
import igraph, pickle, random, os
import math
from collections import OrderedDict
def process(fout):
output = os.path.join('data',fout)
try:
#load graph if previously generated.
g = pickle.load(open('dump.dat'))
print 'Graph loaded from dump.dat'
except IOError:
#generate graph if it does not exist in the directory
print 'Generating graph to dump.dat'
g = igraph.Graph()
g.add_vertices(791)
g.es["weight"] = 1.0
g.delete_vertices([0])
with open('./flu-data/edgeLists/durationCondition/addThenChop/dropoff=0/minimumDuration=1/deltaT=1620/staticWeightedEdgeList_at=1350_min=540_max=2159.txt') as edges:
for edge in edges:
u, v, w = map(int, edge.split())
g[u, v] = 1.0/w
g.delete_vertices(g.vs(_degree_eq = 0))
pickle.dump(g,open('dump.dat','wb'))
print 'Finished'
#take sample of n points
sample = random.sample(range(1,788),790-size)
g.delete_vertices(sample)
print g.summary()
#Fiddle layout
print 'Working out layout'
if force_layout:
#starting everyone at their own location
#coords definition stolen from sim_group_move.py
coords = []
wrap = 10 #positions per row
        col_length = int(math.ceil(size / float(wrap)))
for y in range(col_length):
for x in range(wrap):
coords.append((x,y))
print coords
centre = (wrap/2, col_length/2)
else:
l = g.layout_kamada_kawai()
centre = l.centroid()
coords = l.coords
def distance(x, y): return math.sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2)
#sort the coords by their position from the centre
order = sorted(enumerate(coords), key = lambda x: distance(x[1], centre))
order = [x[0] for x in order]
#work out mininum global time
mintime = 1000 #must be less than this
for x in order:
if x == 0: continue
with open('./flu-data/moteFiles/node-'+str(x)) as fin:
line = fin.readline()
if line:
t = int(line.split()[-1])
if t < mintime:
mintime = t
completed = []
times = {}
print 'Generating movement file'
for node in order:
if node == 0: continue
times[node] = OrderedDict({0 : node})
node_name = 'node-'+str(node)
f = open('./flu-data/moteFiles/'+node_name, 'r')
for contact in f:
line = map(int, contact.split())
contact_id = line[0]
time = (line[-1] - mintime + 1)
if contact_id in completed:
current_max = 0
current_time = -1
for t, pos in times[contact_id].items():
if current_time < t <= time:
current_max = pos
current_time = t
position = current_max
times[node][time] = position
completed.append(node)
f.close()
print 'Writing movement file'
out = {'coords': coords, 'movement': times}
pickle.dump(out, open(output, 'wb'))
if __name__ == '__main__':
process(default)
|
{
"content_hash": "a08cb64d8cb9ef275a9f604c62211fd9",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 173,
"avg_line_length": 27.96747967479675,
"alnum_prop": 0.5415697674418605,
"repo_name": "1orwell/yrs2013",
"id": "50f03f56e439575fcd90bef8f3bde272ff46cb8b",
"size": "3440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fake.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4445577"
},
{
"name": "Python",
"bytes": "17775"
}
],
"symlink_target": ""
}
|
import json
import random
from StringIO import StringIO
from urllib import urlopen
from django.shortcuts import redirect
from demo import get_demo_transactions
from models import Portfolio
from models import User
from models import create_portfolio
from models import create_user
from transactions.models import clone_transaction
from settings import JANRAIN_API_KEY
from view_utils import get_demo_user
from view_utils import logout_user
from view_utils import redirect_to_portfolio_action
from view_utils import render_page
#-------------\
# CONSTANTS |
#-------------/
#---------\
# VIEWS |
#---------/
def index(request):
user_id = request.session.get('user_id')
portfolio = None
if user_id != None and request.GET.get('demo') == None:
portfolio = Portfolio.objects.filter(user__id__exact = user_id)[0]
else:
portfolio_id = request.session.get('sample_portfolio_id')
portfolio = _get_demo_portfolio(portfolio_id)
request.session['sample_portfolio_id'] = portfolio.id
return redirect("/%s/positions.html" % portfolio.id)
def read_only(request, read_only_token):
return redirect("/%s/positions.html" % read_only_token)
def legal(request):
return render_page('legal.html', request)
def feedback(request):
return render_page('feedback.html', request)
def login(request):
token = None
if request.method == 'POST':
token = request.POST.get('token')
else:
token = request.GET.get('token')
if token == None:
return redirect("/demo.html?loginFailed=true")
u = None
try:
u = urlopen('https://rpxnow.com/api/v2/auth_info?apiKey=%s&token=%s' % (JANRAIN_API_KEY, token))
auth_info = json.loads(u.read())
status = auth_info['stat']
if status != 'ok':
return redirect("/demo.html?loginFailed=true")
profile = auth_info['profile']
identifier = profile['identifier']
email = profile['email'] if profile.has_key('email') else None
candidate = User.objects.filter(open_id = identifier)
user = None
portfolio = None
target = 'transactions'
if candidate.count() == 0:
user = create_user(identifier, email)
portfolio = create_portfolio(user, 'Default')
else:
user = candidate[0]
portfolio = Portfolio.objects.filter(user__id__exact = user.id)[0]
target = 'positions'
request.session['user_id'] = user.id
return redirect_to_portfolio_action(target, portfolio)
finally:
if u != None:
u.close()
def logout(request):
return logout_user(request)
#-------------------\
# LOCAL FUNCTIONS |
#-------------------/
def _get_demo_portfolio(portfolio_id):
if portfolio_id != None:
candidate = Portfolio.objects.filter(id = portfolio_id)
if candidate.count() == 1:
return candidate[0]
else:
portfolio = create_portfolio(get_demo_user(), ('SAMPLE #%d' % random.randint(100000000, 999999999)))
for sample_transaction in get_demo_transactions():
transaction = clone_transaction(sample_transaction, portfolio);
transaction.save()
return portfolio
|
{
"content_hash": "0d3fefacaf6b8fdfc7d668e4ae564681",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 104,
"avg_line_length": 27.82882882882883,
"alnum_prop": 0.6639689219812237,
"repo_name": "fxdemolisher/frano",
"id": "6ea35efca53e23a943c06e73f540f1bcce6d5c72",
"size": "3209",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frano/main/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "73231"
},
{
"name": "Python",
"bytes": "100848"
},
{
"name": "Shell",
"bytes": "297"
}
],
"symlink_target": ""
}
|
from oslo_policy import policy
from keystone.common.policies import base
registered_limit_policies = [
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_registered_limit',
check_str='',
scope_types=['system', 'domain', 'project'],
description='Show registered limit details.',
operations=[{'path': '/v3/registered_limits/{registered_limit_id}',
'method': 'GET'},
{'path': '/v3/registered_limits/{registered_limit_id}',
'method': 'HEAD'}]),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_registered_limits',
check_str='',
scope_types=['system', 'domain', 'project'],
description='List registered limits.',
operations=[{'path': '/v3/registered_limits',
'method': 'GET'},
{'path': '/v3/registered_limits',
'method': 'HEAD'}]),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_registered_limits',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Create registered limits.',
operations=[{'path': '/v3/registered_limits',
'method': 'POST'}]),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_registered_limit',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Update registered limit.',
operations=[{'path': '/v3/registered_limits/{registered_limit_id}',
'method': 'PATCH'}]),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_registered_limit',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Delete registered limit.',
operations=[{'path': '/v3/registered_limits/{registered_limit_id}',
'method': 'DELETE'}])
]
def list_rules():
return registered_limit_policies
|
{
"content_hash": "b6768c6812b24bfa0e95cddc2cde2405",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 75,
"avg_line_length": 40.204081632653065,
"alnum_prop": 0.5796954314720812,
"repo_name": "openstack/keystone",
"id": "7dde90b3ef8e9a5b04e29a602270a5068b2ca512",
"size": "2516",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/common/policies/registered_limit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Mako",
"bytes": "976"
},
{
"name": "Python",
"bytes": "6213900"
},
{
"name": "Shell",
"bytes": "30491"
}
],
"symlink_target": ""
}
|
"""
Utilities for retrieving revision information from a project's git repository.
"""
# Do not remove the following comment; it is used by
# astropy_helpers.version_helpers to determine the beginning of the code in
# this module
# BEGIN
import locale
import os
import subprocess
import warnings
def _decode_stdio(stream):
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8'
except ValueError:
stdio_encoding = 'utf-8'
try:
text = stream.decode(stdio_encoding)
except UnicodeDecodeError:
# Final fallback
text = stream.decode('latin1')
return text
def update_git_devstr(version, path=None):
"""
Updates the git revision string if and only if the path is being imported
directly from a git working copy. This ensures that the revision number in
the version string is accurate.
"""
try:
# Quick way to determine if we're in git or not - returns '' if not
devstr = get_git_devstr(sha=True, show_warning=False, path=path)
except OSError:
return version
if not devstr:
# Probably not in git so just pass silently
return version
if 'dev' in version: # update to the current git revision
version_base = version.split('.dev', 1)[0]
devstr = get_git_devstr(sha=False, show_warning=False, path=path)
return version_base + '.dev' + devstr
else:
#otherwise it's already the true/release version
return version
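# Illustrative usage of update_git_devstr (a sketch; assumes this module is
# imported from inside a git working copy, otherwise the string is returned
# unchanged):
#
#   version = update_git_devstr('1.2.dev', path=__file__)
#   # -> e.g. '1.2.dev1234', where 1234 is the current commit count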
def get_git_devstr(sha=False, show_warning=True, path=None):
"""
Determines the number of revisions in this repository.
Parameters
----------
sha : bool
If True, the full SHA1 hash will be returned. Otherwise, the total
count of commits in the repository will be used as a "revision
number".
show_warning : bool
If True, issue a warning if git returns an error code, otherwise errors
pass silently.
path : str or None
If a string, specifies the directory to look in to find the git
repository. If `None`, the current working directory is used, and must
be the root of the git repository.
If given a filename it uses the directory containing that file.
Returns
-------
devversion : str
Either a string with the revision number (if `sha` is False), the
SHA1 hash of the current commit (if `sha` is True), or an empty string
if git version info could not be identified.
"""
if path is None:
path = os.getcwd()
if not _get_repo_path(path, levels=0):
return ''
if not os.path.isdir(path):
path = os.path.abspath(os.path.dirname(path))
if sha:
# Faster for getting just the hash of HEAD
cmd = ['rev-parse', 'HEAD']
else:
cmd = ['rev-list', '--count', 'HEAD']
def run_git(cmd):
try:
p = subprocess.Popen(['git'] + cmd, cwd=path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
except OSError as e:
if show_warning:
warnings.warn('Error running git: ' + str(e))
return (None, b'', b'')
if p.returncode == 128:
if show_warning:
warnings.warn('No git repository present at {0!r}! Using '
'default dev version.'.format(path))
return (p.returncode, b'', b'')
if p.returncode == 129:
if show_warning:
warnings.warn('Your git looks old (does it support {0}?); '
'consider upgrading to v1.7.2 or '
'later.'.format(cmd[0]))
return (p.returncode, stdout, stderr)
elif p.returncode != 0:
if show_warning:
warnings.warn('Git failed while determining revision '
'count: {0}'.format(_decode_stdio(stderr)))
return (p.returncode, stdout, stderr)
return p.returncode, stdout, stderr
returncode, stdout, stderr = run_git(cmd)
if not sha and returncode == 129:
# git returns 129 if a command option failed to parse; in
# particular this could happen in git versions older than 1.7.2
# where the --count option is not supported
# Also use --abbrev-commit and --abbrev=0 to display the minimum
# number of characters needed per-commit (rather than the full hash)
cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD']
returncode, stdout, stderr = run_git(cmd)
# Fall back on the old method of getting all revisions and counting
# the lines
if returncode == 0:
return str(stdout.count(b'\n'))
else:
return ''
elif sha:
return _decode_stdio(stdout)[:40]
else:
return _decode_stdio(stdout).strip()
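# Illustrative usage of get_git_devstr (a sketch; run from the root of a git
# repository -- outside of git an empty string is returned):
#
#   get_git_devstr()          # -> e.g. '1234' (number of commits on HEAD)
#   get_git_devstr(sha=True)  # -> the 40-character SHA1 hash of HEAD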
def _get_repo_path(pathname, levels=None):
"""
Given a file or directory name, determine the root of the git repository
this path is under. If given, this won't look any higher than ``levels``
(that is, if ``levels=0`` then the given path must be the root of the git
    repository and is returned if so).
Returns `None` if the given path could not be determined to belong to a git
repo.
"""
if os.path.isfile(pathname):
current_dir = os.path.abspath(os.path.dirname(pathname))
elif os.path.isdir(pathname):
current_dir = os.path.abspath(pathname)
else:
return None
current_level = 0
while levels is None or current_level <= levels:
if os.path.exists(os.path.join(current_dir, '.git')):
return current_dir
current_level += 1
if current_dir == os.path.dirname(current_dir):
break
current_dir = os.path.dirname(current_dir)
return None
|
{
"content_hash": "291ec37e32520a25ba5a721b5d49bead",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 79,
"avg_line_length": 32.456989247311824,
"alnum_prop": 0.5958257412622163,
"repo_name": "Stargrazer82301/CAAPR",
"id": "9878b7dd0212420390ff8fca0e513496f57766fa",
"size": "8571",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "CAAPR/CAAPR_AstroMagic/PTS/pts/magic/view/astropy_helpers_git_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "CSS",
"bytes": "21972"
},
{
"name": "HTML",
"bytes": "2408"
},
{
"name": "Prolog",
"bytes": "16433"
},
{
"name": "Python",
"bytes": "4465217"
},
{
"name": "Shell",
"bytes": "3793"
}
],
"symlink_target": ""
}
|
"""
Read from the Senseval 2 Corpus.
SENSEVAL [http://www.senseval.org/]
Evaluation exercises for Word Sense Disambiguation.
Organized by ACL-SIGLEX [http://www.siglex.org/]
Prepared by Ted Pedersen <[email protected]>, University of Minnesota,
http://www.d.umn.edu/~tpederse/data.html
Distributed with permission.
The NLTK version of the Senseval 2 files uses well-formed XML.
Each instance of the ambiguous words "hard", "interest", "line", and "serve"
is tagged with a sense identifier, and supplied with context.
"""
from __future__ import print_function, unicode_literals
import bisect
import re
from xml.etree import ElementTree
from nltk import compat
from nltk.tokenize import *
from .util import *
from .api import *
@compat.python_2_unicode_compatible
class SensevalInstance(object):
def __init__(self, word, position, context, senses):
self.word = word
self.senses = tuple(senses)
self.position = position
self.context = context
def __repr__(self):
return ('SensevalInstance(word=%r, position=%r, '
'context=%r, senses=%r)' %
(self.word, self.position, self.context, self.senses))
class SensevalCorpusReader(CorpusReader):
def instances(self, fileids=None):
return concat([SensevalCorpusView(fileid, enc)
for (fileid, enc) in self.abspaths(fileids, True)])
def raw(self, fileids=None):
"""
:return: the text contents of the given fileids, as a single string.
"""
if fileids is None: fileids = self._fileids
elif isinstance(fileids, compat.string_types): fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def _entry(self, tree):
elts = []
for lexelt in tree.findall('lexelt'):
for inst in lexelt.findall('instance'):
sense = inst[0].attrib['senseid']
context = [(w.text, w.attrib['pos'])
for w in inst[1]]
elts.append( (sense, context) )
return elts
class SensevalCorpusView(StreamBackedCorpusView):
def __init__(self, fileid, encoding):
StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
self._word_tokenizer = WhitespaceTokenizer()
self._lexelt_starts = [0] # list of streampos
self._lexelts = [None] # list of lexelt names
def read_block(self, stream):
# Decide which lexical element we're in.
lexelt_num = bisect.bisect_right(self._lexelt_starts, stream.tell())-1
lexelt = self._lexelts[lexelt_num]
instance_lines = []
in_instance = False
while True:
line = stream.readline()
if line == '':
assert instance_lines == []
return []
# Start of a lexical element?
if line.lstrip().startswith('<lexelt'):
lexelt_num += 1
m = re.search('item=("[^"]+"|\'[^\']+\')', line)
assert m is not None # <lexelt> has no 'item=...'
lexelt = m.group(1)[1:-1]
if lexelt_num < len(self._lexelts):
assert lexelt == self._lexelts[lexelt_num]
else:
self._lexelts.append(lexelt)
self._lexelt_starts.append(stream.tell())
# Start of an instance?
if line.lstrip().startswith('<instance'):
assert instance_lines == []
in_instance = True
# Body of an instance?
if in_instance:
instance_lines.append(line)
# End of an instance?
if line.lstrip().startswith('</instance'):
xml_block = '\n'.join(instance_lines)
xml_block = _fixXML(xml_block)
inst = ElementTree.fromstring(xml_block)
return [self._parse_instance(inst, lexelt)]
def _parse_instance(self, instance, lexelt):
senses = []
context = []
position = None
for child in instance:
if child.tag == 'answer':
senses.append(child.attrib['senseid'])
elif child.tag == 'context':
context += self._word_tokenizer.tokenize(child.text)
for cword in child:
if cword.tag == 'compound':
cword = cword[0] # is this ok to do?
if cword.tag == 'head':
                    # Some sanity checks:
assert position is None, 'head specified twice'
assert cword.text.strip() or len(cword)==1
assert not (cword.text.strip() and len(cword)==1)
# Record the position of the head:
position = len(context)
                    # Add on the head word itself:
if cword.text.strip():
context.append(cword.text.strip())
elif cword[0].tag == 'wf':
context.append((cword[0].text,
cword[0].attrib['pos']))
if cword[0].tail:
context += self._word_tokenizer.tokenize(
cword[0].tail)
else:
assert False, 'expected CDATA or wf in <head>'
elif cword.tag == 'wf':
context.append((cword.text, cword.attrib['pos']))
elif cword.tag == 's':
pass # Sentence boundary marker.
else:
print('ACK', cword.tag)
assert False, 'expected CDATA or <wf> or <head>'
if cword.tail:
context += self._word_tokenizer.tokenize(cword.tail)
else:
assert False, 'unexpected tag %s' % child.tag
return SensevalInstance(lexelt, position, context, senses)
def _fixXML(text):
"""
Fix the various issues with Senseval pseudo-XML.
"""
# <~> or <^> => ~ or ^
text = re.sub(r'<([~\^])>', r'\1', text)
# fix lone &
    text = re.sub(r'(\s+)\&(\s+)', r'\1&amp;\2', text)
# fix """
text = re.sub(r'"""', '\'"\'', text)
# fix <s snum=dd> => <s snum="dd"/>
text = re.sub(r'(<[^<]*snum=)([^">]+)>', r'\1"\2"/>', text)
# fix foreign word tag
text = re.sub(r'<\&frasl>\s*<p[^>]*>', 'FRASL', text)
# remove <&I .>
text = re.sub(r'<\&I[^>]*>', '', text)
# fix <{word}>
text = re.sub(r'<{([^}]+)}>', r'\1', text)
# remove <@>, <p>, </p>
text = re.sub(r'<(@|/?p)>', r'', text)
# remove <&M .> and <&T .> and <&Ms .>
text = re.sub(r'<&\w+ \.>', r'', text)
# remove <!DOCTYPE... > lines
text = re.sub(r'<!DOCTYPE[^>]*>', r'', text)
# remove <[hi]> and <[/p]> etc
text = re.sub(r'<\[\/?[^>]+\]*>', r'', text)
    # take the thing out of the brackets: <&hellip;>
text = re.sub(r'<(\&\w+;)>', r'\1', text)
# and remove the & for those patterns that aren't regular XML
text = re.sub(r'&(?!amp|gt|lt|apos|quot)', r'', text)
# fix 'abc <p="foo"/>' style tags - now <wf pos="foo">abc</wf>
text = re.sub(r'[ \t]*([^<>\s]+?)[ \t]*<p="([^"]*"?)"/>',
r' <wf pos="\2">\1</wf>', text)
text = re.sub(r'\s*"\s*<p=\'"\'/>', " <wf pos='\"'>\"</wf>", text)
return text
|
{
"content_hash": "cf90482a9d52ee4284c85f964b538b3f",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 78,
"avg_line_length": 38.92746113989637,
"alnum_prop": 0.5048582457074404,
"repo_name": "syllog1sm/TextBlob",
"id": "070f8299eb7d4fb7de84671c341ed689812f1932",
"size": "7786",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "text/nltk/corpus/reader/senseval.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1163"
},
{
"name": "Erlang",
"bytes": "1863"
},
{
"name": "JavaScript",
"bytes": "326"
},
{
"name": "Python",
"bytes": "3645100"
},
{
"name": "Shell",
"bytes": "6711"
}
],
"symlink_target": ""
}
|
""" PyTest configuration """
import pytest
def pytest_addoption(parser):
# creates a command line option to run slow tests
parser.addoption("--runslow", action="store_true", help="run slow tests")
|
{
"content_hash": "311839748aaaa7a0d22e2964b35e817d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 77,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.7101449275362319,
"repo_name": "DC23/cookiecutter-dcpypackage",
"id": "f5d8c872e58191a10397955f356b0099753a19c8",
"size": "231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3117"
},
{
"name": "Python",
"bytes": "8146"
},
{
"name": "Shell",
"bytes": "311"
}
],
"symlink_target": ""
}
|
"""
Defines various data containers for plotting a transect.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
import os
import re
from functools import partial
import numpy as np
import pyproj as pp
from shapely.ops import transform
def nearest_point(p, plist):
"""
Given a shapely Point, finds the nearest Point in a list of Points.
Args:
p (Point): A ``shapely`` Point.
plist (list): A list of Points.
Returns:
        tuple: The (x, y) coordinates of the nearest Point.
"""
p = (p.x, p.y)
plist = [(pt.x, pt.y) for pt in plist]
d_sq = np.sum((np.asarray(plist) - p)**2, axis=1)
return plist[np.argmin(d_sq)]
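# Illustrative usage of nearest_point (a sketch; assumes shapely is available):
#
#   from shapely.geometry import Point
#   nearest_point(Point(0, 0), [Point(1, 1), Point(5, 5)])  # -> (1.0, 1.0)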
def listdir(directory, match=None):
"""
Wrapper for `os.listdir()` that returns full paths. A bit like
`utils.walk()` but not recursive. Case insensitive.
Args:
directory (str): The directory to list.
Yields:
str: Full path to each file in turn.
"""
for f in os.listdir(directory):
if match:
if not re.search(match, f, flags=re.IGNORECASE):
continue
yield os.path.join(directory, f)
def walk(directory, match=None):
"""
Find files whose names match some regex. Like `fnmatch` but with regex.
Like `utils.listdir()` but recursive. Case insensitive.
Args:
directory (str): The directory to start at.
Yields:
str: Full path to each file in turn.
"""
for path, dirs, files in os.walk(directory):
for f in files:
if match:
if not re.search(match, f, flags=re.IGNORECASE):
continue
yield os.path.join(path, f)
def rolling_median(a, window):
"""
Apply a moving median filter.
"""
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
rolled = np.lib.stride_tricks.as_strided(a,
shape=shape,
strides=strides)
rolled = np.median(rolled, -1)
rolled = np.pad(rolled, window / 2, mode='edge')
return rolled
def despike(curve, curve_sm, max_clip):
"""
Remove spikes from a curve.
"""
spikes = np.where(curve - curve_sm > max_clip)[0]
spukes = np.where(curve_sm - curve > max_clip)[0]
out = np.copy(curve)
out[spikes] = curve_sm[spikes] + max_clip # Clip at the max allowed diff
out[spukes] = curve_sm[spukes] - max_clip # Clip at the min allowed diff
return out
def utm2lola(data):
"""
Transform UTMs to lon-lats. Assumes both are NAD83.
"""
utm_nad83 = pp.Proj("+init=EPSG:26920")
ll_nad83 = pp.Proj("+proj=longlat +ellps=GRS80 +datum=NAD83 +no_defs")
utm2lola = partial(pp.transform, utm_nad83, ll_nad83)
return transform(utm2lola, data)
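# Illustrative usage of utm2lola (a sketch; the coordinates are made up and
# assumed to be NAD83 UTM zone 20N, which is what EPSG:26920 describes):
#
#   from shapely.geometry import Point
#   lonlat = utm2lola(Point(500000, 4900000))
#   lonlat.x, lonlat.y  # longitude, latitude in decimal degrees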
def get_tops(fname):
"""
    Builds a dictionary of tops for plotting in the log tracks.
Args:
fname (str): The path to a file containing the tops.
"""
tops = {}
with open(fname) as f:
for line in f.readlines():
if not line.startswith('#'):
temp = line.strip().split(',')
tops[temp[0]] = float(temp[1])
return tops
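# Illustrative input for get_tops (a sketch; the file name and tops are made
# up). Given a file 'tops.csv' containing:
#
#   # Formation tops: name,depth
#   Wyandot,867.0
#   Dawson Canyon,984.5
#
# get_tops('tops.csv') returns {'Wyandot': 867.0, 'Dawson Canyon': 984.5}.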
|
{
"content_hash": "5f0b992d29cde185dbb40d5fbe7a5e72",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 77,
"avg_line_length": 27.126050420168067,
"alnum_prop": 0.5845724907063197,
"repo_name": "agile-geoscience/geotransect",
"id": "37349486782544753016a6dd453c87a2ce062dd9",
"size": "3274",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "101877"
}
],
"symlink_target": ""
}
|
from .mtrand import RandomState
from ._philox import Philox
from ._pcg64 import PCG64
from ._sfc64 import SFC64
from ._generator import Generator
from ._mt19937 import MT19937
BitGenerators = {'MT19937': MT19937,
'PCG64': PCG64,
'Philox': Philox,
'SFC64': SFC64,
}
def __generator_ctor(bit_generator_name='MT19937'):
"""
Pickling helper function that returns a Generator object
Parameters
----------
bit_generator_name: str
String containing the core BitGenerator
Returns
-------
rg: Generator
Generator using the named core BitGenerator
"""
if bit_generator_name in BitGenerators:
bit_generator = BitGenerators[bit_generator_name]
else:
raise ValueError(str(bit_generator_name) + ' is not a known '
'BitGenerator module.')
return Generator(bit_generator())
def __bit_generator_ctor(bit_generator_name='MT19937'):
"""
Pickling helper function that returns a bit generator object
Parameters
----------
bit_generator_name: str
String containing the name of the BitGenerator
Returns
-------
bit_generator: BitGenerator
BitGenerator instance
"""
if bit_generator_name in BitGenerators:
bit_generator = BitGenerators[bit_generator_name]
else:
raise ValueError(str(bit_generator_name) + ' is not a known '
'BitGenerator module.')
return bit_generator()
def __randomstate_ctor(bit_generator_name='MT19937'):
"""
Pickling helper function that returns a legacy RandomState-like object
Parameters
----------
bit_generator_name: str
String containing the core BitGenerator
Returns
-------
rs: RandomState
Legacy RandomState using the named core BitGenerator
"""
if bit_generator_name in BitGenerators:
bit_generator = BitGenerators[bit_generator_name]
else:
raise ValueError(str(bit_generator_name) + ' is not a known '
'BitGenerator module.')
return RandomState(bit_generator())
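# Illustrative usage of the pickling helpers (a sketch):
#
#   __generator_ctor('PCG64')       # Generator wrapping a fresh PCG64
#   __bit_generator_ctor('Philox')  # bare Philox bit generator
#   __randomstate_ctor('MT19937')   # legacy RandomState wrapping MT19937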
|
{
"content_hash": "6c6fa38e40131bc0ea210e85ec6938e7",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 74,
"avg_line_length": 27.402439024390244,
"alnum_prop": 0.6012461059190031,
"repo_name": "jorisvandenbossche/numpy",
"id": "29ff696448fee8383ec89b61793e7b478de9d111",
"size": "2247",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "numpy/random/_pickle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9068647"
},
{
"name": "C++",
"bytes": "189527"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8307898"
},
{
"name": "Shell",
"bytes": "8482"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
import os
from time import sleep
from selenium import webdriver
DEMO_URL = "https://apostello-testing.herokuapp.com"
PAGES = [
("/accounts/signup/", "Signup"),
("/accounts/login/", "Login"),
("/accounts/logout/", "Logout"),
("/send/adhoc/", "SendtoIndividuals"),
("/send/group/", "SendtoGroup"),
("/recipient/all/", "Recipients"),
("/recipient/edit/1/", "RecipientEdit"),
("/keyword/all/", "Keywords"),
("/keyword/edit/test/", "KeywordEdit"),
("/keyword/responses/test/", "KeywordResponses"),
("/incoming/", "IncomingLog"),
("/incoming/wall/", "IncomingWall"),
("/outgoing/", "OutgoingLog"),
("/elvanto/import/", "ElvantoSync"),
("/", "Home"),
]
def setup_driver():
d = webdriver.Firefox()
d.set_window_size(1200, 800)
# wake up demo site:
print("Waking up demo dyno...")
d.get(DEMO_URL)
sleep(5)
return d
def login(d):
email_box = d.find_elements_by_name("login")[0]
email_box.send_keys("[email protected]")
password_box = d.find_elements_by_name("password")[0]
password_box.send_keys("apostello")
login_button = d.find_element_by_id("login_button")
login_button.click()
sleep(5)
def grab_page(d, uri, desc):
print("Opening {0}".format(uri))
d.get(DEMO_URL + uri)
sleep(5)
with open("screenshots/{0}.png".format(desc), "wb") as f:
f.write(d.get_screenshot_as_png())
if "/accounts/login/" in uri:
login(d)
if __name__ == "__main__":
try:
os.mkdir("screenshots")
except OSError:
pass
d = setup_driver() # rewrite as context manager
for page in PAGES:
grab_page(d, page[0], page[1])
d.quit()
|
{
"content_hash": "2cfe190e1df6cb7afb11c48ec5c96150",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 61,
"avg_line_length": 24.652173913043477,
"alnum_prop": 0.5896531452087007,
"repo_name": "monty5811/apostello",
"id": "4ce4add9c1afa1b2405cf9bf9c3f5f8ae6bac507",
"size": "1723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/demo_screenshots.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18413"
},
{
"name": "Elm",
"bytes": "484874"
},
{
"name": "HTML",
"bytes": "21141"
},
{
"name": "JavaScript",
"bytes": "31346"
},
{
"name": "Makefile",
"bytes": "640"
},
{
"name": "Python",
"bytes": "372217"
},
{
"name": "Shell",
"bytes": "3175"
}
],
"symlink_target": ""
}
|
from flask import render_template, request, flash, redirect, url_for, make_response, session, g
from wwag import app, database, forms
from wwag.decorators import player_login_required, viewer_login_required
from MySQLdb import IntegrityError
from datetime import datetime
@app.route("/videos")
def videos():
videos = database.execute("SELECT * FROM Video NATURAL JOIN InstanceRun ORDER BY ViewCount DESC;").fetchall()
return render_template('videos/index.html',videos=videos)
@app.route("/videos/<video_id>")
def videos_show(video_id):
database.execute("UPDATE Video SET ViewCount = ViewCount+1 WHERE VideoID = %s;", (video_id,))
database.commit()
video = database.execute("SELECT * FROM Video NATURAL JOIN InstanceRun NATURAL JOIN Game WHERE VideoID = %s", (video_id,)).fetchone()
if video['Price'] > 0 and not g.get('current_player'):
if not g.get('current_viewer'):
return redirect(url_for('users_login', error="You must sign in as a Viewer to access this page."))
order_line = database.execute("SELECT * FROM ViewerOrderLine NATURAL JOIN ViewerOrder WHERE VideoID = %s AND ViewerID = %s AND ViewedStatus IN ('Pending', 'Viewed') LIMIT 1;", (video['VideoID'], g.current_viewer['ViewerID'])).fetchone()
if order_line:
database.execute("UPDATE ViewerOrder SET ViewedStatus = 'Viewed' WHERE ViewerOrderID = %s;",(order_line['ViewerOrderID'],))
database.commit()
return render_template('videos/show.html', video=video, order_line=order_line)
else:
return render_template('videos/purchase.html', video=video)
else:
return render_template('videos/show.html', video=video)
@app.route("/videos/create", methods=['GET', 'POST'])
@player_login_required
def videos_create():
form = forms.VideoForm(request.form)
form.set_choices()
if request.method == "POST" and form.validate():
lastrowid = database.execute("INSERT INTO Video (VideoName, InstanceRunID, GameID, Price, URL, VideoType, CreatedAt) VALUES (%s, %s, %s, %s, %s, %s, %s);", (form.name.data, form.instance_run_id.data, form.game_id.data, form.price.data, form.url.data, form.video_type.data, datetime.now())).lastrowid
database.commit()
flash("You have created a new video successfully!", 'notice')
return redirect(url_for('videos'))
else:
return render_template('videos/new.html', form=form)
@app.route("/videos/<video_id>/update", methods=['GET', 'POST'])
@player_login_required
def videos_update(video_id):
video = database.execute("SELECT * FROM Video WHERE VideoID = %s", (video_id,)).fetchone()
form = forms.VideoForm(request.form, name=video['VideoName'], instance_run_id=video['InstanceRunID'], game_id=video['GameID'], price=video['Price'], url=video['URL'], video_type=video['VideoType'])
form.set_choices()
if request.method == "POST" and form.validate():
database.execute("UPDATE Video SET VideoName = %s, InstanceRunID = %s, GameID = %s, Price = %s, URL = %s, VideoType = %s WHERE VideoID = %s", (form.name.data, form.instance_run_id.data, form.game_id.data, form.price.data, form.url.data, form.video_type.data, video['VideoID']))
database.commit()
flash("You have updated the video successfully!", 'notice')
return redirect(url_for('videos'))
else:
return render_template('videos/edit.html', form=form, video=video)
@app.route("/videos/<video_id>/delete", methods=['POST'])
@player_login_required
def videos_delete(video_id):
try:
database.execute("DELETE FROM Video WHERE VideoID = %s", (video_id,))
database.commit()
except IntegrityError as e:
flash("You cannot delete this video because some viewers have ordered it!", 'error')
return redirect(url_for('videos_show', video_id=video_id))
flash("You have deleted the video.", 'notice')
return redirect(url_for('videos'))
@app.route("/videos/<video_id>/add_to_basket", methods=['POST'])
@viewer_login_required
def videos_add_to_basket(video_id):
flag_perk = (g.current_viewer['ViewerType'] in ['C', 'P'])
database.execute("INSERT INTO ViewerOrderLine (VideoID, ViewerOrderID, FlagPerk) VALUES (%s, %s, %s);", (video_id, g.open_order['ViewerOrderID'], flag_perk))
database.commit()
flash("Added video to basket!", 'notice')
return redirect(url_for('basket'))
@app.route("/videos/<video_id>/remove_from_basket")
@viewer_login_required
def videos_remove_from_basket(video_id):
database.execute("DELETE FROM ViewerOrderLine WHERE ViewerOrderID = %s AND VideoID = %s;", (g.open_order['ViewerOrderID'], video_id))
database.commit()
flash("The video item has been removed from your basket.", 'notice')
return redirect(url_for('orders_show', order_id=g.open_order['ViewerOrderID']))
|
{
"content_hash": "a6d3dc35d5f89fa29d4b66dd63a609ae",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 303,
"avg_line_length": 55.535714285714285,
"alnum_prop": 0.7093247588424437,
"repo_name": "zhoutong/wwag",
"id": "a996b1461477e035543677c76715ba1586cddde0",
"size": "4665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wwag/views/videos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "627"
},
{
"name": "Python",
"bytes": "54758"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
import iepy
from django.test.runner import DiscoverRunner
class ManagerTestCase(TestCase):
"""
    TestCase class that makes sure that the models created through the
    ORM are deleted between tests.
"""
    # We are doing something not very clever, but quick to code:
    # emulate the Django test runner. The downside is that all the environment
    # and database setup happens once per TestCase (instead of, as it should,
    # once per run).
@classmethod
def setUpClass(cls):
# ORM environment and database setup
iepy.setup()
cls.dj_runner = DiscoverRunner()
cls.dj_runner.setup_test_environment()
cls.old_config = cls.dj_runner.setup_databases()
# Creating Manager instance (if requested)
if hasattr(cls, 'ManagerClass'):
cls.manager = cls.ManagerClass()
@classmethod
def tearDownClass(cls):
cls.dj_runner.teardown_databases(cls.old_config)
cls.dj_runner.teardown_test_environment()
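# Illustrative usage (a sketch; PersonManager is a hypothetical manager class,
# not part of this module):
#
#   class TestPersonManager(ManagerTestCase):
#       ManagerClass = PersonManager  # instantiated once per TestCase as cls.manager
#
#       def test_creation(self):
#           self.manager.create_person(u'name')  # hypothetical manager method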
|
{
"content_hash": "1da36e1d93a00f2e5ed74b3ebc31ed10",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 33.09375,
"alnum_prop": 0.6732766761095373,
"repo_name": "mrshu/iepy",
"id": "e6115cd92d1f010a87beca85c9d4d8e5eeee608d",
"size": "1059",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/manager_case.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "25531"
},
{
"name": "HTML",
"bytes": "26374"
},
{
"name": "JavaScript",
"bytes": "26234"
},
{
"name": "Python",
"bytes": "400269"
}
],
"symlink_target": ""
}
|
class Pipeline:
def __init__(self, transformations):
self.transformations = transformations
def run(self, origin):
destination = origin
for transform in self.transformations:
destination = transform.apply(destination)
return destination
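# Illustrative usage (a sketch; Scale is a hypothetical transformation object
# exposing the apply() method that Pipeline expects):
#
#   pipeline = Pipeline([Scale(2.0), Scale(0.5)])
#   result = pipeline.run(data)  # applies each transformation to `data` in order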
|
{
"content_hash": "b5c8a78800b560c342b652f681dac7a8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 54,
"avg_line_length": 32.22222222222222,
"alnum_prop": 0.6620689655172414,
"repo_name": "bhillmann/gingivere",
"id": "f79a5703d5d6482208cc450c3935b47102c209de",
"size": "290",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gingivere/pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51111"
}
],
"symlink_target": ""
}
|
"""Updates generated docs from Python doc comments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import tensorflow as tf
from tensorflow.python.framework import docs
from tensorflow.python.framework import framework_lib
from tensorflow.python.client import client_lib
tf.flags.DEFINE_string("out_dir", None,
"Directory to which docs should be written.")
tf.flags.DEFINE_boolean("print_hidden_regex", False,
"Dump a regular expression matching any hidden symbol")
FLAGS = tf.flags.FLAGS
PREFIX_TEXT = """
Note: Functions taking `Tensor` arguments can also take anything accepted by
[`tf.convert_to_tensor`](framework.md#convert_to_tensor).
"""
def get_module_to_name():
return {
tf: "tf",
tf.errors: "tf.errors",
tf.image: "tf.image",
tf.nn: "tf.nn",
tf.train: "tf.train",
tf.python_io: "tf.python_io",
tf.test: "tf.test",
tf.contrib.layers: "tf.contrib.layers",
tf.contrib.util: "tf.contrib.util",
}
def all_libraries(module_to_name, members, documented):
# A list of (filename, docs.Library) pairs representing the individual files
# that we want to create.
def library(name, title, module=None, **args):
if module is None:
module = sys.modules["tensorflow.python.ops" +
("" if name == "ops" else "." + name)]
return (name + ".md", docs.Library(title=title,
module_to_name=module_to_name,
members=members,
documented=documented,
module=module,
**args))
return [
# Splits of module 'tf'.
library("framework", "Building Graphs", framework_lib),
library("constant_op", "Constants, Sequences, and Random Values",
prefix=PREFIX_TEXT),
library("state_ops", "Variables",
exclude_symbols=["create_partitioned_variables"],
prefix=PREFIX_TEXT),
library("array_ops", "Tensor Transformations",
exclude_symbols=["list_diff"], prefix=PREFIX_TEXT),
library("math_ops", "Math",
exclude_symbols=["sparse_matmul", "arg_min", "arg_max",
"lin_space", "sparse_segment_mean_grad"],
prefix=PREFIX_TEXT),
library("histogram_ops", "Histograms"),
library("control_flow_ops", "Control Flow", prefix=PREFIX_TEXT),
library("image", "Images", tf.image, exclude_symbols=["ResizeMethod"],
prefix=PREFIX_TEXT),
library("sparse_ops", "Sparse Tensors",
exclude_symbols=["serialize_sparse", "serialize_many_sparse",
"deserialize_many_sparse"],
prefix=PREFIX_TEXT),
library("io_ops", "Inputs and Readers",
exclude_symbols=["LookupTableBase", "HashTable",
"PaddingFIFOQueue",
"initialize_all_tables",
"parse_single_sequence_example",
"string_to_hash_bucket"],
prefix=PREFIX_TEXT),
library("python_io", "Data IO (Python functions)", tf.python_io),
library("nn", "Neural Network", tf.nn,
exclude_symbols=["conv2d_backprop_input",
"conv2d_backprop_filter", "avg_pool_grad",
"max_pool_grad", "max_pool_grad_with_argmax",
"batch_norm_with_global_normalization_grad",
"lrn_grad", "relu6_grad", "softplus_grad",
"softsign_grad", "xw_plus_b", "relu_layer",
"lrn", "batch_norm_with_global_normalization",
"batch_norm_with_global_normalization_grad",
"all_candidate_sampler",
"rnn", "state_saving_rnn", "bidirectional_rnn",
"dynamic_rnn", "seq2seq", "rnn_cell"],
prefix=PREFIX_TEXT),
library("client", "Running Graphs", client_lib),
library("train", "Training", tf.train,
exclude_symbols=["Feature", "Features", "BytesList", "FloatList",
"Int64List", "Example", "InferenceExample",
"FeatureList", "FeatureLists",
"RankingExample", "SequenceExample"]),
library("script_ops", "Wraps python functions", prefix=PREFIX_TEXT),
library("test", "Testing", tf.test),
library("contrib.layers", "Layers (contrib)", tf.contrib.layers),
library("contrib.util", "Utilities (contrib)", tf.contrib.util),
]
_hidden_symbols = ["Event", "LogMessage", "Summary", "SessionLog", "xrange",
"HistogramProto", "ConfigProto", "NodeDef", "GraphDef",
"GPUOptions", "GraphOptions", "RunOptions", "RunMetadata",
"SessionInterface", "BaseSession", "NameAttrList",
"AttrValue", "TensorArray", "OptimizerOptions",
"CollectionDef", "MetaGraphDef", "QueueRunnerDef",
"SaverDef", "VariableDef", "TestCase", "GrpcServer",
"ClusterDef", "JobDef", "ServerDef"]
def main(unused_argv):
if not FLAGS.out_dir:
tf.logging.error("out_dir not specified")
return -1
# Document libraries
documented = set()
module_to_name = get_module_to_name()
members = docs.collect_members(module_to_name)
libraries = all_libraries(module_to_name, members, documented)
# Define catch_all library before calling write_libraries to avoid complaining
# about generically hidden symbols.
catch_all = docs.Library(title="Catch All", module=None,
exclude_symbols=_hidden_symbols,
module_to_name=module_to_name, members=members,
documented=documented)
# Write docs to files
docs.write_libraries(FLAGS.out_dir, libraries)
# Make it easy to search for hidden symbols
if FLAGS.print_hidden_regex:
hidden = set(_hidden_symbols)
for _, lib in libraries:
hidden.update(lib.exclude_symbols)
print(r"hidden symbols regex = r'\b(%s)\b'" % "|".join(sorted(hidden)))
# Verify that all symbols are mentioned in some library doc.
catch_all.assert_no_leftovers()
# Generate index
with open(os.path.join(FLAGS.out_dir, "index.md"), "w") as f:
docs.Index(module_to_name, members, libraries,
"../../api_docs/python/").write_markdown_to_file(f)
if __name__ == "__main__":
tf.app.run()
|
{
"content_hash": "afe3bc5228d5fe67d68b966a6d39fe95",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 80,
"avg_line_length": 43.261146496815286,
"alnum_prop": 0.5665488810365136,
"repo_name": "awni/tensorflow",
"id": "736924afcaf94285f0d7989133acc24885318698",
"size": "7470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/gen_docs_combined.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156098"
},
{
"name": "C++",
"bytes": "7765982"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "684124"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "7188"
},
{
"name": "Jupyter Notebook",
"bytes": "1771787"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "103762"
},
{
"name": "Python",
"bytes": "4675299"
},
{
"name": "Shell",
"bytes": "126103"
},
{
"name": "TypeScript",
"bytes": "342627"
}
],
"symlink_target": ""
}
|
"""
babel.util
~~~~~~~~~~
Various utility classes and functions.
:copyright: (c) 2013-2022 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import codecs
import collections
from datetime import timedelta, tzinfo
import os
import re
import textwrap
import pytz as _pytz
from babel import localtime
missing = object()
def distinct(iterable):
"""Yield all items in an iterable collection that are distinct.
Unlike when using sets for a similar effect, the original ordering of the
items in the collection is preserved by this function.
>>> print(list(distinct([1, 2, 1, 3, 4, 4])))
[1, 2, 3, 4]
>>> print(list(distinct('foobar')))
['f', 'o', 'b', 'a', 'r']
:param iterable: the iterable collection providing the data
"""
seen = set()
for item in iter(iterable):
if item not in seen:
yield item
seen.add(item)
# Regexp to match python magic encoding line
PYTHON_MAGIC_COMMENT_re = re.compile(
br'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)', re.VERBOSE)
def parse_encoding(fp):
"""Deduce the encoding of a source file from magic comment.
It does this in the same way as the `Python interpreter`__
.. __: https://docs.python.org/3.4/reference/lexical_analysis.html#encoding-declarations
The ``fp`` argument should be a seekable file object.
(From Jeff Dairiki)
"""
pos = fp.tell()
fp.seek(0)
try:
line1 = fp.readline()
has_bom = line1.startswith(codecs.BOM_UTF8)
if has_bom:
line1 = line1[len(codecs.BOM_UTF8):]
m = PYTHON_MAGIC_COMMENT_re.match(line1)
if not m:
try:
import ast
ast.parse(line1.decode('latin-1'))
except (ImportError, SyntaxError, UnicodeEncodeError):
# Either it's a real syntax error, in which case the source is
# not valid python source, or line2 is a continuation of line1,
# in which case we don't want to scan line2 for a magic
# comment.
pass
else:
line2 = fp.readline()
m = PYTHON_MAGIC_COMMENT_re.match(line2)
if has_bom:
if m:
magic_comment_encoding = m.group(1).decode('latin-1')
if magic_comment_encoding != 'utf-8':
raise SyntaxError(f"encoding problem: {magic_comment_encoding} with BOM")
return 'utf-8'
elif m:
return m.group(1).decode('latin-1')
else:
return None
finally:
fp.seek(pos)
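# Illustrative usage of parse_encoding (a sketch; 'module.py' is a hypothetical
# file opened in binary mode, since the function inspects a seekable byte stream):
#
#   with open('module.py', 'rb') as fp:
#       parse_encoding(fp)  # -> e.g. 'utf-8', or None if no magic comment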
PYTHON_FUTURE_IMPORT_re = re.compile(
r'from\s+__future__\s+import\s+\(*(.+)\)*')
def parse_future_flags(fp, encoding='latin-1'):
"""Parse the compiler flags by :mod:`__future__` from the given Python
code.
"""
import __future__
pos = fp.tell()
fp.seek(0)
flags = 0
try:
body = fp.read().decode(encoding)
# Fix up the source to be (hopefully) parsable by regexpen.
# This will likely do untoward things if the source code itself is broken.
# (1) Fix `import (\n...` to be `import (...`.
body = re.sub(r'import\s*\([\r\n]+', 'import (', body)
# (2) Join line-ending commas with the next line.
body = re.sub(r',\s*[\r\n]+', ', ', body)
# (3) Remove backslash line continuations.
body = re.sub(r'\\\s*[\r\n]+', ' ', body)
for m in PYTHON_FUTURE_IMPORT_re.finditer(body):
names = [x.strip().strip('()') for x in m.group(1).split(',')]
for name in names:
feature = getattr(__future__, name, None)
if feature:
flags |= feature.compiler_flag
finally:
fp.seek(pos)
return flags
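# Illustrative usage of parse_future_flags (a sketch; 'module.py' is a
# hypothetical file):
#
#   with open('module.py', 'rb') as fp:
#       flags = parse_future_flags(fp)
#   # `flags` can be passed to compile(source, 'module.py', 'exec', flags=flags)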
def pathmatch(pattern, filename):
"""Extended pathname pattern matching.
This function is similar to what is provided by the ``fnmatch`` module in
the Python standard library, but:
* can match complete (relative or absolute) path names, and not just file
names, and
* also supports a convenience pattern ("**") to match files at any
directory level.
Examples:
>>> pathmatch('**.py', 'bar.py')
True
>>> pathmatch('**.py', 'foo/bar/baz.py')
True
>>> pathmatch('**.py', 'templates/index.html')
False
>>> pathmatch('./foo/**.py', 'foo/bar/baz.py')
True
>>> pathmatch('./foo/**.py', 'bar/baz.py')
False
>>> pathmatch('^foo/**.py', 'foo/bar/baz.py')
True
>>> pathmatch('^foo/**.py', 'bar/baz.py')
False
>>> pathmatch('**/templates/*.html', 'templates/index.html')
True
>>> pathmatch('**/templates/*.html', 'templates/foo/bar.html')
False
:param pattern: the glob pattern
:param filename: the path name of the file to match against
"""
symbols = {
'?': '[^/]',
'?/': '[^/]/',
'*': '[^/]+',
'*/': '[^/]+/',
'**/': '(?:.+/)*?',
'**': '(?:.+/)*?[^/]+',
}
if pattern.startswith('^'):
buf = ['^']
pattern = pattern[1:]
elif pattern.startswith('./'):
buf = ['^']
pattern = pattern[2:]
else:
buf = []
for idx, part in enumerate(re.split('([?*]+/?)', pattern)):
if idx % 2:
buf.append(symbols[part])
elif part:
buf.append(re.escape(part))
match = re.match(f"{''.join(buf)}$", filename.replace(os.sep, "/"))
return match is not None
class TextWrapper(textwrap.TextWrapper):
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))' # em-dash
)
def wraptext(text, width=70, initial_indent='', subsequent_indent=''):
"""Simple wrapper around the ``textwrap.wrap`` function in the standard
library. This version does not wrap lines on hyphens in words.
:param text: the text to wrap
:param width: the maximum line width
:param initial_indent: string that will be prepended to the first line of
wrapped output
:param subsequent_indent: string that will be prepended to all lines save
the first of wrapped output
"""
wrapper = TextWrapper(width=width, initial_indent=initial_indent,
subsequent_indent=subsequent_indent,
break_long_words=False)
return wrapper.wrap(text)
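# Illustrative usage of wraptext (a sketch):
#
#   wraptext('A fairly long sentence that needs wrapping', width=20)
#   # -> ['A fairly long', 'sentence that needs', 'wrapping']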
# TODO (Babel 3.x): Remove this re-export
odict = collections.OrderedDict
class FixedOffsetTimezone(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name=None):
self._offset = timedelta(minutes=offset)
if name is None:
name = 'Etc/GMT%+d' % offset
self.zone = name
def __str__(self):
return self.zone
def __repr__(self):
return f'<FixedOffset "{self.zone}" {self._offset}>'
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
return self.zone
def dst(self, dt):
return ZERO
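# Illustrative usage of FixedOffsetTimezone (a sketch):
#
#   from datetime import datetime
#   tz = FixedOffsetTimezone(-300, 'Etc/GMT+5')  # 300 minutes west of UTC
#   datetime(2022, 1, 1, 12, 0, tzinfo=tz).utcoffset()  # timedelta of -5 hours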
# Export the localtime functionality here because that's
# where it was in the past.
UTC = _pytz.utc
LOCALTZ = localtime.LOCALTZ
get_localzone = localtime.get_localzone
STDOFFSET = localtime.STDOFFSET
DSTOFFSET = localtime.DSTOFFSET
DSTDIFF = localtime.DSTDIFF
ZERO = localtime.ZERO
def _cmp(a, b):
return (a > b) - (a < b)
|
{
"content_hash": "75b3f14d8b3081c364645367781d30be",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 93,
"avg_line_length": 28.599236641221374,
"alnum_prop": 0.5631923128253036,
"repo_name": "python-babel/babel",
"id": "0436b9ee4938ed0b3515bc8f528d249deb5e0317",
"size": "7493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "babel/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "4751"
},
{
"name": "Makefile",
"bytes": "724"
},
{
"name": "Python",
"bytes": "709493"
}
],
"symlink_target": ""
}
|
"""unit testing code for 3D stuff
"""
from rdkit import RDConfig
import unittest,os
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import TorsionFingerprints
class TestCase(unittest.TestCase):
def testConformerRMS(self):
m1 = Chem.MolFromSmiles('CNc(n2)nc(C)cc2Nc(cc34)ccc3[nH]nc4')
cids = AllChem.EmbedMultipleConfs(m1,2)
m2 = Chem.MolFromSmiles('CNc(n2)nc(C)cc2Nc(cc34)ccc3[nH]nc4')
m2.AddConformer(m1.GetConformer(id=1))
# test that the prealigned flag is working
rms1 = AllChem.GetConformerRMS(m1, 0, 1, prealigned=True)
rms2 = AllChem.GetConformerRMS(m1, 0, 1, prealigned=False)
self.assertTrue((rms1>rms2))
# test that RMS is the same as calculated by AlignMol()
self.assertAlmostEqual(rms2, AllChem.GetBestRMS(m2, m1, 1, 0), 3)
# the RMS with itself must be zero
rms2 = AllChem.GetConformerRMS(m1, 0, 0, prealigned=True)
self.assertAlmostEqual(rms2, 0.0, 4)
def testConformerRMSMatrix(self):
m1 = Chem.MolFromSmiles('CNc(n2)nc(C)cc2Nc(cc34)ccc3[nH]nc4')
cids = AllChem.EmbedMultipleConfs(m1,3)
m2 = Chem.MolFromSmiles('CNc(n2)nc(C)cc2Nc(cc34)ccc3[nH]nc4')
m2.AddConformer(m1.GetConformer(id=0))
# test that the RMS matrix has the correct size
rmat = AllChem.GetConformerRMSMatrix(m1)
self.assertEqual(len(rmat), 3)
# test that the elements are in the right order
self.assertAlmostEqual(rmat[0], AllChem.GetBestRMS(m1, m2, 1, 0), 3)
self.assertAlmostEqual(rmat[1], AllChem.GetBestRMS(m1, m2, 2, 0), 3)
# test the prealigned option
rmat2 = AllChem.GetConformerRMSMatrix(m1, prealigned=True)
self.assertAlmostEqual(rmat[0], rmat2[0])
def testTorsionFingerprints(self):
# we use the xray structure from the paper (JCIM, 52, 1499, 2012): 1DWD
refFile = os.path.join(RDConfig.RDCodeDir,'Chem','test_data','1DWD_ligand.pdb')
ref = Chem.MolFromSmiles('NC(=[NH2+])c1ccc(C[C@@H](NC(=O)CNS(=O)(=O)c2ccc3ccccc3c2)C(=O)N2CCCCC2)cc1')
mol = Chem.MolFromPDBFile(refFile)
mol = AllChem.AssignBondOrdersFromTemplate(ref, mol)
# the torsion lists
tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol)
self.assertEqual(len(tors_list), 11)
self.assertEqual(len(tors_list_rings), 4)
self.assertAlmostEqual(tors_list[-1][1], 180.0, 4)
tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol, maxDev='spec')
self.assertAlmostEqual(tors_list[-1][1], 90.0, 4)
self.assertRaises(ValueError, TorsionFingerprints.CalculateTorsionLists, mol, maxDev='test')
tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol, symmRadius=0)
self.assertEqual(len(tors_list[0][0]), 2)
# the weights
weights = TorsionFingerprints.CalculateTorsionWeights(mol)
self.assertAlmostEqual(weights[4], 1.0)
self.assertEqual(len(weights),len(tors_list+tors_list_rings))
weights = TorsionFingerprints.CalculateTorsionWeights(mol, 15, 14)
self.assertAlmostEqual(weights[3], 1.0)
self.assertRaises(ValueError, TorsionFingerprints.CalculateTorsionWeights, mol, 15, 3)
# the torsion angles
tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol)
torsions = TorsionFingerprints.CalculateTorsionAngles(mol, tors_list, tors_list_rings)
self.assertEqual(len(weights), len(torsions))
self.assertAlmostEqual(torsions[2][0], 232.5346, 4)
# the torsion fingerprint deviation
tfd = TorsionFingerprints.CalculateTFD(torsions, torsions)
self.assertAlmostEqual(tfd, 0.0)
refFile = os.path.join(RDConfig.RDCodeDir,'Chem','test_data','1PPC_ligand.pdb')
mol2 = Chem.MolFromPDBFile(refFile)
mol2 = AllChem.AssignBondOrdersFromTemplate(ref, mol2)
torsions2 = TorsionFingerprints.CalculateTorsionAngles(mol2, tors_list, tors_list_rings)
weights = TorsionFingerprints.CalculateTorsionWeights(mol)
tfd = TorsionFingerprints.CalculateTFD(torsions, torsions2, weights=weights)
self.assertAlmostEqual(tfd, 0.0645, 4)
tfd = TorsionFingerprints.CalculateTFD(torsions, torsions2)
self.assertAlmostEqual(tfd, 0.1680, 4)
# the wrapper functions
tfd = TorsionFingerprints.GetTFDBetweenMolecules(mol, mol2)
self.assertAlmostEqual(tfd, 0.0645, 4)
mol.AddConformer(mol2.GetConformer(), assignId=True)
mol.AddConformer(mol2.GetConformer(), assignId=True)
tfd = TorsionFingerprints.GetTFDBetweenConformers(mol, confIds1=[0], confIds2=[1, 2])
self.assertEqual(len(tfd), 2)
self.assertAlmostEqual(tfd[0], 0.0645, 4)
tfdmat = TorsionFingerprints.GetTFDMatrix(mol)
self.assertEqual(len(tfdmat), 3)
def testTorsionFingerprintsAtomReordering(self):
# we use the xray structure from the paper (JCIM, 52, 1499, 2012): 1DWD
refFile = os.path.join(RDConfig.RDCodeDir,'Chem','test_data','1DWD_ligand.pdb')
ref = Chem.MolFromSmiles('NC(=[NH2+])c1ccc(C[C@@H](NC(=O)CNS(=O)(=O)c2ccc3ccccc3c2)C(=O)N2CCCCC2)cc1')
mol1 = Chem.MolFromPDBFile(refFile)
mol1 = AllChem.AssignBondOrdersFromTemplate(ref, mol1)
refFile = os.path.join(RDConfig.RDCodeDir,'Chem','test_data','1DWD_ligand_reordered.pdb')
mol2 = Chem.MolFromPDBFile(refFile)
mol2 = AllChem.AssignBondOrdersFromTemplate(ref, mol2)
tfd = TorsionFingerprints.GetTFDBetweenMolecules(mol1, mol2)
self.assertEqual(tfd, 0.0)
def testTorsionFingerprintsColinearBonds(self):
# test that single bonds adjacent to triple bonds are ignored
mol = Chem.MolFromSmiles('CCC#CCC')
tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol, ignoreColinearBonds=True)
self.assertEqual(len(tors_list), 0)
weights = TorsionFingerprints.CalculateTorsionWeights(mol, ignoreColinearBonds=True)
self.assertEqual(len(weights), 0)
# test that they are not ignored, but alternative atoms searched for
tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol, ignoreColinearBonds=False)
self.assertEqual(len(tors_list), 1)
self.assertEqual(tors_list[0][0][0], (0, 1, 4, 5))
weights = TorsionFingerprints.CalculateTorsionWeights(mol, ignoreColinearBonds=False)
self.assertEqual(len(weights), 1)
# test that single bonds adjacent to terminal triple bonds are always ignored
mol = Chem.MolFromSmiles('C#CCC')
tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol, ignoreColinearBonds=True)
self.assertEqual(len(tors_list), 0)
tors_list, tors_list_rings = TorsionFingerprints.CalculateTorsionLists(mol, ignoreColinearBonds=False)
self.assertEqual(len(tors_list), 0)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "05d238b4ef541cb67e2c52a0b2e15612",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 106,
"avg_line_length": 45.23809523809524,
"alnum_prop": 0.7341353383458646,
"repo_name": "soerendip42/rdkit",
"id": "745d3b91be79d55078d401cf59e918405b5f9e8d",
"size": "6659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdkit/Chem/UnitTestMol3D.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "385"
},
{
"name": "C",
"bytes": "203258"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "7168898"
},
{
"name": "CMake",
"bytes": "585758"
},
{
"name": "CSS",
"bytes": "4742"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "HTML",
"bytes": "65468"
},
{
"name": "Java",
"bytes": "248620"
},
{
"name": "JavaScript",
"bytes": "11595"
},
{
"name": "LLVM",
"bytes": "27271"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "15443"
},
{
"name": "Objective-C",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3045831"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "Shell",
"bytes": "8899"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "49170"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import *
# Register your models here.
class FullAdmin(admin.ModelAdmin):
readonly_fields = ('date','id',)
admin.site.register(Question, FullAdmin)
admin.site.register(Reply)
admin.site.register(ReplyVotedBy, FullAdmin)
admin.site.register(Comment)
|
{
"content_hash": "385f430593d2cd6112a21892870749e3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 44,
"avg_line_length": 23.153846153846153,
"alnum_prop": 0.7740863787375415,
"repo_name": "sergiorb/askkit",
"id": "ad334da512303e5322d7feeadfa95fcff13decad",
"size": "348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questions/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "221614"
},
{
"name": "HTML",
"bytes": "111903"
},
{
"name": "JavaScript",
"bytes": "952566"
},
{
"name": "Python",
"bytes": "688989"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from comments.views import (
CommentCreate,
CommentList,
CommentDetail,
CommentDestroy,
CommentUpdate,
)
urlpatterns = [
url(r'^$', CommentList.as_view(), name = 'lists'),
url(r'^create/$', CommentCreate.as_view(), name = 'create'),
url(r'^(?P<pk>\d+)/$', CommentDetail.as_view(), name = 'details'),
url(r'^(?P<pk>\d+)/update/$', CommentUpdate.as_view(), name = 'update'),
url(r'^(?P<pk>\d+)/destroy/$', CommentDestroy.as_view(), name = 'destroy')
]
|
{
"content_hash": "b6907460e24011eb4d8c714d11c1ed44",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 39,
"alnum_prop": 0.5144230769230769,
"repo_name": "bakowroc/newsfeed-system",
"id": "196d86830def0f26a4e18d3dce5220efa2073801",
"size": "624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newsfeedsystem/comments/api/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27521"
},
{
"name": "HTML",
"bytes": "18285"
},
{
"name": "JavaScript",
"bytes": "2149"
},
{
"name": "Python",
"bytes": "33883"
},
{
"name": "TypeScript",
"bytes": "34866"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from django.db import connection
from django.test import override_settings
from sqlalchemy.sql import (
and_, select, column, table,
)
from sqlalchemy.sql import compiler # type: ignore
from zerver.models import (
Realm, Recipient, Stream, Subscription, UserProfile, Attachment,
get_display_recipient, get_recipient, get_realm, get_stream, get_user,
Reaction, UserMessage
)
from zerver.lib.message import (
MessageDict,
)
from zerver.lib.narrow import (
build_narrow_filter,
)
from zerver.lib.request import JsonableError
from zerver.lib.str_utils import force_bytes
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.test_helpers import (
POSTRequestMock,
TestCase,
get_user_messages, queries_captured,
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.views.messages import (
exclude_muting_conditions,
get_messages_backend, ok_to_include_history,
NarrowBuilder, BadNarrowOperator, Query,
LARGER_THAN_MAX_MESSAGE_ID,
)
from typing import Dict, List, Mapping, Sequence, Tuple, Generic, Union, Any, Optional, Text
from six.moves import range
import os
import re
import ujson
def get_sqlalchemy_query_params(query):
# type: (Text) -> Dict[Text, Text]
dialect = get_sqlalchemy_connection().dialect # type: ignore
comp = compiler.SQLCompiler(dialect, query)
return comp.params
def fix_ws(s):
# type: (Text) -> Text
    return re.sub(r'\s+', ' ', str(s)).strip()
def get_recipient_id_for_stream_name(realm, stream_name):
# type: (Realm, Text) -> Text
stream = get_stream(stream_name, realm)
return get_recipient(Recipient.STREAM, stream.id).id
def mute_stream(realm, user_profile, stream_name):
# type: (Realm, Text, Text) -> None
stream = get_stream(stream_name, realm)
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
subscription = Subscription.objects.get(recipient=recipient, user_profile=user_profile)
subscription.in_home_view = False
subscription.save()
class NarrowBuilderTest(ZulipTestCase):
def setUp(self):
# type: () -> None
self.realm = get_realm('zulip')
self.user_profile = self.example_user('hamlet')
self.builder = NarrowBuilder(self.user_profile, column('id'))
self.raw_query = select([column("id")], None, table("zerver_message"))
def test_add_term_using_not_defined_operator(self):
# type: () -> None
term = dict(operator='not-defined', operand='any')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_stream_operator(self):
# type: () -> None
term = dict(operator='stream', operand='Scotland')
self._do_add_term_test(term, 'WHERE recipient_id = :recipient_id_1')
def test_add_term_using_stream_operator_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='stream', operand='Scotland', negated=True)
self._do_add_term_test(term, 'WHERE recipient_id != :recipient_id_1')
def test_add_term_using_stream_operator_and_non_existing_operand_should_raise_error(self): # NEGATED
# type: () -> None
term = dict(operator='stream', operand='NonExistingStream')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_is_operator_and_private_operand(self):
# type: () -> None
term = dict(operator='is', operand='private')
self._do_add_term_test(term, 'WHERE type = :type_1 OR type = :type_2')
def test_add_term_using_is_operator_private_operand_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='is', operand='private', negated=True)
self._do_add_term_test(term, 'WHERE NOT (type = :type_1 OR type = :type_2)')
def test_add_term_using_is_operator_and_non_private_operand(self):
# type: () -> None
for operand in ['starred', 'mentioned', 'alerted']:
term = dict(operator='is', operand=operand)
self._do_add_term_test(term, 'WHERE (flags & :flags_1) != :param_1')
def test_add_term_using_is_operator_and_unread_operand(self):
# type: () -> None
term = dict(operator='is', operand='unread')
self._do_add_term_test(term, 'WHERE (flags & :flags_1) = :param_1')
def test_add_term_using_is_operator_and_unread_operand_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='is', operand='unread', negated=True)
self._do_add_term_test(term, 'WHERE (flags & :flags_1) != :param_1')
def test_add_term_using_is_operator_non_private_operand_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='is', operand='starred', negated=True)
where_clause = 'WHERE (flags & :flags_1) = :param_1'
params = dict(
flags_1=UserMessage.flags.starred.mask,
param_1=0
)
self._do_add_term_test(term, where_clause, params)
term = dict(operator='is', operand='alerted', negated=True)
where_clause = 'WHERE (flags & :flags_1) = :param_1'
params = dict(
flags_1=UserMessage.flags.has_alert_word.mask,
param_1=0
)
self._do_add_term_test(term, where_clause, params)
term = dict(operator='is', operand='mentioned', negated=True)
where_clause = 'WHERE NOT ((flags & :flags_1) != :param_1 OR (flags & :flags_2) != :param_2)'
params = dict(
flags_1=UserMessage.flags.mentioned.mask,
param_1=0,
flags_2=UserMessage.flags.wildcard_mentioned.mask,
param_2=0
)
self._do_add_term_test(term, where_clause, params)
def test_add_term_using_non_supported_operator_should_raise_error(self):
# type: () -> None
term = dict(operator='is', operand='non_supported')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_topic_operator_and_lunch_operand(self):
# type: () -> None
term = dict(operator='topic', operand='lunch')
self._do_add_term_test(term, 'WHERE upper(subject) = upper(:param_1)')
def test_add_term_using_topic_operator_lunch_operand_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='topic', operand='lunch', negated=True)
self._do_add_term_test(term, 'WHERE upper(subject) != upper(:param_1)')
def test_add_term_using_topic_operator_and_personal_operand(self):
# type: () -> None
term = dict(operator='topic', operand='personal')
self._do_add_term_test(term, 'WHERE upper(subject) = upper(:param_1)')
def test_add_term_using_topic_operator_personal_operand_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='topic', operand='personal', negated=True)
self._do_add_term_test(term, 'WHERE upper(subject) != upper(:param_1)')
def test_add_term_using_sender_operator(self):
# type: () -> None
term = dict(operator='sender', operand=self.example_email("othello"))
self._do_add_term_test(term, 'WHERE sender_id = :param_1')
def test_add_term_using_sender_operator_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='sender', operand=self.example_email("othello"), negated=True)
self._do_add_term_test(term, 'WHERE sender_id != :param_1')
def test_add_term_using_sender_operator_with_non_existing_user_as_operand(self): # NEGATED
# type: () -> None
term = dict(operator='sender', operand='[email protected]')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_pm_with_operator_and_not_the_same_user_as_operand(self):
# type: () -> None
term = dict(operator='pm-with', operand=self.example_email("othello"))
self._do_add_term_test(term, 'WHERE sender_id = :sender_id_1 AND recipient_id = :recipient_id_1 OR sender_id = :sender_id_2 AND recipient_id = :recipient_id_2')
def test_add_term_using_pm_with_operator_not_the_same_user_as_operand_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='pm-with', operand=self.example_email("othello"), negated=True)
self._do_add_term_test(term, 'WHERE NOT (sender_id = :sender_id_1 AND recipient_id = :recipient_id_1 OR sender_id = :sender_id_2 AND recipient_id = :recipient_id_2)')
def test_add_term_using_pm_with_operator_the_same_user_as_operand(self):
# type: () -> None
term = dict(operator='pm-with', operand=self.example_email("hamlet"))
self._do_add_term_test(term, 'WHERE sender_id = :sender_id_1 AND recipient_id = :recipient_id_1')
def test_add_term_using_pm_with_operator_the_same_user_as_operand_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='pm-with', operand=self.example_email("hamlet"), negated=True)
self._do_add_term_test(term, 'WHERE NOT (sender_id = :sender_id_1 AND recipient_id = :recipient_id_1)')
def test_add_term_using_pm_with_operator_and_more_than_user_as_operand(self):
# type: () -> None
term = dict(operator='pm-with', operand='[email protected], [email protected]')
self._do_add_term_test(term, 'WHERE recipient_id = :recipient_id_1')
def test_add_term_using_pm_with_operator_more_than_user_as_operand_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='pm-with', operand='[email protected], [email protected]', negated=True)
self._do_add_term_test(term, 'WHERE recipient_id != :recipient_id_1')
def test_add_term_using_pm_with_operator_with_non_existing_user_as_operand(self):
# type: () -> None
term = dict(operator='pm-with', operand='[email protected]')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_pm_with_operator_with_existing_and_non_existing_user_as_operand(self):
# type: () -> None
term = dict(operator='pm-with', operand='[email protected],[email protected]')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_id_operator(self):
# type: () -> None
term = dict(operator='id', operand=555)
self._do_add_term_test(term, 'WHERE id = :param_1')
def test_add_term_using_id_operator_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='id', operand=555, negated=True)
self._do_add_term_test(term, 'WHERE id != :param_1')
def test_add_term_using_group_pm_operator_and_not_the_same_user_as_operand(self):
# type: () -> None
term = dict(operator='group-pm-with', operand=self.example_email("othello"))
self._do_add_term_test(term, 'WHERE recipient_id != recipient_id')
def test_add_term_using_group_pm_operator_not_the_same_user_as_operand_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='group-pm-with', operand=self.example_email("othello"), negated=True)
self._do_add_term_test(term, 'WHERE recipient_id = recipient_id')
def test_add_term_using_group_pm_operator_with_non_existing_user_as_operand(self):
# type: () -> None
term = dict(operator='group-pm-with', operand='[email protected]')
self.assertRaises(BadNarrowOperator, self._build_query, term)
@override_settings(USING_PGROONGA=False)
def test_add_term_using_search_operator(self):
# type: () -> None
term = dict(operator='search', operand='"french fries"')
self._do_add_term_test(term, 'WHERE (lower(content) LIKE lower(:content_1) OR lower(subject) LIKE lower(:subject_1)) AND (search_tsvector @@ plainto_tsquery(:param_2, :param_3))')
@override_settings(USING_PGROONGA=False)
def test_add_term_using_search_operator_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='search', operand='"french fries"', negated=True)
self._do_add_term_test(term, 'WHERE NOT (lower(content) LIKE lower(:content_1) OR lower(subject) LIKE lower(:subject_1)) AND NOT (search_tsvector @@ plainto_tsquery(:param_2, :param_3))')
@override_settings(USING_PGROONGA=True)
def test_add_term_using_search_operator_pgroonga(self):
# type: () -> None
term = dict(operator='search', operand='"french fries"')
self._do_add_term_test(term, 'WHERE search_pgroonga @@ :search_pgroonga_1')
@override_settings(USING_PGROONGA=True)
def test_add_term_using_search_operator_and_negated_pgroonga(self): # NEGATED
# type: () -> None
term = dict(operator='search', operand='"french fries"', negated=True)
self._do_add_term_test(term, 'WHERE NOT (search_pgroonga @@ :search_pgroonga_1)')
def test_add_term_using_has_operator_and_attachment_operand(self):
# type: () -> None
term = dict(operator='has', operand='attachment')
self._do_add_term_test(term, 'WHERE has_attachment')
def test_add_term_using_has_operator_attachment_operand_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='has', operand='attachment', negated=True)
self._do_add_term_test(term, 'WHERE NOT has_attachment')
def test_add_term_using_has_operator_and_image_operand(self):
# type: () -> None
term = dict(operator='has', operand='image')
self._do_add_term_test(term, 'WHERE has_image')
def test_add_term_using_has_operator_image_operand_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='has', operand='image', negated=True)
self._do_add_term_test(term, 'WHERE NOT has_image')
def test_add_term_using_has_operator_and_link_operand(self):
# type: () -> None
term = dict(operator='has', operand='link')
self._do_add_term_test(term, 'WHERE has_link')
def test_add_term_using_has_operator_link_operand_and_negated(self): # NEGATED
# type: () -> None
term = dict(operator='has', operand='link', negated=True)
self._do_add_term_test(term, 'WHERE NOT has_link')
def test_add_term_using_has_operator_non_supported_operand_should_raise_error(self):
# type: () -> None
term = dict(operator='has', operand='non_supported')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_in_operator(self):
# type: () -> None
mute_stream(self.realm, self.user_profile, 'Verona')
term = dict(operator='in', operand='home')
self._do_add_term_test(term, 'WHERE recipient_id NOT IN (:recipient_id_1)')
def test_add_term_using_in_operator_and_negated(self):
# type: () -> None
# negated = True should not change anything
mute_stream(self.realm, self.user_profile, 'Verona')
term = dict(operator='in', operand='home', negated=True)
self._do_add_term_test(term, 'WHERE recipient_id NOT IN (:recipient_id_1)')
def test_add_term_using_in_operator_and_all_operand(self):
# type: () -> None
mute_stream(self.realm, self.user_profile, 'Verona')
term = dict(operator='in', operand='all')
query = self._build_query(term)
self.assertEqual(str(query), 'SELECT id \nFROM zerver_message')
def test_add_term_using_in_operator_all_operand_and_negated(self):
# type: () -> None
# negated = True should not change anything
mute_stream(self.realm, self.user_profile, 'Verona')
term = dict(operator='in', operand='all', negated=True)
query = self._build_query(term)
self.assertEqual(str(query), 'SELECT id \nFROM zerver_message')
def test_add_term_using_in_operator_and_not_defined_operand(self):
# type: () -> None
term = dict(operator='in', operand='not_defined')
self.assertRaises(BadNarrowOperator, self._build_query, term)
def test_add_term_using_near_operator(self):
# type: () -> None
term = dict(operator='near', operand='operand')
query = self._build_query(term)
self.assertEqual(str(query), 'SELECT id \nFROM zerver_message')
def _do_add_term_test(self, term, where_clause, params=None):
# type: (Dict[str, Any], Text, Optional[Dict[str, Any]]) -> None
query = self._build_query(term)
if params is not None:
actual_params = query.compile().params
self.assertEqual(actual_params, params)
self.assertTrue(where_clause in str(query))
def _build_query(self, term):
# type: (Dict[str, Any]) -> Query
return self.builder.add_term(self.raw_query, term)
class BuildNarrowFilterTest(TestCase):
def test_build_narrow_filter(self):
# type: () -> None
fixtures_path = os.path.join(os.path.dirname(__file__),
'../fixtures/narrow.json')
scenarios = ujson.loads(open(fixtures_path, 'r').read())
self.assertTrue(len(scenarios) == 9)
for scenario in scenarios:
narrow = scenario['narrow']
accept_events = scenario['accept_events']
reject_events = scenario['reject_events']
narrow_filter = build_narrow_filter(narrow)
for e in accept_events:
self.assertTrue(narrow_filter(e))
for e in reject_events:
self.assertFalse(narrow_filter(e))
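        # A minimal usage sketch (the narrow and event shapes are inferred from
        # the fixtures above, not a documented contract): build_narrow_filter
        # returns a plain predicate, so callers effectively do
        #     narrow_filter = build_narrow_filter([["stream", "Verona"]])
        #     narrow_filter(event)  # True only when the event matches the narrow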
def test_build_narrow_filter_invalid(self):
# type: () -> None
with self.assertRaises(JsonableError):
build_narrow_filter(["invalid_operator", "operand"])
class IncludeHistoryTest(ZulipTestCase):
def test_ok_to_include_history(self):
# type: () -> None
realm = get_realm('zulip')
self.make_stream('public_stream', realm=realm)
# Negated stream searches should not include history.
narrow = [
dict(operator='stream', operand='public_stream', negated=True),
]
self.assertFalse(ok_to_include_history(narrow, realm))
# Definitely forbid seeing history on private streams.
narrow = [
dict(operator='stream', operand='private_stream'),
]
self.assertFalse(ok_to_include_history(narrow, realm))
# History doesn't apply to PMs.
narrow = [
dict(operator='is', operand='private'),
]
self.assertFalse(ok_to_include_history(narrow, realm))
# History doesn't apply to unread messages.
narrow = [
dict(operator='is', operand='unread'),
]
self.assertFalse(ok_to_include_history(narrow, realm))
# If we are looking for something like starred messages, there is
# no point in searching historical messages.
narrow = [
dict(operator='stream', operand='public_stream'),
dict(operator='is', operand='starred'),
]
self.assertFalse(ok_to_include_history(narrow, realm))
# simple True case
narrow = [
dict(operator='stream', operand='public_stream'),
]
self.assertTrue(ok_to_include_history(narrow, realm))
narrow = [
dict(operator='stream', operand='public_stream'),
dict(operator='topic', operand='whatever'),
dict(operator='search', operand='needle in haystack'),
]
self.assertTrue(ok_to_include_history(narrow, realm))
class GetOldMessagesTest(ZulipTestCase):
def get_and_check_messages(self, modified_params):
# type: (Dict[str, Union[str, int]]) -> Dict[str, Dict]
post_params = {"anchor": 1, "num_before": 1, "num_after": 1} # type: Dict[str, Union[str, int]]
post_params.update(modified_params)
payload = self.client_get("/json/messages", dict(post_params))
self.assert_json_success(payload)
result = ujson.loads(payload.content)
self.assertIn("messages", result)
self.assertIsInstance(result["messages"], list)
for message in result["messages"]:
for field in ("content", "content_type", "display_recipient",
"avatar_url", "recipient_id", "sender_full_name",
"sender_short_name", "timestamp", "reactions"):
self.assertIn(field, message)
return result
def get_query_ids(self):
# type: () -> Dict[Text, int]
hamlet_user = self.example_user('hamlet')
othello_user = self.example_user('othello')
query_ids = {} # type: Dict[Text, int]
scotland_stream = get_stream('Scotland', hamlet_user.realm)
query_ids['scotland_recipient'] = get_recipient(Recipient.STREAM, scotland_stream.id).id
query_ids['hamlet_id'] = hamlet_user.id
query_ids['othello_id'] = othello_user.id
query_ids['hamlet_recipient'] = get_recipient(Recipient.PERSONAL, hamlet_user.id).id
query_ids['othello_recipient'] = get_recipient(Recipient.PERSONAL, othello_user.id).id
return query_ids
def test_successful_get_messages_reaction(self):
# type: () -> None
"""
Test old `/json/messages` returns reactions.
"""
self.login(self.example_email("hamlet"))
messages = self.get_and_check_messages(dict())
message_id = messages['messages'][0]['id']
self.login(self.example_email("othello"))
reaction_name = 'slightly_smiling_face'
url = '/json/messages/{}/emoji_reactions/{}'.format(message_id, reaction_name)
payload = self.client_put(url)
self.assert_json_success(payload)
self.login(self.example_email("hamlet"))
messages = self.get_and_check_messages({})
message_to_assert = None
for message in messages['messages']:
if message['id'] == message_id:
message_to_assert = message
break
assert(message_to_assert is not None)
self.assertEqual(len(message_to_assert['reactions']), 1)
self.assertEqual(message_to_assert['reactions'][0]['emoji_name'],
reaction_name)
def test_successful_get_messages(self):
# type: () -> None
"""
A call to GET /json/messages with valid parameters returns a list of
messages.
"""
self.login(self.example_email("hamlet"))
self.get_and_check_messages(dict())
# We have to support the legacy tuple style while there are old
# clients around, which might include third party home-grown bots.
self.get_and_check_messages(dict(narrow=ujson.dumps([['pm-with', self.example_email("othello")]])))
self.get_and_check_messages(dict(narrow=ujson.dumps([dict(operator='pm-with', operand=self.example_email("othello"))])))
def test_get_messages_with_narrow_pm_with(self):
# type: () -> None
"""
A request for old messages with a narrow by pm-with only returns
conversations with that user.
"""
me = self.example_email('hamlet')
def dr_emails(dr):
# type: (Union[Text, List[Dict[str, Any]]]) -> Text
assert isinstance(dr, list)
return ','.join(sorted(set([r['email'] for r in dr] + [me])))
self.send_message(me, self.example_email("iago"), Recipient.PERSONAL)
self.send_message(me,
[self.example_email("iago"), self.example_email("cordelia")],
Recipient.HUDDLE)
personals = [m for m in get_user_messages(self.example_user('hamlet'))
if m.recipient.type == Recipient.PERSONAL or
m.recipient.type == Recipient.HUDDLE]
for personal in personals:
emails = dr_emails(get_display_recipient(personal.recipient))
self.login(me)
narrow = [dict(operator='pm-with', operand=emails)]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))
for message in result["messages"]:
self.assertEqual(dr_emails(message['display_recipient']), emails)
def test_get_messages_with_narrow_group_pm_with(self):
# type: () -> None
"""
A request for old messages with a narrow by group-pm-with only returns
group-private conversations with that user.
"""
me = self.example_email("hamlet")
matching_message_ids = []
matching_message_ids.append(self.send_message(me, [self.example_email("iago"), self.example_email("cordelia"), self.example_email("othello")], Recipient.HUDDLE))
matching_message_ids.append(self.send_message(me, [self.example_email("cordelia"), self.example_email("othello")], Recipient.HUDDLE))
non_matching_message_ids = []
non_matching_message_ids.append(self.send_message(me, self.example_email("cordelia"), Recipient.PERSONAL))
non_matching_message_ids.append(self.send_message(me, [self.example_email("iago"), self.example_email("othello")], Recipient.HUDDLE))
non_matching_message_ids.append(self.send_message(self.example_email("cordelia"), [self.example_email("iago"), self.example_email("othello")], Recipient.HUDDLE))
self.login(me)
narrow = [dict(operator='group-pm-with', operand=self.example_email("cordelia"))]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))
for message in result["messages"]:
self.assertIn(message["id"], matching_message_ids)
self.assertNotIn(message["id"], non_matching_message_ids)
def test_get_messages_with_narrow_stream(self):
# type: () -> None
"""
A request for old messages with a narrow by stream only returns
messages for that stream.
"""
self.login(self.example_email('hamlet'))
# We need to subscribe to a stream and then send a message to
# it to ensure that we actually have a stream message in this
# narrow view.
self.subscribe_to_stream(self.example_email("hamlet"), 'Scotland')
self.send_message(self.example_email("hamlet"), "Scotland", Recipient.STREAM)
messages = get_user_messages(self.example_user('hamlet'))
stream_messages = [msg for msg in messages if msg.recipient.type == Recipient.STREAM]
stream_name = get_display_recipient(stream_messages[0].recipient)
stream_id = stream_messages[0].recipient.id
narrow = [dict(operator='stream', operand=stream_name)]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))
for message in result["messages"]:
self.assertEqual(message["type"], "stream")
self.assertEqual(message["recipient_id"], stream_id)
def test_get_messages_with_narrow_stream_mit_unicode_regex(self):
# type: () -> None
"""
        A request for old messages for a user in the mit.edu realm with unicode
stream name should be correctly escaped in the database query.
"""
self.login(self.mit_email("starnine"))
        # We need to subscribe to a stream and then send a message to
# it to ensure that we actually have a stream message in this
# narrow view.
lambda_stream_name = u"\u03bb-stream"
self.subscribe_to_stream(self.mit_email("starnine"), lambda_stream_name)
lambda_stream_d_name = u"\u03bb-stream.d"
self.subscribe_to_stream(self.mit_email("starnine"), lambda_stream_d_name)
self.send_message(self.mit_email("starnine"), u"\u03bb-stream", Recipient.STREAM)
self.send_message(self.mit_email("starnine"), u"\u03bb-stream.d", Recipient.STREAM)
narrow = [dict(operator='stream', operand=u'\u03bb-stream')]
result = self.get_and_check_messages(dict(num_after=2,
narrow=ujson.dumps(narrow)))
messages = get_user_messages(self.mit_user("starnine"))
stream_messages = [msg for msg in messages if msg.recipient.type == Recipient.STREAM]
self.assertEqual(len(result["messages"]), 2)
for i, message in enumerate(result["messages"]):
self.assertEqual(message["type"], "stream")
stream_id = stream_messages[i].recipient.id
self.assertEqual(message["recipient_id"], stream_id)
def test_get_messages_with_narrow_topic_mit_unicode_regex(self):
# type: () -> None
"""
A request for old messages for a user in the mit.edu realm with unicode
topic name should be correctly escaped in the database query.
"""
mit_user_profile = self.mit_user("starnine")
email = mit_user_profile.email
self.login(email)
        # We need to subscribe to a stream and then send a message to
# it to ensure that we actually have a stream message in this
# narrow view.
self.subscribe_to_stream(email, "Scotland")
self.send_message(email, "Scotland", Recipient.STREAM,
subject=u"\u03bb-topic")
self.send_message(email, "Scotland", Recipient.STREAM,
subject=u"\u03bb-topic.d")
self.send_message(email, "Scotland", Recipient.STREAM,
subject=u"\u03bb-topic.d.d")
self.send_message(email, "Scotland", Recipient.STREAM,
subject=u"\u03bb-topic.d.d.d")
self.send_message(email, "Scotland", Recipient.STREAM,
subject=u"\u03bb-topic.d.d.d.d")
narrow = [dict(operator='topic', operand=u'\u03bb-topic')]
result = self.get_and_check_messages(dict(
num_after=100,
narrow=ujson.dumps(narrow)))
messages = get_user_messages(mit_user_profile)
stream_messages = [msg for msg in messages if msg.recipient.type == Recipient.STREAM]
self.assertEqual(len(result["messages"]), 5)
for i, message in enumerate(result["messages"]):
self.assertEqual(message["type"], "stream")
stream_id = stream_messages[i].recipient.id
self.assertEqual(message["recipient_id"], stream_id)
def test_get_messages_with_narrow_topic_mit_personal(self):
# type: () -> None
"""
We handle .d grouping for MIT realm personal messages correctly.
"""
mit_user_profile = self.mit_user("starnine")
email = mit_user_profile.email
        self.login(email)
        # We need to subscribe to a stream and then send a message to
# it to ensure that we actually have a stream message in this
# narrow view.
self.subscribe_to_stream(email, "Scotland")
self.send_message(email, "Scotland", Recipient.STREAM,
subject=u".d.d")
self.send_message(email, "Scotland", Recipient.STREAM,
subject=u"PERSONAL")
self.send_message(email, "Scotland", Recipient.STREAM,
subject=u'(instance "").d')
self.send_message(email, "Scotland", Recipient.STREAM,
subject=u".d.d.d")
self.send_message(email, "Scotland", Recipient.STREAM,
subject=u"personal.d")
self.send_message(email, "Scotland", Recipient.STREAM,
subject=u'(instance "")')
self.send_message(email, "Scotland", Recipient.STREAM,
subject=u".d.d.d.d")
narrow = [dict(operator='topic', operand=u'personal.d.d')]
result = self.get_and_check_messages(dict(
num_before=50,
num_after=50,
narrow=ujson.dumps(narrow)))
messages = get_user_messages(mit_user_profile)
stream_messages = [msg for msg in messages if msg.recipient.type == Recipient.STREAM]
self.assertEqual(len(result["messages"]), 7)
for i, message in enumerate(result["messages"]):
self.assertEqual(message["type"], "stream")
stream_id = stream_messages[i].recipient.id
self.assertEqual(message["recipient_id"], stream_id)
def test_get_messages_with_narrow_sender(self):
# type: () -> None
"""
A request for old messages with a narrow by sender only returns
messages sent by that person.
"""
self.login(self.example_email("hamlet"))
# We need to send a message here to ensure that we actually
# have a stream message in this narrow view.
self.send_message(self.example_email("hamlet"), "Scotland", Recipient.STREAM)
self.send_message(self.example_email("othello"), "Scotland", Recipient.STREAM)
self.send_message(self.example_email("othello"), self.example_email("hamlet"), Recipient.PERSONAL)
self.send_message(self.example_email("iago"), "Scotland", Recipient.STREAM)
narrow = [dict(operator='sender', operand=self.example_email("othello"))]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))
for message in result["messages"]:
self.assertEqual(message["sender_email"], self.example_email("othello"))
def _update_tsvector_index(self):
# type: () -> None
# We use brute force here and update our text search index
# for the entire zerver_message table (which is small in test
# mode). In production there is an async process which keeps
# the search index up to date.
with connection.cursor() as cursor:
cursor.execute("""
UPDATE zerver_message SET
search_tsvector = to_tsvector('zulip.english_us_search',
subject || rendered_content)
""")
@override_settings(USING_PGROONGA=False)
def test_messages_in_narrow(self):
# type: () -> None
email = self.example_email("cordelia")
self.login(email)
def send(content):
# type: (Text) -> int
msg_id = self.send_message(
sender_name=email,
raw_recipients="Verona",
message_type=Recipient.STREAM,
content=content,
)
return msg_id
good_id = send('KEYWORDMATCH and should work')
bad_id = send('no match')
msg_ids = [good_id, bad_id]
send('KEYWORDMATCH but not in msg_ids')
self._update_tsvector_index()
narrow = [
dict(operator='search', operand='KEYWORDMATCH'),
]
raw_params = dict(msg_ids=msg_ids, narrow=narrow)
params = {k: ujson.dumps(v) for k, v in raw_params.items()}
result = self.client_get('/json/messages/matches_narrow', params)
self.assert_json_success(result)
messages = result.json()['messages']
self.assertEqual(len(list(messages.keys())), 1)
message = messages[str(good_id)]
self.assertEqual(message['match_content'],
u'<p><span class="highlight">KEYWORDMATCH</span> and should work</p>')
@override_settings(USING_PGROONGA=False)
def test_get_messages_with_search(self):
# type: () -> None
self.login(self.example_email("cordelia"))
messages_to_search = [
('breakfast', 'there are muffins in the conference room'),
('lunch plans', 'I am hungry!'),
('meetings', 'discuss lunch after lunch'),
('meetings', 'please bring your laptops to take notes'),
('dinner', 'Anybody staying late tonight?'),
('urltest', 'https://google.com'),
]
next_message_id = self.get_last_message().id + 1
for topic, content in messages_to_search:
self.send_message(
sender_name=self.example_email("cordelia"),
raw_recipients="Verona",
message_type=Recipient.STREAM,
content=content,
subject=topic,
)
self._update_tsvector_index()
narrow = [
dict(operator='sender', operand=self.example_email("cordelia")),
dict(operator='search', operand='lunch'),
]
result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_before=0,
num_after=10,
)) # type: Dict[str, Dict]
self.assertEqual(len(result['messages']), 2)
messages = result['messages']
narrow = [dict(operator='search', operand='https://google.com')]
link_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_before=0,
num_after=10,
)) # type: Dict[str, Dict]
self.assertEqual(len(link_search_result['messages']), 1)
self.assertEqual(link_search_result['messages'][0]['match_content'],
'<p><a href="https://google.com" target="_blank" title="https://google.com">https://<span class="highlight">google.com</span></a></p>')
meeting_message = [m for m in messages if m['subject'] == 'meetings'][0]
self.assertEqual(
meeting_message['match_subject'],
'meetings')
self.assertEqual(
meeting_message['match_content'],
'<p>discuss <span class="highlight">lunch</span> after ' +
'<span class="highlight">lunch</span></p>')
meeting_message = [m for m in messages if m['subject'] == 'lunch plans'][0]
self.assertEqual(
meeting_message['match_subject'],
'<span class="highlight">lunch</span> plans')
self.assertEqual(
meeting_message['match_content'],
'<p>I am hungry!</p>')
# Should not crash when multiple search operands are present
multi_search_narrow = [
dict(operator='search', operand='discuss'),
dict(operator='search', operand='after'),
]
multi_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(multi_search_narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
)) # type: Dict[str, Dict]
self.assertEqual(len(multi_search_result['messages']), 1)
self.assertEqual(multi_search_result['messages'][0]['match_content'], '<p><span class="highlight">discuss</span> lunch <span class="highlight">after</span> lunch</p>')
@override_settings(USING_PGROONGA=False)
def test_get_messages_with_search_not_subscribed(self):
# type: () -> None
"""Verify support for searching a stream you're not subscribed to"""
self.subscribe_to_stream(self.example_email("hamlet"), "newstream")
self.send_message(
sender_name=self.example_email("hamlet"),
raw_recipients="newstream",
message_type=Recipient.STREAM,
content="Public special content!",
subject="new",
)
self._update_tsvector_index()
self.login(self.example_email("cordelia"))
stream_search_narrow = [
dict(operator='search', operand='special'),
dict(operator='stream', operand='newstream'),
]
stream_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(stream_search_narrow),
anchor=0,
num_after=10,
num_before=10,
)) # type: Dict[str, Dict]
self.assertEqual(len(stream_search_result['messages']), 1)
self.assertEqual(stream_search_result['messages'][0]['match_content'],
'<p>Public <span class="highlight">special</span> content!</p>')
@override_settings(USING_PGROONGA=True)
def test_get_messages_with_search_pgroonga(self):
# type: () -> None
self.login(self.example_email("cordelia"))
next_message_id = self.get_last_message().id + 1
messages_to_search = [
(u'日本語', u'こんにちは。今日はいい天気ですね。'),
(u'日本語', u'今朝はごはんを食べました。'),
(u'日本語', u'昨日、日本のお菓子を送りました。'),
('english', u'I want to go to 日本!'),
('english', 'Can you speak https://en.wikipedia.org/wiki/Japanese?'),
('english', 'https://google.com'),
]
for topic, content in messages_to_search:
self.send_message(
sender_name=self.example_email("cordelia"),
raw_recipients="Verona",
message_type=Recipient.STREAM,
content=content,
subject=topic,
)
# We use brute force here and update our text search index
# for the entire zerver_message table (which is small in test
# mode). In production there is an async process which keeps
# the search index up to date.
with connection.cursor() as cursor:
cursor.execute("""
UPDATE zerver_message SET
search_pgroonga = subject || ' ' || rendered_content
""")
narrow = [
dict(operator='search', operand=u'日本'),
]
result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
)) # type: Dict[str, Dict]
self.assertEqual(len(result['messages']), 4)
messages = result['messages']
japanese_message = [m for m in messages if m['subject'] == u'日本語'][-1]
self.assertEqual(
japanese_message['match_subject'],
u'<span class="highlight">日本</span>語')
self.assertEqual(
japanese_message['match_content'],
u'<p>昨日、<span class="highlight">日本</span>の' +
u'お菓子を送りました。</p>')
english_message = [m for m in messages if m['subject'] == 'english'][0]
self.assertEqual(
english_message['match_subject'],
'english')
self.assertIn(
english_message['match_content'],
# NOTE: The whitespace here is off due to a pgroonga bug.
# This bug is a pgroonga regression and according to one of
            # the authors, this should be fixed in its next release.
[u'<p>I want to go to <span class="highlight">日本</span>!</p>', # This is correct.
u'<p>I want to go to<span class="highlight"> 日本</span>!</p>', ])
# Should not crash when multiple search operands are present
multi_search_narrow = [
dict(operator='search', operand='can'),
dict(operator='search', operand='speak'),
dict(operator='search', operand='wiki'),
]
multi_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(multi_search_narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
)) # type: Dict[str, Dict]
self.assertEqual(len(multi_search_result['messages']), 1)
self.assertEqual(multi_search_result['messages'][0]['match_content'],
'<p><span class="highlight">Can</span> you <span class="highlight">speak</span> <a href="https://en.wikipedia.org/wiki/Japanese" target="_blank" title="https://en.wikipedia.org/wiki/Japanese">https://en.<span class="highlight">wiki</span>pedia.org/<span class="highlight">wiki</span>/Japanese</a>?</p>')
narrow = [dict(operator='search', operand='https://google.com')]
link_search_result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_after=10,
num_before=0,
)) # type: Dict[str, Dict]
self.assertEqual(len(link_search_result['messages']), 1)
self.assertEqual(link_search_result['messages'][0]['match_content'],
'<p><a href="https://google.com" target="_blank" title="https://google.com"><span class="highlight">https://google.com</span></a></p>')
def test_messages_in_narrow_for_non_search(self):
# type: () -> None
email = self.example_email("cordelia")
self.login(email)
def send(content):
# type: (Text) -> int
msg_id = self.send_message(
sender_name=email,
raw_recipients="Verona",
message_type=Recipient.STREAM,
subject='test_topic',
content=content,
)
return msg_id
good_id = send('http://foo.com')
bad_id = send('no link here')
msg_ids = [good_id, bad_id]
send('http://bar.com but not in msg_ids')
narrow = [
dict(operator='has', operand='link'),
]
raw_params = dict(msg_ids=msg_ids, narrow=narrow)
params = {k: ujson.dumps(v) for k, v in raw_params.items()}
result = self.client_get('/json/messages/matches_narrow', params)
self.assert_json_success(result)
messages = result.json()['messages']
self.assertEqual(len(list(messages.keys())), 1)
message = messages[str(good_id)]
self.assertIn('a href=', message['match_content'])
self.assertIn('http://foo.com', message['match_content'])
self.assertEqual(message['match_subject'], 'test_topic')
def test_get_messages_with_only_searching_anchor(self):
# type: () -> None
"""
Test that specifying an anchor but 0 for num_before and num_after
returns at most 1 message.
"""
self.login(self.example_email("cordelia"))
anchor = self.send_message(self.example_email("cordelia"), "Verona", Recipient.STREAM)
narrow = [dict(operator='sender', operand=self.example_email("cordelia"))]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow),
anchor=anchor, num_before=0,
num_after=0)) # type: Dict[str, Dict]
self.assertEqual(len(result['messages']), 1)
narrow = [dict(operator='is', operand='mentioned')]
result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow),
anchor=anchor, num_before=0,
num_after=0))
self.assertEqual(len(result['messages']), 0)
def test_missing_params(self):
# type: () -> None
"""
anchor, num_before, and num_after are all required
POST parameters for get_messages.
"""
self.login(self.example_email("hamlet"))
required_args = (("anchor", 1), ("num_before", 1), ("num_after", 1)) # type: Tuple[Tuple[Text, int], ...]
for i in range(len(required_args)):
post_params = dict(required_args[:i] + required_args[i + 1:])
result = self.client_get("/json/messages", post_params)
self.assert_json_error(result,
"Missing '%s' argument" % (required_args[i][0],))
def test_bad_int_params(self):
# type: () -> None
"""
        num_before and num_after must be non-negative integers or strings
        that can be converted to non-negative integers.
"""
self.login(self.example_email("hamlet"))
other_params = [("narrow", {}), ("anchor", 0)]
int_params = ["num_before", "num_after"]
bad_types = (False, "", "-1", -1)
for idx, param in enumerate(int_params):
for type in bad_types:
# Rotate through every bad type for every integer
# parameter, one at a time.
post_params = dict(other_params + [(param, type)] +
[(other_param, 0) for other_param in
int_params[:idx] + int_params[idx + 1:]]
)
result = self.client_get("/json/messages", post_params)
self.assert_json_error(result,
"Bad value for '%s': %s" % (param, type))
def test_bad_narrow_type(self):
# type: () -> None
"""
narrow must be a list of string pairs.
"""
self.login(self.example_email("hamlet"))
other_params = [("anchor", 0), ("num_before", 0), ("num_after", 0)] # type: List[Tuple[Text, Union[int, str, bool]]]
bad_types = (False, 0, '', '{malformed json,',
'{foo: 3}', '[1,2]', '[["x","y","z"]]') # type: Tuple[Union[int, str, bool], ...]
for type in bad_types:
post_params = dict(other_params + [("narrow", type)])
result = self.client_get("/json/messages", post_params)
self.assert_json_error(result,
"Bad value for 'narrow': %s" % (type,))
def test_bad_narrow_operator(self):
# type: () -> None
"""
Unrecognized narrow operators are rejected.
"""
self.login(self.example_email("hamlet"))
for operator in ['', 'foo', 'stream:verona', '__init__']:
narrow = [dict(operator=operator, operand='')]
params = dict(anchor=0, num_before=0, num_after=0, narrow=ujson.dumps(narrow))
result = self.client_get("/json/messages", params)
self.assert_json_error_contains(result,
"Invalid narrow operator: unknown operator")
def test_non_string_narrow_operand_in_dict(self):
# type: () -> None
"""
We expect search operands to be strings, not integers.
"""
self.login(self.example_email("hamlet"))
not_a_string = 42
narrow = [dict(operator='stream', operand=not_a_string)]
params = dict(anchor=0, num_before=0, num_after=0, narrow=ujson.dumps(narrow))
result = self.client_get("/json/messages", params)
self.assert_json_error_contains(result, 'elem["operand"] is not a string')
def exercise_bad_narrow_operand(self, operator, operands, error_msg):
# type: (Text, Sequence, Text) -> None
other_params = [("anchor", 0), ("num_before", 0), ("num_after", 0)] # type: List
for operand in operands:
post_params = dict(other_params + [
("narrow", ujson.dumps([[operator, operand]]))])
result = self.client_get("/json/messages", post_params)
self.assert_json_error_contains(result, error_msg)
def test_bad_narrow_stream_content(self):
# type: () -> None
"""
If an invalid stream name is requested in get_messages, an error is
returned.
"""
self.login(self.example_email("hamlet"))
bad_stream_content = (0, [], ["x", "y"]) # type: Sequence
self.exercise_bad_narrow_operand("stream", bad_stream_content,
"Bad value for 'narrow'")
def test_bad_narrow_one_on_one_email_content(self):
# type: () -> None
"""
If an invalid 'pm-with' is requested in get_messages, an
error is returned.
"""
self.login(self.example_email("hamlet"))
bad_stream_content = (0, [], ["x", "y"]) # type: Tuple[int, List[None], List[Text]]
self.exercise_bad_narrow_operand("pm-with", bad_stream_content,
"Bad value for 'narrow'")
def test_bad_narrow_nonexistent_stream(self):
# type: () -> None
self.login(self.example_email("hamlet"))
self.exercise_bad_narrow_operand("stream", ['non-existent stream'],
"Invalid narrow operator: unknown stream")
def test_bad_narrow_nonexistent_email(self):
# type: () -> None
self.login(self.example_email("hamlet"))
self.exercise_bad_narrow_operand("pm-with", ['[email protected]'],
"Invalid narrow operator: unknown user")
def test_message_without_rendered_content(self):
# type: () -> None
"""Older messages may not have rendered_content in the database"""
m = self.get_last_message()
m.rendered_content = m.rendered_content_version = None
m.content = 'test content'
# Use to_dict_uncached_helper directly to avoid having to deal with remote cache
d = MessageDict.to_dict_uncached_helper(m, True)
self.assertEqual(d['content'], '<p>test content</p>')
def common_check_get_messages_query(self, query_params, expected):
# type: (Dict[str, object], Text) -> None
user_profile = self.example_user('hamlet')
request = POSTRequestMock(query_params, user_profile)
with queries_captured() as queries:
get_messages_backend(request, user_profile)
for query in queries:
if "/* get_messages */" in query['sql']:
sql = str(query['sql']).replace(" /* get_messages */", '')
self.assertEqual(sql, expected)
return
raise AssertionError("get_messages query not found")
def test_use_first_unread_anchor_with_some_unread_messages(self):
# type: () -> None
user_profile = self.example_user('hamlet')
# Have Othello send messages to Hamlet that he hasn't read.
self.send_message(self.example_email("othello"), "Scotland", Recipient.STREAM)
last_message_id_to_hamlet = self.send_message(self.example_email("othello"), self.example_email("hamlet"), Recipient.PERSONAL)
# Add a few messages that help us test that our query doesn't
# look at messages that are irrelevant to Hamlet.
self.send_message(self.example_email("othello"), self.example_email("cordelia"), Recipient.PERSONAL)
self.send_message(self.example_email("othello"), self.example_email("iago"), Recipient.PERSONAL)
query_params = dict(
use_first_unread_anchor='true',
anchor=0,
num_before=10,
num_after=10,
narrow='[]'
)
request = POSTRequestMock(query_params, user_profile)
with queries_captured() as all_queries:
get_messages_backend(request, user_profile)
# Verify the query for old messages looks correct.
queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
self.assertEqual(len(queries), 1)
sql = queries[0]['sql']
self.assertNotIn('AND message_id = %s' % (LARGER_THAN_MAX_MESSAGE_ID,), sql)
self.assertIn('ORDER BY message_id ASC', sql)
cond = 'WHERE user_profile_id = %d AND message_id >= %d' % (user_profile.id, last_message_id_to_hamlet)
self.assertIn(cond, sql)
cond = 'WHERE user_profile_id = %d AND message_id <= %d' % (user_profile.id, last_message_id_to_hamlet - 1)
self.assertIn(cond, sql)
def test_use_first_unread_anchor_with_no_unread_messages(self):
# type: () -> None
user_profile = self.example_user('hamlet')
query_params = dict(
use_first_unread_anchor='true',
anchor=0,
num_before=10,
num_after=10,
narrow='[]'
)
request = POSTRequestMock(query_params, user_profile)
with queries_captured() as all_queries:
get_messages_backend(request, user_profile)
# Next, verify the use_first_unread_anchor setting invokes
# the `message_id = LARGER_THAN_MAX_MESSAGE_ID` hack.
queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
self.assertEqual(len(queries), 1)
self.assertIn('AND message_id <= %d' % (LARGER_THAN_MAX_MESSAGE_ID - 1,), queries[0]['sql'])
# There should not be an after_query in this case, since it'd be useless
self.assertNotIn('AND message_id >= %d' % (LARGER_THAN_MAX_MESSAGE_ID,), queries[0]['sql'])
def test_use_first_unread_anchor_with_muted_topics(self):
# type: () -> None
"""
Test that our logic related to `use_first_unread_anchor`
invokes the `message_id = LARGER_THAN_MAX_MESSAGE_ID` hack for
the `/* get_messages */` query when relevant muting
is in effect.
        This is a very arcane test of arcane, but very heavily
field-tested, logic in get_messages_backend(). If
this test breaks, be absolutely sure you know what you're
doing.
"""
realm = get_realm('zulip')
self.make_stream('web stuff')
user_profile = self.example_user('hamlet')
user_profile.muted_topics = ujson.dumps([['Scotland', 'golf'], ['web stuff', 'css'], ['bogus', 'bogus']])
user_profile.save()
query_params = dict(
use_first_unread_anchor='true',
anchor=0,
num_before=0,
num_after=0,
narrow='[["stream", "Scotland"]]'
)
request = POSTRequestMock(query_params, user_profile)
with queries_captured() as all_queries:
get_messages_backend(request, user_profile)
# Do some tests on the main query, to verify the muting logic
# runs on this code path.
queries = [q for q in all_queries if str(q['sql']).startswith("SELECT message_id, flags")]
self.assertEqual(len(queries), 1)
stream = get_stream('Scotland', realm)
recipient_id = get_recipient(Recipient.STREAM, stream.id).id
cond = '''AND NOT (recipient_id = {scotland} AND upper(subject) = upper('golf'))'''.format(scotland=recipient_id)
self.assertIn(cond, queries[0]['sql'])
# Next, verify the use_first_unread_anchor setting invokes
# the `message_id = LARGER_THAN_MAX_MESSAGE_ID` hack.
queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
self.assertEqual(len(queries), 1)
self.assertIn('AND message_id = %d' % (LARGER_THAN_MAX_MESSAGE_ID,),
queries[0]['sql'])
def test_exclude_muting_conditions(self):
# type: () -> None
realm = get_realm('zulip')
self.make_stream('web stuff')
user_profile = self.example_user('hamlet')
# Test the do-nothing case first.
user_profile.muted_topics = ujson.dumps([['irrelevant_stream', 'irrelevant_topic']])
user_profile.save()
# If nothing relevant is muted, then exclude_muting_conditions()
# should return an empty list.
narrow = [
dict(operator='stream', operand='Scotland'),
]
muting_conditions = exclude_muting_conditions(user_profile, narrow)
self.assertEqual(muting_conditions, [])
# Ok, now set up our muted topics to include a topic relevant to our narrow.
user_profile.muted_topics = ujson.dumps([['Scotland', 'golf'], ['web stuff', 'css'], ['bogus', 'bogus']])
user_profile.save()
# And verify that our query will exclude them.
narrow = [
dict(operator='stream', operand='Scotland'),
]
muting_conditions = exclude_muting_conditions(user_profile, narrow)
query = select([column("id").label("message_id")], None, table("zerver_message"))
query = query.where(*muting_conditions)
expected_query = '''
SELECT id AS message_id
FROM zerver_message
WHERE NOT (recipient_id = :recipient_id_1 AND upper(subject) = upper(:upper_1))
'''
self.assertEqual(fix_ws(query), fix_ws(expected_query))
params = get_sqlalchemy_query_params(query)
self.assertEqual(params['recipient_id_1'], get_recipient_id_for_stream_name(realm, 'Scotland'))
self.assertEqual(params['upper_1'], 'golf')
mute_stream(realm, user_profile, 'Verona')
narrow = []
muting_conditions = exclude_muting_conditions(user_profile, narrow)
query = select([column("id")], None, table("zerver_message"))
query = query.where(and_(*muting_conditions))
expected_query = '''
SELECT id
FROM zerver_message
WHERE recipient_id NOT IN (:recipient_id_1)
AND NOT
(recipient_id = :recipient_id_2 AND upper(subject) = upper(:upper_1) OR
recipient_id = :recipient_id_3 AND upper(subject) = upper(:upper_2))'''
self.assertEqual(fix_ws(query), fix_ws(expected_query))
params = get_sqlalchemy_query_params(query)
self.assertEqual(params['recipient_id_1'], get_recipient_id_for_stream_name(realm, 'Verona'))
self.assertEqual(params['recipient_id_2'], get_recipient_id_for_stream_name(realm, 'Scotland'))
self.assertEqual(params['upper_1'], 'golf')
self.assertEqual(params['recipient_id_3'], get_recipient_id_for_stream_name(realm, 'web stuff'))
self.assertEqual(params['upper_2'], 'css')
def test_get_messages_queries(self):
# type: () -> None
query_ids = self.get_query_ids()
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 11) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10}, sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id <= 100 ORDER BY message_id DESC \n LIMIT 11) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 100, 'num_before': 10, 'num_after': 0}, sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM ((SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id <= 99 ORDER BY message_id DESC \n LIMIT 10) UNION ALL (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id >= 100 ORDER BY message_id ASC \n LIMIT 11)) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 100, 'num_before': 10, 'num_after': 10}, sql)
def test_get_messages_with_narrow_queries(self):
# type: () -> None
query_ids = self.get_query_ids()
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (sender_id = {othello_id} AND recipient_id = {hamlet_recipient} OR sender_id = {hamlet_id} AND recipient_id = {othello_recipient}) AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
'narrow': '[["pm-with", "%s"]]' % (self.example_email("othello"),)},
sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (flags & 2) != 0 AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
'narrow': '[["is", "starred"]]'},
sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND sender_id = {othello_id} AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
'narrow': '[["sender", "%s"]]' % (self.example_email("othello"),)},
sql)
sql_template = 'SELECT anon_1.message_id \nFROM (SELECT id AS message_id \nFROM zerver_message \nWHERE recipient_id = {scotland_recipient} AND zerver_message.id >= 0 ORDER BY zerver_message.id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
'narrow': '[["stream", "Scotland"]]'},
sql)
sql_template = "SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND upper(subject) = upper('blah') AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC"
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
'narrow': '[["topic", "blah"]]'},
sql)
sql_template = "SELECT anon_1.message_id \nFROM (SELECT id AS message_id \nFROM zerver_message \nWHERE recipient_id = {scotland_recipient} AND upper(subject) = upper('blah') AND zerver_message.id >= 0 ORDER BY zerver_message.id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC"
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
'narrow': '[["stream", "Scotland"], ["topic", "blah"]]'},
sql)
# Narrow to pms with yourself
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND sender_id = {hamlet_id} AND recipient_id = {hamlet_recipient} AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
'narrow': '[["pm-with", "%s"]]' % (self.example_email("hamlet"),)},
sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND recipient_id = {scotland_recipient} AND (flags & 2) != 0 AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
'narrow': '[["stream", "Scotland"], ["is", "starred"]]'},
sql)
@override_settings(USING_PGROONGA=False)
def test_get_messages_with_search_queries(self):
# type: () -> None
query_ids = self.get_query_ids()
sql_template = "SELECT anon_1.message_id, anon_1.flags, anon_1.subject, anon_1.rendered_content, anon_1.content_matches, anon_1.subject_matches \nFROM (SELECT message_id, flags, subject, rendered_content, ts_match_locs_array('zulip.english_us_search', rendered_content, plainto_tsquery('zulip.english_us_search', 'jumping')) AS content_matches, ts_match_locs_array('zulip.english_us_search', escape_html(subject), plainto_tsquery('zulip.english_us_search', 'jumping')) AS subject_matches \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (search_tsvector @@ plainto_tsquery('zulip.english_us_search', 'jumping')) AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC" # type: Text
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
'narrow': '[["search", "jumping"]]'},
sql)
sql_template = "SELECT anon_1.message_id, anon_1.subject, anon_1.rendered_content, anon_1.content_matches, anon_1.subject_matches \nFROM (SELECT id AS message_id, subject, rendered_content, ts_match_locs_array('zulip.english_us_search', rendered_content, plainto_tsquery('zulip.english_us_search', 'jumping')) AS content_matches, ts_match_locs_array('zulip.english_us_search', escape_html(subject), plainto_tsquery('zulip.english_us_search', 'jumping')) AS subject_matches \nFROM zerver_message \nWHERE recipient_id = {scotland_recipient} AND (search_tsvector @@ plainto_tsquery('zulip.english_us_search', 'jumping')) AND zerver_message.id >= 0 ORDER BY zerver_message.id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC"
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
'narrow': '[["stream", "Scotland"], ["search", "jumping"]]'},
sql)
sql_template = 'SELECT anon_1.message_id, anon_1.flags, anon_1.subject, anon_1.rendered_content, anon_1.content_matches, anon_1.subject_matches \nFROM (SELECT message_id, flags, subject, rendered_content, ts_match_locs_array(\'zulip.english_us_search\', rendered_content, plainto_tsquery(\'zulip.english_us_search\', \'"jumping" quickly\')) AS content_matches, ts_match_locs_array(\'zulip.english_us_search\', escape_html(subject), plainto_tsquery(\'zulip.english_us_search\', \'"jumping" quickly\')) AS subject_matches \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (content ILIKE \'%jumping%\' OR subject ILIKE \'%jumping%\') AND (search_tsvector @@ plainto_tsquery(\'zulip.english_us_search\', \'"jumping" quickly\')) AND message_id >= 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
sql = sql_template.format(**query_ids)
self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10,
'narrow': '[["search", "\\"jumping\\" quickly"]]'},
sql)
@override_settings(USING_PGROONGA=False)
def test_get_messages_with_search_using_email(self):
# type: () -> None
self.login(self.example_email("cordelia"))
messages_to_search = [
('say hello', 'How are you doing, @**Othello, the Moor of Venice**?'),
('lunch plans', 'I am hungry!'),
]
next_message_id = self.get_last_message().id + 1
for topic, content in messages_to_search:
self.send_message(
sender_name=self.example_email("cordelia"),
raw_recipients="Verona",
message_type=Recipient.STREAM,
content=content,
subject=topic,
)
self._update_tsvector_index()
narrow = [
dict(operator='sender', operand=self.example_email("cordelia")),
dict(operator='search', operand=self.example_email("othello")),
]
result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_after=10,
)) # type: Dict[str, Dict]
self.assertEqual(len(result['messages']), 0)
narrow = [
dict(operator='sender', operand=self.example_email("cordelia")),
dict(operator='search', operand='othello'),
]
result = self.get_and_check_messages(dict(
narrow=ujson.dumps(narrow),
anchor=next_message_id,
num_after=10,
))
self.assertEqual(len(result['messages']), 1)
messages = result['messages']
meeting_message = [m for m in messages if m['subject'] == 'say hello'][0]
self.assertEqual(
meeting_message['match_subject'],
'say hello')
self.assertEqual(
meeting_message['match_content'],
('<p>How are you doing, <span class="user-mention" data-user-email="%s" data-user-id="6">' +
'@<span class="highlight">Othello</span>, the Moor of Venice</span>?</p>') % (
self.example_email("othello"),))
|
{
"content_hash": "9417430d8347b7deb325e83ba5818c21",
"timestamp": "",
"source": "github",
"line_count": 1509,
"max_line_length": 914,
"avg_line_length": 48.77070907886017,
"alnum_prop": 0.5998369454446634,
"repo_name": "vaidap/zulip",
"id": "d7e8eee7e8ce89d015ecc871480fb14b593f7e4c",
"size": "73765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/tests/test_narrow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "416449"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "472724"
},
{
"name": "JavaScript",
"bytes": "2123247"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "84574"
},
{
"name": "Python",
"bytes": "3669105"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "44486"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from .utils import parse_message_attributes
from .models import sqs_backends
from .exceptions import (
MessageAttributesInvalid,
MessageNotInflight,
ReceiptHandleIsInvalid
)
MAXIMUM_VISIBILTY_TIMEOUT = 43200
SQS_REGION_REGEX = r'://(.+?)\.queue\.amazonaws\.com'
class QueuesResponse(BaseResponse):
region_regex = SQS_REGION_REGEX
@property
def sqs_backend(self):
return sqs_backends[self.region]
def create_queue(self):
visibility_timeout = None
if 'Attribute.1.Name' in self.querystring and self.querystring.get('Attribute.1.Name')[0] == 'VisibilityTimeout':
visibility_timeout = self.querystring.get("Attribute.1.Value")[0]
queue_name = self.querystring.get("QueueName")[0]
queue = self.sqs_backend.create_queue(queue_name, visibility_timeout=visibility_timeout)
template = self.response_template(CREATE_QUEUE_RESPONSE)
return template.render(queue=queue)
def get_queue_url(self):
queue_name = self.querystring.get("QueueName")[0]
queue = self.sqs_backend.get_queue(queue_name)
if queue:
template = self.response_template(GET_QUEUE_URL_RESPONSE)
return template.render(queue=queue)
else:
return "", dict(status=404)
def list_queues(self):
queue_name_prefix = self.querystring.get("QueueNamePrefix", [None])[0]
queues = self.sqs_backend.list_queues(queue_name_prefix)
template = self.response_template(LIST_QUEUES_RESPONSE)
return template.render(queues=queues)
class QueueResponse(BaseResponse):
region_regex = SQS_REGION_REGEX
@property
def sqs_backend(self):
return sqs_backends[self.region]
def change_message_visibility(self):
queue_name = self.path.split("/")[-1]
receipt_handle = self.querystring.get("ReceiptHandle")[0]
visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0])
if visibility_timeout > MAXIMUM_VISIBILTY_TIMEOUT:
return "Invalid request, maximum visibility timeout is {0}".format(
MAXIMUM_VISIBILTY_TIMEOUT
), dict(status=400)
try:
self.sqs_backend.change_message_visibility(
queue_name=queue_name,
receipt_handle=receipt_handle,
visibility_timeout=visibility_timeout
)
except (ReceiptHandleIsInvalid, MessageNotInflight) as e:
return "Invalid request: {0}".format(e.description), dict(status=e.status_code)
template = self.response_template(CHANGE_MESSAGE_VISIBILITY_RESPONSE)
return template.render()
def get_queue_attributes(self):
queue_name = self.path.split("/")[-1]
queue = self.sqs_backend.get_queue(queue_name)
template = self.response_template(GET_QUEUE_ATTRIBUTES_RESPONSE)
return template.render(queue=queue)
def set_queue_attributes(self):
queue_name = self.path.split("/")[-1]
key = camelcase_to_underscores(self.querystring.get('Attribute.Name')[0])
value = self.querystring.get('Attribute.Value')[0]
self.sqs_backend.set_queue_attribute(queue_name, key, value)
return SET_QUEUE_ATTRIBUTE_RESPONSE
def delete_queue(self):
queue_name = self.path.split("/")[-1]
queue = self.sqs_backend.delete_queue(queue_name)
if not queue:
return "A queue with name {0} does not exist".format(queue_name), dict(status=404)
template = self.response_template(DELETE_QUEUE_RESPONSE)
return template.render(queue=queue)
def send_message(self):
message = self.querystring.get("MessageBody")[0]
delay_seconds = self.querystring.get('DelaySeconds')
if delay_seconds:
delay_seconds = int(delay_seconds[0])
else:
delay_seconds = 0
try:
message_attributes = parse_message_attributes(self.querystring)
except MessageAttributesInvalid as e:
return e.description, dict(status=e.status_code)
queue_name = self.path.split("/")[-1]
message = self.sqs_backend.send_message(
queue_name,
message,
message_attributes=message_attributes,
delay_seconds=delay_seconds
)
template = self.response_template(SEND_MESSAGE_RESPONSE)
return template.render(message=message, message_attributes=message_attributes)
def send_message_batch(self):
"""
The querystring comes like this
'SendMessageBatchRequestEntry.1.DelaySeconds': ['0'],
'SendMessageBatchRequestEntry.1.MessageBody': ['test message 1'],
'SendMessageBatchRequestEntry.1.Id': ['6d0f122d-4b13-da2c-378f-e74244d8ad11']
'SendMessageBatchRequestEntry.2.Id': ['ff8cbf59-70a2-c1cb-44c7-b7469f1ba390'],
'SendMessageBatchRequestEntry.2.MessageBody': ['test message 2'],
'SendMessageBatchRequestEntry.2.DelaySeconds': ['0'],
"""
queue_name = self.path.split("/")[-1]
messages = []
for index in range(1, 11):
# Loop through looking for messages
message_key = 'SendMessageBatchRequestEntry.{0}.MessageBody'.format(index)
message_body = self.querystring.get(message_key)
if not message_body:
# Found all messages
break
message_user_id_key = 'SendMessageBatchRequestEntry.{0}.Id'.format(index)
message_user_id = self.querystring.get(message_user_id_key)[0]
delay_key = 'SendMessageBatchRequestEntry.{0}.DelaySeconds'.format(index)
delay_seconds = self.querystring.get(delay_key, [None])[0]
message = self.sqs_backend.send_message(queue_name, message_body[0], delay_seconds=delay_seconds)
message.user_id = message_user_id
message_attributes = parse_message_attributes(self.querystring, base='SendMessageBatchRequestEntry.{0}.'.format(index), value_namespace='')
if type(message_attributes) == tuple:
return message_attributes[0], message_attributes[1]
message.message_attributes = message_attributes
messages.append(message)
template = self.response_template(SEND_MESSAGE_BATCH_RESPONSE)
return template.render(messages=messages)
def delete_message(self):
queue_name = self.path.split("/")[-1]
receipt_handle = self.querystring.get("ReceiptHandle")[0]
self.sqs_backend.delete_message(queue_name, receipt_handle)
template = self.response_template(DELETE_MESSAGE_RESPONSE)
return template.render()
def delete_message_batch(self):
"""
The querystring comes like this
'DeleteMessageBatchRequestEntry.1.Id': ['message_1'],
'DeleteMessageBatchRequestEntry.1.ReceiptHandle': ['asdfsfs...'],
'DeleteMessageBatchRequestEntry.2.Id': ['message_2'],
'DeleteMessageBatchRequestEntry.2.ReceiptHandle': ['zxcvfda...'],
...
"""
queue_name = self.path.split("/")[-1]
message_ids = []
for index in range(1, 11):
# Loop through looking for messages
receipt_key = 'DeleteMessageBatchRequestEntry.{0}.ReceiptHandle'.format(index)
receipt_handle = self.querystring.get(receipt_key)
if not receipt_handle:
# Found all messages
break
self.sqs_backend.delete_message(queue_name, receipt_handle[0])
message_user_id_key = 'DeleteMessageBatchRequestEntry.{0}.Id'.format(index)
message_user_id = self.querystring.get(message_user_id_key)[0]
message_ids.append(message_user_id)
template = self.response_template(DELETE_MESSAGE_BATCH_RESPONSE)
return template.render(message_ids=message_ids)
def purge_queue(self):
queue_name = self.path.split("/")[-1]
self.sqs_backend.purge_queue(queue_name)
template = self.response_template(PURGE_QUEUE_RESPONSE)
return template.render()
def receive_message(self):
queue_name = self.path.split("/")[-1]
message_count = int(self.querystring.get("MaxNumberOfMessages")[0])
messages = self.sqs_backend.receive_messages(queue_name, message_count)
template = self.response_template(RECEIVE_MESSAGE_RESPONSE)
output = template.render(messages=messages)
return output
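# --- Illustrative sketch (not part of the original module) -----------------
# The batch handlers above (send_message_batch, delete_message_batch) walk
# numbered querystring keys of the form ``<Prefix>.<index>.<field>`` until an
# index is missing.  This hypothetical helper shows that convention on a
# plain dict shaped like ``self.querystring`` (every value is a list of
# strings, as parsed from the request body).
def _example_iter_batch_entries(querystring, prefix='SendMessageBatchRequestEntry'):
    entries = []
    for index in range(1, 11):
        body = querystring.get('{0}.{1}.MessageBody'.format(prefix, index))
        if not body:
            # no entry at this index, so the batch is exhausted
            break
        entries.append({
            'Id': querystring.get('{0}.{1}.Id'.format(prefix, index))[0],
            'MessageBody': body[0],
        })
    return entries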
CREATE_QUEUE_RESPONSE = """<CreateQueueResponse>
<CreateQueueResult>
<QueueUrl>http://sqs.us-east-1.amazonaws.com/123456789012/{{ queue.name }}</QueueUrl>
<VisibilityTimeout>{{ queue.visibility_timeout }}</VisibilityTimeout>
</CreateQueueResult>
<ResponseMetadata>
<RequestId>
7a62c49f-347e-4fc4-9331-6e8e7a96aa73
</RequestId>
</ResponseMetadata>
</CreateQueueResponse>"""
GET_QUEUE_URL_RESPONSE = """<GetQueueUrlResponse>
<GetQueueUrlResult>
<QueueUrl>http://sqs.us-east-1.amazonaws.com/123456789012/{{ queue.name }}</QueueUrl>
</GetQueueUrlResult>
<ResponseMetadata>
<RequestId>470a6f13-2ed9-4181-ad8a-2fdea142988e</RequestId>
</ResponseMetadata>
</GetQueueUrlResponse>"""
LIST_QUEUES_RESPONSE = """<ListQueuesResponse>
<ListQueuesResult>
{% for queue in queues %}
<QueueUrl>http://sqs.us-east-1.amazonaws.com/123456789012/{{ queue.name }}</QueueUrl>
{% endfor %}
</ListQueuesResult>
<ResponseMetadata>
<RequestId>
725275ae-0b9b-4762-b238-436d7c65a1ac
</RequestId>
</ResponseMetadata>
</ListQueuesResponse>"""
DELETE_QUEUE_RESPONSE = """<DeleteQueueResponse>
<ResponseMetadata>
<RequestId>
6fde8d1e-52cd-4581-8cd9-c512f4c64223
</RequestId>
</ResponseMetadata>
</DeleteQueueResponse>"""
GET_QUEUE_ATTRIBUTES_RESPONSE = """<GetQueueAttributesResponse>
<GetQueueAttributesResult>
{% for key, value in queue.attributes.items() %}
<Attribute>
<Name>{{ key }}</Name>
<Value>{{ value }}</Value>
</Attribute>
{% endfor %}
</GetQueueAttributesResult>
<ResponseMetadata>
<RequestId>1ea71be5-b5a2-4f9d-b85a-945d8d08cd0b</RequestId>
</ResponseMetadata>
</GetQueueAttributesResponse>"""
SET_QUEUE_ATTRIBUTE_RESPONSE = """<SetQueueAttributesResponse>
<ResponseMetadata>
<RequestId>
e5cca473-4fc0-4198-a451-8abb94d02c75
</RequestId>
</ResponseMetadata>
</SetQueueAttributesResponse>"""
SEND_MESSAGE_RESPONSE = """<SendMessageResponse>
<SendMessageResult>
<MD5OfMessageBody>
{{ message.md5 }}
</MD5OfMessageBody>
{% if message.message_attributes.items()|count > 0 %}
<MD5OfMessageAttributes>324758f82d026ac6ec5b31a3b192d1e3</MD5OfMessageAttributes>
{% endif %}
<MessageId>
{{ message.id }}
</MessageId>
</SendMessageResult>
<ResponseMetadata>
<RequestId>
27daac76-34dd-47df-bd01-1f6e873584a0
</RequestId>
</ResponseMetadata>
</SendMessageResponse>"""
RECEIVE_MESSAGE_RESPONSE = """<ReceiveMessageResponse>
<ReceiveMessageResult>
{% for message in messages %}
<Message>
<MessageId>{{ message.id }}</MessageId>
<ReceiptHandle>{{ message.receipt_handle }}</ReceiptHandle>
<MD5OfBody>{{ message.md5 }}</MD5OfBody>
<Body>{{ message.body }}</Body>
<Attribute>
<Name>SenderId</Name>
<Value>{{ message.sender_id }}</Value>
</Attribute>
<Attribute>
<Name>SentTimestamp</Name>
<Value>{{ message.sent_timestamp }}</Value>
</Attribute>
<Attribute>
<Name>ApproximateReceiveCount</Name>
<Value>{{ message.approximate_receive_count }}</Value>
</Attribute>
<Attribute>
<Name>ApproximateFirstReceiveTimestamp</Name>
<Value>{{ message.approximate_first_receive_timestamp }}</Value>
</Attribute>
{% if message.message_attributes.items()|count > 0 %}
<MD5OfMessageAttributes>324758f82d026ac6ec5b31a3b192d1e3</MD5OfMessageAttributes>
{% endif %}
{% for name, value in message.message_attributes.items() %}
<MessageAttribute>
<Name>{{ name }}</Name>
<Value>
<DataType>{{ value.data_type }}</DataType>
{% if 'Binary' in value.data_type %}
<BinaryValue>{{ value.binary_value }}</BinaryValue>
{% else %}
<StringValue>{{ value.string_value }}</StringValue>
{% endif %}
</Value>
</MessageAttribute>
{% endfor %}
</Message>
{% endfor %}
</ReceiveMessageResult>
<ResponseMetadata>
<RequestId>
b6633655-283d-45b4-aee4-4e84e0ae6afa
</RequestId>
</ResponseMetadata>
</ReceiveMessageResponse>"""
SEND_MESSAGE_BATCH_RESPONSE = """<SendMessageBatchResponse>
<SendMessageBatchResult>
{% for message in messages %}
<SendMessageBatchResultEntry>
<Id>{{ message.user_id }}</Id>
<MessageId>{{ message.id }}</MessageId>
<MD5OfMessageBody>{{ message.md5 }}</MD5OfMessageBody>
{% if message.message_attributes.items()|count > 0 %}
<MD5OfMessageAttributes>324758f82d026ac6ec5b31a3b192d1e3</MD5OfMessageAttributes>
{% endif %}
</SendMessageBatchResultEntry>
{% endfor %}
</SendMessageBatchResult>
<ResponseMetadata>
<RequestId>ca1ad5d0-8271-408b-8d0f-1351bf547e74</RequestId>
</ResponseMetadata>
</SendMessageBatchResponse>"""
DELETE_MESSAGE_RESPONSE = """<DeleteMessageResponse>
<ResponseMetadata>
<RequestId>
b5293cb5-d306-4a17-9048-b263635abe42
</RequestId>
</ResponseMetadata>
</DeleteMessageResponse>"""
DELETE_MESSAGE_BATCH_RESPONSE = """<DeleteMessageBatchResponse>
<DeleteMessageBatchResult>
{% for message_id in message_ids %}
<DeleteMessageBatchResultEntry>
<Id>{{ message_id }}</Id>
</DeleteMessageBatchResultEntry>
{% endfor %}
</DeleteMessageBatchResult>
<ResponseMetadata>
<RequestId>d6f86b7a-74d1-4439-b43f-196a1e29cd85</RequestId>
</ResponseMetadata>
</DeleteMessageBatchResponse>"""
CHANGE_MESSAGE_VISIBILITY_RESPONSE = """<ChangeMessageVisibilityResponse>
<ResponseMetadata>
<RequestId>
6a7a282a-d013-4a59-aba9-335b0fa48bed
</RequestId>
</ResponseMetadata>
</ChangeMessageVisibilityResponse>"""
PURGE_QUEUE_RESPONSE = """<PurgeQueueResponse>
<ResponseMetadata>
<RequestId>
6fde8d1e-52cd-4581-8cd9-c512f4c64223
</RequestId>
</ResponseMetadata>
</PurgeQueueResponse>"""
|
{
"content_hash": "62852d5aad7d0e77af3ffaa0e1f178a5",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 151,
"avg_line_length": 37.29207920792079,
"alnum_prop": 0.6371963361210673,
"repo_name": "jotes/moto",
"id": "69b3f63df1d3c667c3b0371f529705c6e24e80af",
"size": "15066",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "moto/sqs/responses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "203"
},
{
"name": "Python",
"bytes": "1409322"
}
],
"symlink_target": ""
}
|
"""This processor reshapes the data to match the fiscal schema."""
from datapackage_pipelines.wrapper import ingest
from datapackage_pipelines.wrapper import spew
from common.utilities import get_fiscal_field_names
import logging
def process_row(row, fiscal_fields):
"""Add and remove appropriate columns.
"""
surplus_keys = set(row) - set(fiscal_fields)
missing_keys = set(fiscal_fields) - set(row)
for key in missing_keys:
row[key] = None
for key in surplus_keys:
del row[key]
assert set(row) == set(fiscal_fields)
return row
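# Illustrative sketch (not part of the original processor): the field names
# below are invented purely to show the contract of process_row -- missing
# fiscal fields are padded with None and surplus keys are dropped.
def _example_process_row():
    fiscal_fields = ['amount', 'currency']
    row = {'amount': 10, 'unexpected': 'dropped'}
    return process_row(row, fiscal_fields)  # == {'amount': 10, 'currency': None}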
def process_resources(resources, fiscal_fields):
"""Return an iterator of row iterators.
"""
for resource in resources:
def process_rows(resource_):
for i, row in enumerate(resource_):
yield process_row(row, fiscal_fields)
yield process_rows(resource)
if __name__ == '__main__':
parameters_, datapackage_, resources_ = ingest()
for resource in datapackage_['resources']:
fiscal_fields_ = set(get_fiscal_field_names())
fields = resource['schema']['fields']
new_fields = []
for field in fields:
if field['name'] in fiscal_fields_:
new_fields.append(field)
fiscal_fields_.remove(field['name'])
for f in fiscal_fields_:
new_fields.append({
'name': f,
'type': 'string'
})
resource['schema']['fields'] = new_fields
fiscal_fields_ = set(get_fiscal_field_names())
new_resources_ = process_resources(resources_, fiscal_fields_)
spew(datapackage_, new_resources_)
|
{
"content_hash": "543e0fb5cad23ec7e6b89771d33aaf07",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 66,
"avg_line_length": 32.588235294117645,
"alnum_prop": 0.6155234657039711,
"repo_name": "Victordeleon/os-data-importers",
"id": "a65b2212e72eb44db65ce491a76aad6aef93a992",
"size": "1662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eu-structural-funds/common/processors/reshape_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "69558"
},
{
"name": "Jupyter Notebook",
"bytes": "99238"
},
{
"name": "Makefile",
"bytes": "226"
},
{
"name": "Python",
"bytes": "195915"
},
{
"name": "Shell",
"bytes": "924"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from . import models
class PhotoAdmin(admin.ModelAdmin):
def save_model(self, request, obj, form, change):
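        # Stamp the requesting user as the owner only on first save (no id yet).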
if not obj.id:
obj.owner = request.user
obj.save()
class AlbumAdmin(admin.ModelAdmin):
def save_model(self, request, obj, form, change):
if not obj.id:
obj.owner = request.user
obj.save()
admin.site.register(models.Manufacturer)
admin.site.register(models.Camera)
admin.site.register(models.Photo, PhotoAdmin)
admin.site.register(models.Album, AlbumAdmin)
|
{
"content_hash": "d2d50526cb59e9b068b1ccc6d8a7c57b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 53,
"avg_line_length": 24.636363636363637,
"alnum_prop": 0.7158671586715867,
"repo_name": "ateoto/django-glair",
"id": "84a4630e5b692bbb5b7c402fee5d7492fee2f8f5",
"size": "542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glair/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23105"
},
{
"name": "Python",
"bytes": "90293"
}
],
"symlink_target": ""
}
|
"""
Functional constructs for ORM configuration.
See the SQLAlchemy object relational tutorial and mapper configuration
documentation for an overview of how this module is used.
"""
from sqlalchemy.orm import exc
from sqlalchemy.orm.mapper import (
Mapper,
_mapper_registry,
class_mapper,
)
from sqlalchemy.orm.interfaces import (
EXT_CONTINUE,
EXT_STOP,
ExtensionOption,
InstrumentationManager,
MapperExtension,
PropComparator,
SessionExtension,
AttributeExtension,
)
from sqlalchemy.orm.util import (
AliasedClass as aliased,
Validator,
join,
object_mapper,
outerjoin,
polymorphic_union,
with_parent,
)
from sqlalchemy.orm.properties import (
ColumnProperty,
ComparableProperty,
CompositeProperty,
RelationshipProperty,
PropertyLoader,
SynonymProperty,
)
from sqlalchemy.orm import mapper as mapperlib
from sqlalchemy.orm.mapper import reconstructor, validates
from sqlalchemy.orm import strategies
from sqlalchemy.orm.query import AliasOption, Query
from sqlalchemy.sql import util as sql_util
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.session import object_session, sessionmaker, \
make_transient
from sqlalchemy.orm.scoping import ScopedSession
from sqlalchemy import util as sa_util
__all__ = (
'EXT_CONTINUE',
'EXT_STOP',
'InstrumentationManager',
'MapperExtension',
'AttributeExtension',
'Validator',
'PropComparator',
'Query',
'Session',
'aliased',
'backref',
'class_mapper',
'clear_mappers',
'column_property',
'comparable_property',
'compile_mappers',
'composite',
'contains_alias',
'contains_eager',
'create_session',
'defer',
'deferred',
'dynamic_loader',
'eagerload',
'eagerload_all',
'extension',
'immediateload',
'join',
'joinedload',
'joinedload_all',
'lazyload',
'mapper',
'make_transient',
'noload',
'object_mapper',
'object_session',
'outerjoin',
'polymorphic_union',
'reconstructor',
'relationship',
'relation',
'scoped_session',
'sessionmaker',
'subqueryload',
'subqueryload_all',
'synonym',
'undefer',
'undefer_group',
'validates'
)
def scoped_session(session_factory, scopefunc=None):
"""Provides thread-local or scoped management of :class:`.Session` objects.
This is a front-end function to
:class:`.ScopedSession`.
:param session_factory: a callable function that produces
:class:`Session` instances, such as :func:`sessionmaker`.
:param scopefunc: Optional "scope" function which would be
passed to the :class:`.ScopedRegistry`. If None, the
:class:`.ThreadLocalRegistry` is used by default.
:returns: an :class:`.ScopedSession` instance
Usage::
Session = scoped_session(sessionmaker(autoflush=True))
To instantiate a Session object which is part of the scoped context,
instantiate normally::
session = Session()
Most session methods are available as classmethods from the scoped
session::
Session.commit()
Session.close()
"""
return ScopedSession(session_factory, scopefunc=scopefunc)
def create_session(bind=None, **kwargs):
"""Create a new :class:`.Session`
with no automation enabled by default.
This function is used primarily for testing. The usual
route to :class:`.Session` creation is via its constructor
or the :func:`.sessionmaker` function.
:param bind: optional, a single Connectable to use for all
database access in the created
:class:`~sqlalchemy.orm.session.Session`.
:param \*\*kwargs: optional, passed through to the
:class:`Session` constructor.
:returns: an :class:`~sqlalchemy.orm.session.Session` instance
The defaults of create_session() are the opposite of that of
:func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are
False, ``autocommit`` is True. In this sense the session acts
more like the "classic" SQLAlchemy 0.3 session with these.
Usage::
>>> from sqlalchemy.orm import create_session
>>> session = create_session()
It is recommended to use :func:`sessionmaker` instead of
create_session().
"""
kwargs.setdefault('autoflush', False)
kwargs.setdefault('autocommit', True)
kwargs.setdefault('expire_on_commit', False)
return Session(bind=bind, **kwargs)
def relationship(argument, secondary=None, **kwargs):
"""Provide a relationship of a primary Mapper to a secondary Mapper.
.. note:: :func:`relationship` is historically known as
:func:`relation` prior to version 0.6.
This corresponds to a parent-child or associative table relationship. The
constructed class is an instance of :class:`RelationshipProperty`.
A typical :func:`relationship`::
mapper(Parent, properties={
'children': relationship(Children)
})
:param argument:
a class or :class:`Mapper` instance, representing the target of
the relationship.
:param secondary:
for a many-to-many relationship, specifies the intermediary
table. The *secondary* keyword argument should generally only
be used for a table that is not otherwise expressed in any class
mapping. In particular, using the Association Object Pattern is
generally mutually exclusive with the use of the *secondary*
keyword argument.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
many-to-one reference should be loaded when replaced, if
not already loaded. Normally, history tracking logic for
simple many-to-ones only needs to be aware of the "new"
value in order to perform a flush. This flag is available
for applications that make use of
:func:`.attributes.get_history` which also need to know
the "previous" value of the attribute. (New in 0.6.6)
:param backref:
indicates the string name of a property to be placed on the related
mapper's class that will handle this relationship in the other
direction. The other property will be created automatically
when the mappers are configured. Can also be passed as a
:func:`backref` object to control the configuration of the
new relationship.
:param back_populates:
Takes a string name and has the same meaning as ``backref``,
except the complementing property is **not** created automatically,
and instead must be configured explicitly on the other mapper. The
complementing property should also indicate ``back_populates``
to this relationship to ensure proper functioning.
:param cascade:
a comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
This defaults to ``False``, which means the default cascade
should be used. The default value is ``"save-update, merge"``.
Available cascades are:
* ``save-update`` - cascade the :meth:`.Session.add`
operation. This cascade applies both to future and
past calls to :meth:`~sqlalchemy.orm.session.Session.add`,
meaning new items added to a collection or scalar relationship
get placed into the same session as that of the parent, and
also applies to items which have been removed from this
relationship but are still part of unflushed history.
* ``merge`` - cascade the :meth:`~sqlalchemy.orm.session.Session.merge`
operation
* ``expunge`` - cascade the :meth:`.Session.expunge`
operation
* ``delete`` - cascade the :meth:`.Session.delete`
operation
* ``delete-orphan`` - if an item of the child's type with no
parent is detected, mark it for deletion. Note that this
option prevents a pending item of the child's class from being
persisted without a parent present.
* ``refresh-expire`` - cascade the :meth:`.Session.expire`
and :meth:`~sqlalchemy.orm.session.Session.refresh` operations
* ``all`` - shorthand for "save-update,merge, refresh-expire,
expunge, delete"
:param cascade_backrefs=True:
a boolean value indicating if the ``save-update`` cascade should
operate along a backref event. When set to ``False`` on a
one-to-many relationship that has a many-to-one backref, assigning
a persistent object to the many-to-one attribute on a transient object
will not add the transient to the session. Similarly, when
set to ``False`` on a many-to-one relationship that has a one-to-many
backref, appending a persistent object to the one-to-many collection
on a transient object will not add the transient to the session.
``cascade_backrefs`` is new in 0.6.5.
:param collection_class:
a class or callable that returns a new list-holding object. will
be used in place of a plain list for storing elements.
Behavior of this attribute is described in detail at
:ref:`custom_collections`.
:param comparator_factory:
a class which extends :class:`RelationshipProperty.Comparator` which
provides custom SQL clause generation for comparison operations.
:param doc:
docstring which will be applied to the resulting descriptor.
:param extension:
an :class:`AttributeExtension` instance, or list of extensions,
which will be prepended to the list of attribute listeners for
the resulting descriptor placed on the class. These listeners
will receive append and set events before the operation
proceeds, and may be used to halt (via exception throw) or
change the value used in the operation.
:param foreign_keys:
a list of columns which are to be used as "foreign key" columns.
Normally, :func:`relationship` uses the :class:`.ForeignKey`
and :class:`.ForeignKeyConstraint` objects present within the
mapped or secondary :class:`.Table` to determine the "foreign" side of
the join condition. This is used to construct SQL clauses in order
to load objects, as well as to "synchronize" values from
primary key columns to referencing foreign key columns.
The ``foreign_keys`` parameter overrides the notion of what's
"foreign" in the table metadata, allowing the specification
of a list of :class:`.Column` objects that should be considered
part of the foreign key.
There are only two use cases for ``foreign_keys`` - one, when it is not
convenient for :class:`.Table` metadata to contain its own foreign key
      metadata (which should almost never be the case, unless reflecting a large number of
tables from a MySQL MyISAM schema, or a schema that doesn't actually
have foreign keys on it). The other is for extremely
rare and exotic composite foreign key setups where some columns
should artificially not be considered as foreign.
:param innerjoin=False:
when ``True``, joined eager loads will use an inner join to join
against related tables instead of an outer join. The purpose
of this option is strictly one of performance, as inner joins
generally perform better than outer joins. This flag can
be set to ``True`` when the relationship references an object
via many-to-one using local foreign keys that are not nullable,
or when the reference is one-to-one or a collection that is
guaranteed to have one or at least one entry.
:param join_depth:
when non-``None``, an integer value indicating how many levels
deep "eager" loaders should join on a self-referring or cyclical
relationship. The number counts how many times the same Mapper
shall be present in the loading condition along a particular join
branch. When left at its default of ``None``, eager loaders
      will stop chaining when they encounter the same target mapper
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
:param lazy='select': specifies
how the related items should be loaded. Default value is
``select``. Values include:
* ``select`` - items should be loaded lazily when the property is first
accessed, using a separate SELECT statement, or identity map
fetch for simple many-to-one references.
* ``immediate`` - items should be loaded as the parents are loaded,
using a separate SELECT statement, or identity map fetch for
simple many-to-one references. (new as of 0.6.5)
* ``joined`` - items should be loaded "eagerly" in the same query as
that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
the join is "outer" or not is determined by the ``innerjoin``
parameter.
* ``subquery`` - items should be loaded "eagerly" within the same
query as that of the parent, using a second SQL statement
which issues a JOIN to a subquery of the original
statement.
* ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
* ``dynamic`` - the attribute will return a pre-configured
:class:`~sqlalchemy.orm.query.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. The dynamic
collection supports a limited set of mutation operations,
allowing ``append()`` and ``remove()``. Changes to the
collection will not be visible until flushed
to the database, where it is then refetched upon iteration.
* True - a synonym for 'select'
      * False - a synonym for 'joined'
* None - a synonym for 'noload'
Detailed discussion of loader strategies is at :ref:`loading_toplevel`.
:param load_on_pending=False:
Indicates loading behavior for transient or pending parent objects.
When set to ``True``, causes the lazy-loader to
issue a query for a parent object that is not persistent, meaning it has
never been flushed. This may take effect for a pending object when
autoflush is disabled, or for a transient object that has been
"attached" to a :class:`.Session` but is not part of its pending
collection. Attachment of transient objects to the session without
moving to the "pending" state is not a supported behavior at this time.
Note that the load of related objects on a pending or transient object
also does not trigger any attribute change events - no user-defined
events will be emitted for these attributes, and if and when the
object is ultimately flushed, only the user-specific foreign key
attributes will be part of the modified state.
The load_on_pending flag does not improve behavior
when the ORM is used normally - object references should be constructed
at the object level, not at the foreign key level, so that they
are present in an ordinary way before flush() proceeds. This flag
      is not intended for general use.
New in 0.6.5.
:param order_by:
indicates the ordering that should be applied when loading these
items.
:param passive_deletes=False:
Indicates loading behavior during delete operations.
A value of True indicates that unloaded child items should not
be loaded during a delete operation on the parent. Normally,
when a parent item is deleted, all child items are loaded so
that they can either be marked as deleted, or have their
foreign key to the parent set to NULL. Marking this flag as
True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
place which will handle updating/deleting child rows on the
database side.
Additionally, setting the flag to the string value 'all' will
disable the "nulling out" of the child foreign keys, when there
is no delete or delete-orphan cascade enabled. This is
typically used when a triggering or error raise scenario is in
place on the database side. Note that the foreign key
attributes on in-session child objects will not be changed
after a flush occurs so this is a very special use-case
setting.
:param passive_updates=True:
Indicates loading and INSERT/UPDATE/DELETE behavior when the
source of a foreign key value changes (i.e. an "on update"
cascade), which are typically the primary key columns of the
source row.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
dependent rows. Note that with databases which enforce
referential integrity (i.e. PostgreSQL, MySQL with InnoDB tables),
ON UPDATE CASCADE is required for this operation. The
relationship() will update the value of the attribute on related
items which are locally present in the session during a flush.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The relationship() will issue the
appropriate UPDATE statements to the database in response to the
change of a referenced key, and items locally present in the
session during a flush will also be refreshed.
This flag should probably be set to False if primary key changes
are expected and the database in use doesn't support CASCADE
(i.e. SQLite, MySQL MyISAM tables).
Also see the passive_updates flag on ``mapper()``.
A future SQLAlchemy release will provide a "detect" feature for
this flag.
:param post_update:
this indicates that the relationship should be handled by a
second UPDATE statement after an INSERT or before a
DELETE. Currently, it also will issue an UPDATE after the
instance was UPDATEd as well, although this technically should
be improved. This flag is used to handle saving bi-directional
dependencies between two individual rows (i.e. each row
references the other), where it would otherwise be impossible to
INSERT or DELETE both rows fully since one row exists before the
other. Use this flag when a particular mapping arrangement will
incur two rows that are dependent on each other, such as a table
that has a one-to-many relationship to a set of child rows, and
also has a column that references a single child row within that
list (i.e. both tables contain a foreign key to each other). If
a ``flush()`` operation returns an error that a "cyclical
dependency" was detected, this is a cue that you might want to
use ``post_update`` to "break" the cycle.
:param primaryjoin:
a ColumnElement (i.e. WHERE criterion) that will be used as the primary
join of this child object against the parent object, or in a
many-to-many relationship the join of the primary object to the
association table. By default, this value is computed based on the
foreign key relationships of the parent and child tables (or association
table).
:param remote_side:
used for self-referential relationships, indicates the column or
list of columns that form the "remote side" of the relationship.
:param secondaryjoin:
a ColumnElement (i.e. WHERE criterion) that will be used as the join of
an association table to the child object. By default, this value is
computed based on the foreign key relationships of the association and
child tables.
:param single_parent=(True|False):
when True, installs a validator which will prevent objects
from being associated with more than one parent at a time.
This is used for many-to-one or many-to-many relationships that
should be treated either as one-to-one or one-to-many. Its
usage is optional unless delete-orphan cascade is also
      set on this relationship(), in which case it's required (new in 0.5.2).
:param uselist=(True|False):
a boolean that indicates if this property should be loaded as a
list or a scalar. In most cases, this value is determined
automatically by ``relationship()``, based on the type and direction
of the relationship - one to many forms a list, many to one
forms a scalar, many to many is a list. If a scalar is desired
where normally a list would be present, such as a bi-directional
one-to-one relationship, set uselist to False.
:param viewonly=False:
when set to True, the relationship is used only for loading objects
within the relationship, and has no effect on the unit-of-work
flush process. Relationships with viewonly can specify any kind of
join conditions to provide additional views of related objects
onto a parent object. Note that the functionality of a viewonly
relationship has its limits - complicated join conditions may
not compile into eager or lazy loaders properly. If this is the
case, use an alternative method.
"""
return RelationshipProperty(argument, secondary=secondary, **kwargs)
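# --- Illustrative sketch (not part of the original API reference) ----------
# A minimal classical mapping exercising the ``backref``, ``cascade`` and
# ``lazy`` parameters documented above.  The table and class names are
# invented purely for the example, and nothing here runs at import time.
def _relationship_example():
    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
    metadata = MetaData()
    parent_table = Table('example_parent', metadata,
                         Column('id', Integer, primary_key=True))
    child_table = Table('example_child', metadata,
                        Column('id', Integer, primary_key=True),
                        Column('parent_id', Integer,
                               ForeignKey('example_parent.id')))
    class Parent(object):
        pass
    class Child(object):
        pass
    # One-to-many collection Parent.children; the backref supplies the
    # complementing many-to-one Child.parent, and children removed from the
    # collection are deleted via the delete-orphan cascade.
    mapper(Parent, parent_table, properties={
        'children': relationship(Child, backref='parent',
                                 cascade='all, delete-orphan',
                                 lazy='select'),
    })
    mapper(Child, child_table)
    return Parent, Child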
def relation(*arg, **kw):
"""A synonym for :func:`relationship`."""
return relationship(*arg, **kw)
def dynamic_loader(argument, secondary=None, primaryjoin=None,
secondaryjoin=None, foreign_keys=None, backref=None,
post_update=False, cascade=False, remote_side=None,
enable_typechecks=True, passive_deletes=False, doc=None,
order_by=None, comparator_factory=None, query_class=None):
"""Construct a dynamically-loading mapper property.
This property is similar to :func:`relationship`, except read
operations return an active :class:`Query` object which reads from
the database when accessed. Items may be appended to the
attribute via ``append()``, or removed via ``remove()``; changes
    will be persisted to the database during a :meth:`Session.flush`.
However, no other Python list or collection mutation operations
are available.
A subset of arguments available to :func:`relationship` are available
here.
:param argument:
a class or :class:`Mapper` instance, representing the target of
the relationship.
:param secondary:
for a many-to-many relationship, specifies the intermediary
table. The *secondary* keyword argument should generally only
be used for a table that is not otherwise expressed in any class
mapping. In particular, using the Association Object Pattern is
generally mutually exclusive with the use of the *secondary*
keyword argument.
:param query_class:
Optional, a custom Query subclass to be used as the basis for
dynamic collection.
"""
from sqlalchemy.orm.dynamic import DynaLoader
return RelationshipProperty(
argument, secondary=secondary, primaryjoin=primaryjoin,
secondaryjoin=secondaryjoin, foreign_keys=foreign_keys,
backref=backref,
post_update=post_update, cascade=cascade, remote_side=remote_side,
enable_typechecks=enable_typechecks, passive_deletes=passive_deletes,
order_by=order_by, comparator_factory=comparator_factory,doc=doc,
strategy_class=DynaLoader, query_class=query_class)
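# Illustrative note (not part of the original API reference): with a mapping
# like the Parent/Child sketch above, ``'children': dynamic_loader(Child)``
# is equivalent to ``relationship(Child, lazy='dynamic')`` -- reads on
# ``parent.children`` then return a pre-configured Query rather than a
# loaded collection.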
def column_property(*args, **kwargs):
"""Provide a column-level property for use with a Mapper.
Column-based properties can normally be applied to the mapper's
``properties`` dictionary using the :class:`.Column` element directly.
Use this function when the given column is not directly present within the
mapper's selectable; examples include SQL expressions, functions, and
scalar SELECT queries.
Columns that aren't present in the mapper's selectable won't be persisted
by the mapper and are effectively "read-only" attributes.
:param \*cols:
list of Column objects to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. Normally, history tracking logic for
simple non-primary-key scalar values only needs to be
aware of the "new" value in order to perform a flush. This
flag is available for applications that make use of
:func:`.attributes.get_history` which also need to know
the "previous" value of the attribute. (new in 0.6.6)
:param comparator_factory: a class which extends
:class:`.ColumnProperty.Comparator` which provides custom SQL clause
generation for comparison operations.
:param group:
a group name for this property when marked as deferred.
:param deferred:
when True, the column property is "deferred", meaning that
it does not load immediately, and is instead loaded when the
attribute is first accessed on an instance. See also
:func:`~sqlalchemy.orm.deferred`.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param extension:
an :class:`~sqlalchemy.orm.interfaces.AttributeExtension` instance,
or list of extensions, which will be prepended to the list of
attribute listeners for the resulting descriptor placed on the class.
These listeners will receive append and set events before the
operation proceeds, and may be used to halt (via exception throw)
or change the value used in the operation.
"""
return ColumnProperty(*args, **kwargs)
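# Illustrative sketch (not part of the original API reference): mapping a SQL
# expression that is not a plain table column.  The table and class names are
# invented purely for the example.
def _column_property_example():
    from sqlalchemy import MetaData, Table, Column, Integer, String
    metadata = MetaData()
    users = Table('example_users', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('firstname', String(50)),
                  Column('lastname', String(50)))
    class User(object):
        pass
    # 'fullname' is read-only from the mapper's point of view; it is computed
    # by the database as part of the SELECT.
    mapper(User, users, properties={
        'fullname': column_property(
            (users.c.firstname + " " + users.c.lastname).label('fullname')),
    })
    return User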
def composite(class_, *cols, **kwargs):
"""Return a composite column-based property for use with a Mapper.
    See the mapping documentation section :ref:`mapper_composite` for a full
usage example.
:param class\_:
The "composite type" class.
:param \*cols:
List of Column objects to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. Note that attributes generated by
:func:`.composite` properties load the "previous" value
in any case, however this is being changed in 0.7,
so the flag is introduced here for forwards compatibility.
(new in 0.6.6)
:param group:
A group name for this property when marked as deferred.
:param deferred:
When True, the column property is "deferred", meaning that it does not
load immediately, and is instead loaded when the attribute is first
accessed on an instance. See also :func:`~sqlalchemy.orm.deferred`.
:param comparator_factory: a class which extends
:class:`.CompositeProperty.Comparator` which provides custom SQL clause
generation for comparison operations.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param extension:
an :class:`~sqlalchemy.orm.interfaces.AttributeExtension` instance,
or list of extensions, which will be prepended to the list of
attribute listeners for the resulting descriptor placed on the class.
These listeners will receive append and set events before the
operation proceeds, and may be used to halt (via exception throw)
or change the value used in the operation.
"""
return CompositeProperty(class_, *cols, **kwargs)
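# Illustrative sketch (not part of the original API reference): a composite
# "Point" value spanning two columns.  The ``__composite_values__`` /
# equality protocol used here is an assumption based on this SQLAlchemy
# series' composite support, and all names are invented for the example.
def _composite_example():
    from sqlalchemy import MetaData, Table, Column, Integer
    metadata = MetaData()
    vertices = Table('example_vertices', metadata,
                     Column('id', Integer, primary_key=True),
                     Column('x1', Integer),
                     Column('y1', Integer))
    class Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y
        def __composite_values__(self):
            return self.x, self.y
        def __set_composite_values__(self, x, y):
            self.x = x
            self.y = y
        def __eq__(self, other):
            return isinstance(other, Point) and \
                other.x == self.x and other.y == self.y
        def __ne__(self, other):
            return not self.__eq__(other)
    class Vertex(object):
        pass
    mapper(Vertex, vertices, properties={
        'start': composite(Point, vertices.c.x1, vertices.c.y1),
    })
    return Vertex, Point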
def backref(name, **kwargs):
"""Create a back reference with explicit arguments, which are the same
arguments one can send to :func:`relationship`.
Used with the `backref` keyword argument to :func:`relationship` in
place of a string argument.
"""
return (name, kwargs)
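# Illustrative note (not part of the original API reference): backref() is
# only needed when the reverse relationship itself takes arguments, e.g.
# (names invented for the example)
#
#     'children': relationship(Child,
#                              backref=backref('parent', lazy='joined'))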
def deferred(*columns, **kwargs):
"""Return a :class:`DeferredColumnProperty`, which indicates this
object attributes should only be loaded from its corresponding
table column when first accessed.
Used with the `properties` dictionary sent to :func:`mapper`.
"""
return ColumnProperty(deferred=True, *columns, **kwargs)
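# Illustrative note (not part of the original API reference): deferring a
# large column until first access (names invented for the example)
#
#     mapper(Book, books, properties={
#         'photo': deferred(books.c.photo, group='photos'),
#     })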
def mapper(class_, local_table=None, *args, **params):
"""Return a new :class:`~.Mapper` object.
:param class\_: The class to be mapped.
:param local_table: The table to which the class is mapped, or None if
this mapper inherits from another mapper using concrete table
inheritance.
:param always_refresh: If True, all query operations for this mapped
class will overwrite all data within object instances that already
exist within the session, erasing any in-memory changes with
whatever information was loaded from the database. Usage of this
flag is highly discouraged; as an alternative, see the method
:meth:`.Query.populate_existing`.
:param allow_null_pks: This flag is deprecated - this is stated as
allow_partial_pks which defaults to True.
:param allow_partial_pks: Defaults to True. Indicates that a
composite primary key with some NULL values should be considered as
possibly existing within the database. This affects whether a
mapper will assign an incoming row to an existing identity, as well
as if :meth:`.Session.merge` will check the database first for a
particular primary key value. A "partial primary key" can occur if
one has mapped to an OUTER JOIN, for example.
:param batch: Indicates that save operations of multiple entities
      can be batched together for efficiency. Setting to False indicates
that an instance will be fully saved before saving the next
instance, which includes inserting/updating all table rows
corresponding to the entity as well as calling all
:class:`.MapperExtension` methods corresponding to the save
operation.
:param column_prefix: A string which will be prepended to the `key`
name of all :class:`.Column` objects when creating
column-based properties from the
given :class:`.Table`. Does not affect explicitly specified
column-based properties
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
:param exclude_properties: A list or set of string column names to
be excluded from mapping. As of SQLAlchemy 0.6.4, this collection
may also include :class:`.Column` objects. Columns named or present
in this list will not be automatically mapped. Note that neither
      this option nor include_properties will allow one to circumvent plain
Python inheritance - if mapped class ``B`` inherits from mapped
class ``A``, no combination of includes or excludes will allow ``B``
to have fewer properties than its superclass, ``A``.
:param extension: A :class:`.MapperExtension` instance or
list of :class:`.MapperExtension`
instances which will be applied to all operations by this
:class:`.Mapper`.
:param include_properties: An inclusive list or set of string column
names to map. As of SQLAlchemy 0.6.4, this collection may also
include :class:`.Column` objects in order to disambiguate between
same-named columns in a selectable (such as a
:func:`~.expression.join()`). If this list is not ``None``, columns
present in the mapped table but not named or present in this list
will not be automatically mapped. See also "exclude_properties".
:param inherits: Another :class:`.Mapper` for which
this :class:`.Mapper` will have an inheritance
relationship with.
:param inherit_condition: For joined table inheritance, a SQL
expression (constructed
:class:`.ClauseElement`) which will
define how the two tables are joined; defaults to a natural join
between the two tables.
:param inherit_foreign_keys: When inherit_condition is used and the
condition contains no ForeignKey columns, specify the "foreign"
      columns of the join condition in this list. Otherwise, leave as None.
:param non_primary: Construct a :class:`Mapper` that will define only
the selection of instances, not their persistence. Any number of
non_primary mappers may be created for a particular class.
:param order_by: A single :class:`.Column` or list of :class:`.Column`
objects for which selection operations should use as the default
ordering for entities. Defaults to the OID/ROWID of the table if
any, or the first primary key column of the table.
:param passive_updates: Indicates UPDATE behavior of foreign keys
when a primary key changes on a joined-table inheritance or other
joined table mapping.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent rows.
Note that with databases which enforce referential integrity (i.e.
PostgreSQL, MySQL with InnoDB tables), ON UPDATE CASCADE is
required for this operation. The relationship() will update the
value of the attribute on related items which are locally present
in the session during a flush.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The relationship() will issue the
appropriate UPDATE statements to the database in response to the
change of a referenced key, and items locally present in the
session during a flush will also be refreshed.
This flag should probably be set to False if primary key changes
are expected and the database in use doesn't support CASCADE (i.e.
SQLite, MySQL MyISAM tables).
Also see the passive_updates flag on :func:`relationship()`.
A future SQLAlchemy release will provide a "detect" feature for
this flag.
:param polymorphic_on: Used with mappers in an inheritance
relationship, a :class:`.Column` which will identify the class/mapper
combination to be used with a particular row. Requires the
``polymorphic_identity`` value to be set for all mappers in the
inheritance hierarchy. The column specified by ``polymorphic_on``
is usually a column that resides directly within the base mapper's
mapped table; alternatively, it may be a column that is only
present within the <selectable> portion of the ``with_polymorphic``
argument.
:param polymorphic_identity: A value which will be stored in the
Column denoted by polymorphic_on, corresponding to the class
identity of this mapper.
:param properties: A dictionary mapping the string names of object
attributes to ``MapperProperty`` instances, which define the
persistence behavior of that attribute. Note that the columns in
the mapped table are automatically converted into
``ColumnProperty`` instances based on the ``key`` property of each
:class:`.Column` (although they can be overridden using this dictionary).
:param primary_key: A list of :class:`.Column` objects which define the
primary key to be used against this mapper's selectable unit.
This is normally simply the primary key of the ``local_table``, but
can be overridden here.
:param version_id_col: A :class:`.Column` which must have an integer type
that will be used to keep a running version id of mapped entities
      in the database. This is used during save operations to ensure that
no other thread or process has updated the instance during the
lifetime of the entity, else a :class:`StaleDataError` exception is
thrown.
:param version_id_generator: A callable which defines the algorithm
used to generate new version ids. Defaults to an integer
generator. Can be replaced with one that generates timestamps,
uuids, etc. e.g.::
import uuid
mapper(Cls, table,
version_id_col=table.c.version_uuid,
version_id_generator=lambda version:uuid.uuid4().hex
)
The callable receives the current version identifier as its
single argument.
:param with_polymorphic: A tuple in the form ``(<classes>,
<selectable>)`` indicating the default style of "polymorphic"
loading, that is, which tables are queried at once. <classes> is
any single or list of mappers and/or classes indicating the
inherited classes that should be loaded at once. The special value
``'*'`` may be used to indicate all descending classes should be
loaded immediately. The second tuple argument <selectable>
indicates a selectable that will be used to query for multiple
classes. Normally, it is left as None, in which case this mapper
will form an outer join from the base mapper's table to that of
all desired sub-mappers. When specified, it provides the
selectable to be used for polymorphic loading. When
with_polymorphic includes mappers which load from a "concrete"
inheriting table, the <selectable> argument is required, since it
usually requires more complex UNION queries.
"""
return Mapper(class_, local_table, *args, **params)
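# Illustrative sketch (not part of the original API reference): single-table
# inheritance driven by ``polymorphic_on`` / ``polymorphic_identity``.  The
# table and class names are invented purely for the example.
def _mapper_inheritance_example():
    from sqlalchemy import MetaData, Table, Column, Integer, String
    metadata = MetaData()
    employees = Table('example_employees', metadata,
                      Column('id', Integer, primary_key=True),
                      Column('name', String(50)),
                      Column('type', String(20)))
    class Employee(object):
        pass
    class Manager(Employee):
        pass
    employee_mapper = mapper(Employee, employees,
                             polymorphic_on=employees.c.type,
                             polymorphic_identity='employee')
    # Manager maps to the same table; rows are discriminated by 'type'.
    mapper(Manager, inherits=employee_mapper,
           polymorphic_identity='manager')
    return Employee, Manager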
def synonym(name, map_column=False, descriptor=None,
comparator_factory=None, doc=None):
"""Set up `name` as a synonym to another mapped property.
Used with the ``properties`` dictionary sent to
:func:`~sqlalchemy.orm.mapper`.
Any existing attributes on the class which map the key name sent
to the ``properties`` dictionary will be used by the synonym to provide
instance-attribute behavior (that is, any Python property object, provided
by the ``property`` builtin or providing a ``__get__()``, ``__set__()``
and ``__del__()`` method). If no name exists for the key, the
``synonym()`` creates a default getter/setter object automatically and
applies it to the class.
`name` refers to the name of the existing mapped property, which can be
any other ``MapperProperty`` including column-based properties and
relationships.
If `map_column` is ``True``, an additional ``ColumnProperty`` is created
on the mapper automatically, using the synonym's name as the keyname of
the property, and the keyname of this ``synonym()`` as the name of the
column to map. For example, if a table has a column named ``status``::
class MyClass(object):
def _get_status(self):
return self._status
def _set_status(self, value):
self._status = value
status = property(_get_status, _set_status)
mapper(MyClass, sometable, properties={
"status":synonym("_status", map_column=True)
})
The column named ``status`` will be mapped to the attribute named
``_status``, and the ``status`` attribute on ``MyClass`` will be used to
proxy access to the column-based attribute.
"""
return SynonymProperty(name, map_column=map_column,
descriptor=descriptor,
comparator_factory=comparator_factory,
doc=doc)
def comparable_property(comparator_factory, descriptor=None):
"""Provides a method of applying a :class:`.PropComparator`
to any Python descriptor attribute.
Allows a regular Python @property (descriptor) to be used in Queries and
SQL constructs like a managed attribute. comparable_property wraps a
descriptor with a proxy that directs operator overrides such as ==
(__eq__) to the supplied comparator but proxies everything else through to
the original descriptor::
from sqlalchemy.orm import mapper, comparable_property
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.sql import func
class MyClass(object):
@property
def myprop(self):
return 'foo'
class MyComparator(PropComparator):
def __eq__(self, other):
              return func.lower(other) == 'foo'
mapper(MyClass, mytable, properties={
'myprop': comparable_property(MyComparator)})
Used with the ``properties`` dictionary sent to
:func:`~sqlalchemy.orm.mapper`.
Note that :func:`comparable_property` is usually not needed for basic
needs. The recipe at :mod:`.derived_attributes` offers a simpler
pure-Python method of achieving a similar result using class-bound
attributes with SQLAlchemy expression constructs.
:param comparator_factory:
A PropComparator subclass or factory that defines operator behavior
for this property.
:param descriptor:
Optional when used in a ``properties={}`` declaration. The Python
descriptor or property to layer comparison behavior on top of.
      The like-named descriptor will be automatically retrieved from the
mapped class if left blank in a ``properties`` declaration.
"""
return ComparableProperty(comparator_factory, descriptor)
def compile_mappers():
"""Compile all mappers that have been defined.
This is equivalent to calling ``compile()`` on any individual mapper.
"""
for m in list(_mapper_registry):
m.compile()
def clear_mappers():
"""Remove all mappers from all classes.
This function removes all instrumentation from classes and disposes
of their associated mappers. Once called, the classes are unmapped
and can be later re-mapped with new mappers.
:func:`.clear_mappers` is *not* for normal use, as there is literally no
valid usage for it outside of very specific testing scenarios. Normally,
mappers are permanent structural components of user-defined classes, and
are never discarded independently of their class. If a mapped class itself
is garbage collected, its mapper is automatically disposed of as well. As
such, :func:`.clear_mappers` is only for usage in test suites that re-use
the same classes with different mappings, which is itself an extremely rare
use case - the only such use case is in fact SQLAlchemy's own test suite,
and possibly the test suites of other ORM extension libraries which
intend to test various combinations of mapper construction upon a fixed
set of classes.
"""
mapperlib._COMPILE_MUTEX.acquire()
try:
while _mapper_registry:
try:
# can't even reliably call list(weakdict) in jython
mapper, b = _mapper_registry.popitem()
mapper.dispose()
except KeyError:
pass
finally:
mapperlib._COMPILE_MUTEX.release()
def extension(ext):
"""Return a ``MapperOption`` that will insert the given
``MapperExtension`` to the beginning of the list of extensions
that will be called in the context of the ``Query``.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
"""
return ExtensionOption(ext)
@sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated')
def joinedload(*keys, **kw):
"""Return a ``MapperOption`` that will convert the property of the given
    name into a joined eager load.
.. note:: This function is known as :func:`eagerload` in all versions
of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4
series. :func:`eagerload` will remain available for the foreseeable
future in order to enable cross-compatibility.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
examples::
        # joined-load the "orders" collection on "User"
query(User).options(joinedload(User.orders))
# joined-load the "keywords" collection on each "Item",
# but not the "items" collection on "Order" - those
# remain lazily loaded.
query(Order).options(joinedload(Order.items, Item.keywords))
# to joined-load across both, use joinedload_all()
query(Order).options(joinedload_all(Order.items, Item.keywords))
:func:`joinedload` also accepts a keyword argument `innerjoin=True` which
indicates using an inner join instead of an outer::
query(Order).options(joinedload(Order.user, innerjoin=True))
Note that the join created by :func:`joinedload` is aliased such that no
other aspects of the query will affect what it loads. To use joined eager
loading with a join that is constructed manually using
:meth:`~sqlalchemy.orm.query.Query.join` or :func:`~sqlalchemy.orm.join`,
see :func:`contains_eager`.
See also: :func:`subqueryload`, :func:`lazyload`
"""
innerjoin = kw.pop('innerjoin', None)
if innerjoin is not None:
return (
strategies.EagerLazyOption(keys, lazy='joined'),
strategies.EagerJoinOption(keys, innerjoin)
)
else:
return strategies.EagerLazyOption(keys, lazy='joined')
@sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated')
def joinedload_all(*keys, **kw):
"""Return a ``MapperOption`` that will convert all properties along the
    given dot-separated path into a joined eager load.
.. note:: This function is known as :func:`eagerload_all` in all versions
of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4
series. :func:`eagerload_all` will remain available for the
foreseeable future in order to enable cross-compatibility.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
For example::
query.options(joinedload_all('orders.items.keywords'))...
will set all of 'orders', 'orders.items', and 'orders.items.keywords' to
load in one joined eager load.
Individual descriptors are accepted as arguments as well::
query.options(joinedload_all(User.orders, Order.items, Item.keywords))
The keyword arguments accept a flag `innerjoin=True|False` which will
override the value of the `innerjoin` flag specified on the
relationship().
See also: :func:`subqueryload_all`, :func:`lazyload`
"""
innerjoin = kw.pop('innerjoin', None)
if innerjoin is not None:
return (
strategies.EagerLazyOption(keys, lazy='joined', chained=True),
strategies.EagerJoinOption(keys, innerjoin, chained=True)
)
else:
return strategies.EagerLazyOption(keys, lazy='joined', chained=True)
def eagerload(*args, **kwargs):
"""A synonym for :func:`joinedload()`."""
return joinedload(*args, **kwargs)
def eagerload_all(*args, **kwargs):
"""A synonym for :func:`joinedload_all()`"""
return joinedload_all(*args, **kwargs)
def subqueryload(*keys):
"""Return a ``MapperOption`` that will convert the property
    of the given name into a subquery eager load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
examples::
        # subquery-load the "orders" collection on "User"
query(User).options(subqueryload(User.orders))
# subquery-load the "keywords" collection on each "Item",
# but not the "items" collection on "Order" - those
# remain lazily loaded.
query(Order).options(subqueryload(Order.items, Item.keywords))
# to subquery-load across both, use subqueryload_all()
query(Order).options(subqueryload_all(Order.items, Item.keywords))
See also: :func:`joinedload`, :func:`lazyload`
"""
return strategies.EagerLazyOption(keys, lazy="subquery")
def subqueryload_all(*keys):
"""Return a ``MapperOption`` that will convert all properties along the
given dot-separated path into a subquery eager load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
For example::
query.options(subqueryload_all('orders.items.keywords'))...
will set all of 'orders', 'orders.items', and 'orders.items.keywords' to
load in one subquery eager load.
Individual descriptors are accepted as arguments as well::
query.options(subqueryload_all(User.orders, Order.items,
Item.keywords))
See also: :func:`joinedload_all`, :func:`lazyload`, :func:`immediateload`
"""
return strategies.EagerLazyOption(keys, lazy="subquery", chained=True)
@sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated')
def lazyload(*keys):
"""Return a ``MapperOption`` that will convert the property of the given
name into a lazy load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
See also: :func:`eagerload`, :func:`subqueryload`, :func:`immediateload`
"""
return strategies.EagerLazyOption(keys, lazy=True)
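# A minimal usage sketch (hypothetical ``User`` mapping, mirroring the
# :func:`joinedload` examples above): force the "orders" relationship back to
# a lazy load even if it was configured to load eagerly:
#
#     query(User).options(lazyload(User.orders))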
def noload(*keys):
"""Return a ``MapperOption`` that will convert the property of the
given name into a non-load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
See also: :func:`lazyload`, :func:`eagerload`, :func:`subqueryload`, :func:`immediateload`
"""
return strategies.EagerLazyOption(keys, lazy=None)
def immediateload(*keys):
"""Return a ``MapperOption`` that will convert the property of the given
name into an immediate load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
See also: :func:`lazyload`, :func:`eagerload`, :func:`subqueryload`
    New as of version 0.6.5.
"""
return strategies.EagerLazyOption(keys, lazy='immediate')
def contains_alias(alias):
"""Return a ``MapperOption`` that will indicate to the query that
the main table has been aliased.
`alias` is the string name or ``Alias`` object representing the
alias.
"""
return AliasOption(alias)
@sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated')
def contains_eager(*keys, **kwargs):
"""Return a ``MapperOption`` that will indicate to the query that
the given attribute should be eagerly loaded from columns currently
in the query.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
The option is used in conjunction with an explicit join that loads
the desired rows, i.e.::
sess.query(Order).\\
join(Order.user).\\
options(contains_eager(Order.user))
The above query would join from the ``Order`` entity to its related
``User`` entity, and the returned ``Order`` objects would have the
``Order.user`` attribute pre-populated.
:func:`contains_eager` also accepts an `alias` argument, which is the
string name of an alias, an :func:`~sqlalchemy.sql.expression.alias`
construct, or an :func:`~sqlalchemy.orm.aliased` construct. Use this when
the eagerly-loaded rows are to come from an aliased table::
user_alias = aliased(User)
sess.query(Order).\\
join((user_alias, Order.user)).\\
options(contains_eager(Order.user, alias=user_alias))
See also :func:`eagerload` for the "automatic" version of this
functionality.
For additional examples of :func:`contains_eager` see
:ref:`contains_eager`.
"""
alias = kwargs.pop('alias', None)
if kwargs:
raise exceptions.ArgumentError('Invalid kwargs for contains_eag'
'er: %r' % kwargs.keys())
return strategies.EagerLazyOption(keys, lazy='joined',
propagate_to_loaders=False), \
strategies.LoadEagerFromAliasOption(keys, alias=alias)
@sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated')
def defer(*keys):
"""Return a ``MapperOption`` that will convert the column property of the
given name into a deferred load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
"""
return strategies.DeferredOption(keys, defer=True)
@sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated')
def undefer(*keys):
"""Return a ``MapperOption`` that will convert the column property of the
given name into a non-deferred (regular column) load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
"""
return strategies.DeferredOption(keys, defer=False)
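# Usage sketch for :func:`defer` / :func:`undefer` (hypothetical ``Book``
# mapping with a large ``summary`` column; names are illustrative only):
#
#     query(Book).options(defer('summary'))    # skip loading the column
#     query(Book).options(undefer('summary'))  # explicitly load it again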
def undefer_group(name):
"""Return a ``MapperOption`` that will convert the given group of deferred
column properties into a non-deferred (regular column) load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
"""
return strategies.UndeferGroupOption(name)
|
{
"content_hash": "29a0f6cff5045c26b0e208527970d95c",
"timestamp": "",
"source": "github",
"line_count": 1289,
"max_line_length": 95,
"avg_line_length": 41.64313421256788,
"alnum_prop": 0.6858303215470025,
"repo_name": "igemsoftware/SYSU-Software2013",
"id": "87f7568d852109c8c4aafa56aab0212bd5911773",
"size": "53912",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "project/Python27/Lib/site-packages/pypm/external/2/sqlalchemy/orm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "4234"
},
{
"name": "C",
"bytes": "2246655"
},
{
"name": "C#",
"bytes": "30903"
},
{
"name": "C++",
"bytes": "344228"
},
{
"name": "CSS",
"bytes": "437211"
},
{
"name": "F#",
"bytes": "9222"
},
{
"name": "JavaScript",
"bytes": "7288480"
},
{
"name": "Python",
"bytes": "55202181"
},
{
"name": "Shell",
"bytes": "23510"
},
{
"name": "Tcl",
"bytes": "3329368"
},
{
"name": "Visual Basic",
"bytes": "4330"
},
{
"name": "XSLT",
"bytes": "38160"
}
],
"symlink_target": ""
}
|
from pycket import values
from pycket.base import W_Object
from pycket.cont import call_cont, continuation, BaseCont
from pycket.error import SchemeException
from rpython.rlib import jit
# This is a Scheme_Parameterization in Racket
class RootParameterization(object):
def __init__(self):
# This table maps ParamKey -> W_ThreadCell
self.table = {}
# This is a Scheme_Config in Racket
# Except that Scheme_Config uses a functional hash table and this uses a list that we copy
class W_Parameterization(W_Object):
_immutable_fields_ = ["root", "keys", "vals"]
errorname = "parameterization"
def __init__(self, root, keys, vals):
#assert len(params) == len(vals)
self.keys = keys
self.vals = vals
self.root = root
@jit.unroll_safe
def extend(self, params, vals):
# why doesn't it like this assert?
# assert len(params) == len(vals)
# FIXME this is awful
total = len(params) + len(self.keys)
keys = [p.get_key() for p in params]
new_keys = [None] * total
new_vals = [None] * total
for i in range(total):
if i < len(params):
new_keys[i] = keys[i]
new_vals[i] = values.W_ThreadCell(vals[i], True)
else:
new_keys[i] = self.keys[i-len(params)]
new_vals[i] = self.vals[i-len(params)]
return W_Parameterization(self.root, new_keys, new_vals)
@jit.unroll_safe
def get(self, param):
k = param.key
for (i, key) in enumerate(self.keys):
if key is k:
return self.vals[i]
val = self.root.table[k]
assert val
return val
def tostring(self):
return "#<parameterization>"
# This will need to be thread-specific
top_level_config = W_Parameterization(RootParameterization(), [], [])
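# Lookup-order sketch (illustration only, using the classes defined above):
# extend() shadows a parameter's cell in the copied keys/vals lists, while
# get() falls back to the root table for parameters never re-parameterized.
#
#     p = W_Parameter(values.w_false)             # default cell lands in root.table
#     shadowed = top_level_config.extend([p], [values.w_void])
#     shadowed.get(p)           # -> fresh W_ThreadCell holding w_void
#     top_level_config.get(p)   # -> the original cell from root.table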
def find_param_cell(cont, param):
assert isinstance(cont, BaseCont)
p = cont.get_mark_first(values.parameterization_key)
assert isinstance(p, W_Parameterization)
assert isinstance(param, W_Parameter)
v = p.get(param)
assert isinstance(v, values.W_ThreadCell)
return v
@continuation
def param_set_cont(cell, env, cont, vals):
from pycket.interpreter import check_one_val, return_value
v = check_one_val(vals)
cell.set(v)
return return_value(values.w_void, env, cont)
# a token
class ParamKey(object):
pass
class W_BaseParameter(W_Object):
errorname = "parameter"
_attrs_ = ["guard"]
_immutable_fields_ = ["guard"]
def __init__(self, guard=None):
self.guard = None if guard is values.w_false else guard
def iscallable(self):
return True
def get_key(self):
raise NotImplementedError("abstract base class")
def tostring(self):
return "#<parameter-procedure>"
class W_Parameter(W_BaseParameter):
_immutable_fields_ = ["key"]
def __init__(self, val, guard=None):
W_BaseParameter.__init__(self, guard)
self.key = ParamKey()
cell = values.W_ThreadCell(val, True)
top_level_config.root.table[self.key] = cell
def get(self, cont):
return self.get_cell(cont).get()
def get_cell(self, cont):
cell = find_param_cell(cont, self)
assert isinstance(cell, values.W_ThreadCell)
return cell
def get_key(self):
return self.key
def call(self, args, env, cont):
from pycket.interpreter import return_value
if len(args) == 0:
return return_value(self.get(cont), env, cont)
elif len(args) == 1:
cell = find_param_cell(cont, self)
assert isinstance(cell, values.W_ThreadCell)
if self.guard:
return self.guard.call([args[0]], env, param_set_cont(cell, env, cont))
else:
cell.set(args[0])
return return_value(values.w_void, env, cont)
else:
raise SchemeException("wrong number of arguments to parameter")
class W_DerivedParameter(W_BaseParameter):
_immutable_fields_ = ["parameter", "wrap"]
def __init__(self, param, guard, wrap):
W_BaseParameter.__init__(self, guard)
self.parameter = param
self.wrap = wrap
def get(self, cont):
return self.parameter.get(cont)
def get_cell(self, cont):
return self.parameter.get_cell(cont)
def get_key(self):
return self.parameter.get_key()
def call(self, args, env, cont):
from pycket.interpreter import return_value
if len(args) == 0:
return self.parameter.call(args, env, call_cont(self.wrap, env, cont))
elif len(args) == 1:
if self.guard:
return self.guard.call(args, env,
call_cont(self.parameter, env, cont))
return self.parameter.call(args, env, cont)
else:
raise SchemeException("wrong number of arguments to parameter")
|
{
"content_hash": "c0ac21b4148cb25324b668c2cc27257f",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 90,
"avg_line_length": 31.51875,
"alnum_prop": 0.5984533016061868,
"repo_name": "krono/pycket",
"id": "286f7e29b3494263f570985990966cbcc0aedc85",
"size": "5044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycket/values_parameter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "654"
},
{
"name": "Eagle",
"bytes": "137124"
},
{
"name": "KiCad",
"bytes": "241342"
},
{
"name": "Makefile",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "734400"
},
{
"name": "Racket",
"bytes": "419818"
},
{
"name": "Scheme",
"bytes": "215"
},
{
"name": "Shell",
"bytes": "8512"
}
],
"symlink_target": ""
}
|
import re
from VerseTools import Verse
from SyllableTools import isVowel
from SyllableTools import isVisarga
from SyllableTools import isAnusvara
# since we only care about consonants for this figure, we need to get rid of vowels
def getConsonants(line):
consonants = []
for syllable in line:
for i in xrange(len(syllable)):
letter = syllable[i]
# only unique non-vowels and non-visarga/anusvāra
if (not isVowel(letter)) and (not isVisarga(letter)) and (not isAnusvara(letter)) and (not letter in consonants):
consonants.append(letter)
return consonants
# for now we will have a threshold of 4 consonants
def isNiyama(verse):
unique_syllables = []
for pada in verse:
consonants = getConsonants(pada)
if len(consonants) >= 4:
return False
return True
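# Usage sketch (input shape assumed from the functions above: a verse is an
# iterable of padas, each pada an iterable of syllable strings; assumes
# SyllableTools classifies 'a' and 'i' as vowels):
#
#     pada = ["ma", "la", "ti", "ma", "la"]
#     isNiyama([pada, pada, pada, pada])   # True: only m, l, t occur (< 4 consonants)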
|
{
"content_hash": "c36d7b6a50697601ffd03ecb003ff471",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 119,
"avg_line_length": 24.727272727272727,
"alnum_prop": 0.7218137254901961,
"repo_name": "imurchie/citra",
"id": "9b27fd9cfaffd7b15e5eea1667705f4f8b882376",
"size": "922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Niyama.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18680"
}
],
"symlink_target": ""
}
|
print ("gg")
|
{
"content_hash": "62e69b20ba97638a746d08786b009e70",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 12,
"avg_line_length": 12,
"alnum_prop": 0.5833333333333334,
"repo_name": "JaeGyu/PythonEx_1",
"id": "d6e20f1db31a2aeef418a2a5a0a28ee4d538d711",
"size": "37",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "20151212.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13683"
},
{
"name": "HTML",
"bytes": "85013"
},
{
"name": "Java",
"bytes": "247"
},
{
"name": "JavaScript",
"bytes": "188264"
},
{
"name": "Jupyter Notebook",
"bytes": "9391921"
},
{
"name": "Python",
"bytes": "162980"
},
{
"name": "Vue",
"bytes": "6803"
}
],
"symlink_target": ""
}
|
"""Utilities for interacting with Amazon s3 service."""
import re
import boto
from pinball.config.pinball_config import PinballConfig
from pinball.config.utils import get_log
__author__ = 'Mao Ye, Changshu Liu'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = ['Mao Ye', 'Changshu Liu']
__license__ = 'Apache'
__version__ = '2.0'
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
boto.config.set('Boto', 'http_socket_timeout', '180')
LOG = get_log('pinball.common.s3_utils')
def parse_s3_location(s3_location):
"""Parse s3_location to get the bucket name and the rest of the file path.
Args:
s3_location: A string in the form of:
's3n://<bucket_name>/<rest_of_the_file_path>'.
Returns:
bucket_name, rest_of_the_file_path
"""
try:
regex = r'\s*s3n://(.+?)/(.+)'
return re.match(regex, s3_location).groups()
except:
raise Exception('Invalid s3 location: %s' % s3_location)
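# Example (illustration only):
#
#     parse_s3_location('s3n://my-bucket/some/key/prefix')
#     # -> ('my-bucket', 'some/key/prefix')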
def get_s3_bucket(bucket_name):
"""Get the Boto s3 bucket reference for the given bucket_name.
Args:
bucket_name: name of s3 bucket.
Returns
s3 bucket object.
"""
connection = boto.connect_s3(PinballConfig.AWS_ACCESS_KEY_ID,
PinballConfig.AWS_SECRET_ACCESS_KEY)
assert connection
bucket = connection.get_bucket(bucket_name, validate=False)
return bucket
def delete_s3_directory(s3_directory):
"""Delete the given s3 directory."""
bucket_name, rest_of_dir_path = parse_s3_location(s3_directory)
bucket = get_s3_bucket(bucket_name)
rest_of_dir_path = rest_of_dir_path \
if rest_of_dir_path[-1] == '/' else rest_of_dir_path + '/'
bucket.delete_keys(bucket.list(prefix=rest_of_dir_path))
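# Example (hypothetical bucket and prefix): the trailing slash added above
# means only keys nested under the directory are listed and deleted.
#
#     delete_s3_directory('s3n://my-bucket/tmp/output')
#     # removes every key in 'my-bucket' whose name starts with 'tmp/output/'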
|
{
"content_hash": "9d1f89e3d3b1ac7b71f72868f650cad9",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 78,
"avg_line_length": 28.171875,
"alnum_prop": 0.6389351081530782,
"repo_name": "pinterest/pinball",
"id": "57f6148080f7a3aaff44b22b3deb76277557c4a0",
"size": "2384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pinball/common/s3_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "100206"
},
{
"name": "HTML",
"bytes": "63554"
},
{
"name": "Java",
"bytes": "3670"
},
{
"name": "JavaScript",
"bytes": "1026836"
},
{
"name": "Makefile",
"bytes": "1016"
},
{
"name": "Python",
"bytes": "895248"
},
{
"name": "Shell",
"bytes": "463"
},
{
"name": "Thrift",
"bytes": "8532"
}
],
"symlink_target": ""
}
|
from ethereum import tester
from datetime import datetime, date
import math
import pytest
slow = pytest.mark.slow
from utilRelay import dblSha256Flip, disablePyethLogging
disablePyethLogging()
class TestBtcChain(object):
CONTRACT_DEBUG = 'test/btcChain_debug.se'
ETHER = 10 ** 18
ANC_DEPTHS = [1, 4, 16, 64, 256, 1024, 4096, 16384]
def setup_class(cls):
tester.gas_limit = int(3.2e6) # include costs of debug methods
cls.s = tester.state()
cls.c = cls.s.abi_contract(cls.CONTRACT_DEBUG, endowment=2000*cls.ETHER)
cls.snapshot = cls.s.snapshot()
cls.seed = tester.seed
def setup_method(self, method):
self.s.revert(self.snapshot)
tester.seed = self.seed
@slow
def testAroundMoreDepths(self):
heaviest = 260
for i in range(1, heaviest+1):
self.c.funcSaveAncestors(i, i-1)
self.c.testingonlySetHeaviest(heaviest)
forkStartBlock = 999000
parentOfFork = 2
numBlocksInFork = 3
for i in range(numBlocksInFork):
self.c.funcSaveAncestors(forkStartBlock+i, parentOfFork)
parentOfFork = forkStartBlock
finalAncIndex = int(math.ceil(math.log(heaviest) / math.log(4))) # log base 4 of heaviest
# start at 1, instead of 0
for i in range(1, finalAncIndex):
depth = self.ANC_DEPTHS[i]
print('@@@@@@@@@@@@@@@@@@@ depth: '+str(depth))
assert self.c.inMainChain(depth-1) == 1
assert self.c.inMainChain(depth) == 1
assert self.c.inMainChain(depth+1) == 1
for i in range(numBlocksInFork):
assert self.c.inMainChain(forkStartBlock+i) == 0
def testAroundSomeDepths(self):
heaviest = 20
for i in range(1, heaviest+1):
self.c.funcSaveAncestors(i, i-1)
self.c.testingonlySetHeaviest(heaviest)
forkStartBlock = 999000
parentOfFork = 2
numBlocksInFork = 3
for i in range(numBlocksInFork):
self.c.funcSaveAncestors(forkStartBlock+i, parentOfFork)
parentOfFork = forkStartBlock
finalAncIndex = int(math.ceil(math.log(heaviest) / math.log(4))) # log base 4 of heaviest
# start at 1, instead of 0
for i in range(1, finalAncIndex):
depth = self.ANC_DEPTHS[i]
# print('@@@@@@@@@@@@@@@@@@@ depth: '+str(depth))
assert self.c.inMainChain(depth-1) == 1
assert self.c.inMainChain(depth) == 1
assert self.c.inMainChain(depth+1) == 1
for i in range(numBlocksInFork):
assert self.c.inMainChain(forkStartBlock+i) == 0
for i in range(1, heaviest+1):
assert self.c.getBlockHash(i) == i
def testTiny(self):
self.c.funcSaveAncestors(1, 0)
self.c.funcSaveAncestors(2, 1)
self.c.testingonlySetHeaviest(2)
assert self.c.inMainChain(1) == 1
assert self.c.inMainChain(2) == 1
def testNonExistingBlock(self):
self.c.funcSaveAncestors(1, 0)
self.c.funcSaveAncestors(2, 1)
self.c.testingonlySetHeaviest(2)
assert self.c.inMainChain(9876) == 0
assert self.c.inMainChain(-1) == 0
assert self.c.getBlockHash(9876) == 0
assert self.c.getBlockHash(0) == 0
assert self.c.getBlockHash(-1) == 0
@slow
def testPerfOfStore(self):
heaviest = 2020
for i in range(1, heaviest+1):
self.c.funcSaveAncestors(i, i-1)
self.c.testingonlySetHeaviest(heaviest)
# self.c.logAnc(heaviest)
for i in range(1, heaviest+1):
assert self.c.getBlockHash(i) == i
def testSmallChain(self):
heaviest = 5
for i in range(1, heaviest+1):
self.c.funcSaveAncestors(i, i-1)
self.c.testingonlySetHeaviest(heaviest)
forkStartBlock = 999000
parentOfFork = 2
numBlocksInFork = 3
for i in range(numBlocksInFork):
self.c.funcSaveAncestors(forkStartBlock+i, parentOfFork)
parentOfFork = forkStartBlock
for i in range(1, heaviest+1):
assert self.c.inMainChain(i) == 1
for i in range(numBlocksInFork):
assert self.c.inMainChain(forkStartBlock+i) == 0
def testShortFork(self):
heaviest = 5
for i in range(1, heaviest+1):
self.c.funcSaveAncestors(i, i-1)
self.c.testingonlySetHeaviest(heaviest)
self.c.funcSaveAncestors(30, 2)
self.c.funcSaveAncestors(31, 30)
self.c.funcSaveAncestors(32, 31)
for i in range(1, heaviest+1):
assert self.c.inMainChain(i) == 1
assert self.c.inMainChain(30) == 0
assert self.c.inMainChain(31) == 0
assert self.c.inMainChain(32) == 0
# for i in range(1, heaviest+1):
# self.c.logAnc(i)
# # self.c.logAnc(63)
# # self.c.logAnc(64)
# # self.c.logAnc(65)
# # self.c.logAnc(66)
#
# self.c.logBlockchainHead()
# heaviest is the "fork"
def testAltShortFork(self):
heaviest = 5
for i in range(1, heaviest+1):
self.c.funcSaveAncestors(i, i-1)
self.c.funcSaveAncestors(30, 2)
self.c.funcSaveAncestors(31, 30)
self.c.funcSaveAncestors(32, 31)
self.c.testingonlySetHeaviest(32)
for i in range(3, heaviest+1):
assert self.c.inMainChain(i) == 0
assert self.c.inMainChain(30) == 1
assert self.c.inMainChain(31) == 1
assert self.c.inMainChain(32) == 1
# 2 forks from block2
def testMultiShortFork(self):
heaviest = 5
for i in range(1, heaviest+1):
self.c.funcSaveAncestors(i, i-1)
# first fork
self.c.funcSaveAncestors(30, 2)
self.c.funcSaveAncestors(31, 30)
self.c.funcSaveAncestors(32, 31)
# second fork
self.c.funcSaveAncestors(300, 2)
self.c.funcSaveAncestors(310, 300)
self.c.funcSaveAncestors(320, 310)
self.c.testingonlySetHeaviest(heaviest)
for i in range(1, heaviest+1):
assert self.c.inMainChain(i) == 1
assert self.c.inMainChain(30) == 0
assert self.c.inMainChain(31) == 0
assert self.c.inMainChain(32) == 0
assert self.c.inMainChain(300) == 0
assert self.c.inMainChain(310) == 0
assert self.c.inMainChain(320) == 0
self.c.testingonlySetHeaviest(32)
for i in range(3, heaviest+1):
assert self.c.inMainChain(i) == 0
assert self.c.inMainChain(30) == 1
assert self.c.inMainChain(31) == 1
assert self.c.inMainChain(32) == 1
assert self.c.inMainChain(300) == 0
assert self.c.inMainChain(310) == 0
assert self.c.inMainChain(320) == 0
self.c.testingonlySetHeaviest(320)
for i in range(3, heaviest+1):
assert self.c.inMainChain(i) == 0
assert self.c.inMainChain(30) == 0
assert self.c.inMainChain(31) == 0
assert self.c.inMainChain(32) == 0
assert self.c.inMainChain(300) == 1
assert self.c.inMainChain(310) == 1
assert self.c.inMainChain(320) == 1
|
{
"content_hash": "f715ea820bf369198c8b5c6f23ca2264",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 97,
"avg_line_length": 27.896551724137932,
"alnum_prop": 0.5901661859634666,
"repo_name": "ethers/btcrelay",
"id": "c478154c221e335738eacf572460e844b7976800",
"size": "7281",
"binary": false,
"copies": "1",
"ref": "refs/heads/f2",
"path": "test/test_btcChain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "181546"
}
],
"symlink_target": ""
}
|
"""Converts an EFP potential to the corresponding
Polarizable Embedding potential
"""
import sys
import fileinput
import numpy
def labelize(label):
"""the goal here is to split the label into more
sensible parts (count and atom label)
"""
idx = int(label[1:3])
symbol = label[3:]
return (idx, symbol)
def labelize_polarizable_point(label):
"""The naming scheme for polarizable points is somewhat
ridiculous. CTn, n=1,2, ... here we label it X
"""
idx = int(label[2:])
symbol = 'X'
return (idx, symbol)
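# Examples (hypothetical labels, following the slicing above):
#
#     labelize('A01O1')                   # -> (1, 'O1')
#     labelize_polarizable_point('CT12')  # -> (12, 'X')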
def coordinates(line):
"""LABEL X Y Z WEIGHT CHARGE
"""
# from GAMESS, the labels are A**S where
# S is the symbol of the atom and ** are
# atom counters.
line = line[0]
data = line.split()
label = labelize(data[0])
return (label[0], label[1], map(float, data[1:]))
def monopole(line):
"""LABEL Q Z
the partial charge we are interested in is thus
Q + Z
"""
line = line[0]
data = line.split()
label = labelize(data[0])
values = float(data[1]) + float(data[2])
return (label[0], label[1], [values])
def dipole(line):
"""LABEL X Y Z
"""
line = line[0]
data = line.split()
label = labelize(data[0])
return (label[0], label[1], map(float, data[1:]))
def quadrupoles(line):
"""data comes in in multiple rows, parse it and
remove stuff we do not need
"""
data = []
for item in line:
data.extend(item.split())
#data = line.split()
data.pop(5) # remove the ">"
label = labelize(data[0])
return (label[0], label[1], map(float, data[1:]))
def octopoles(line):
"""data comes in in multiple rows, parse it and
remove stuff we do not need
"""
data = []
for item in line:
data.extend(item.split())
data.pop(10) # remove the ">"
data.pop(5) # remove the ">"
label = labelize(data[0])
return (label[0], label[1], map(float, data[1:]))
def polarizable_point(line):
"""Contrary to EFP, the PE model uses symmetric polarizability
tensors. Let us symmetrise them here and store them that way.
the format is
LABEL X Y Z
followed by components of the tensor
XX XY XZ
YX YY YZ
ZX ZY ZZ
"""
data = []
for item in line:
data.extend(item.split())
data.pop(13) # remove the ">"
data.pop(8) # remove the ">"
label = labelize_polarizable_point(data[0])
data = map(float, data[1:])
polt = numpy.array(data[3:])
polt = numpy.reshape(polt,(3,3))
    # symmetrise by averaging the tensor with its transpose
    symt = (polt + polt.transpose()) * 0.5
symt = numpy.ravel(symt)
# this extracts the upper half triangle of the symmetric matrix
data = data[0:3]
data.extend(symt[numpy.array([True, True, True, False, True, True, False, False, True])])
return (label[0], label[1], data)
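# Worked illustration of the symmetrisation and the boolean mask above
# (hypothetical numbers):
#
#     A = [[1, 2, 3],                    [[1, 3, 5],
#          [4, 5, 6],    (A + A^T)/2  =   [3, 5, 7],
#          [7, 8, 9]]                     [5, 7, 9]]
#
# The mask on the raveled 3x3 then keeps xx, xy, xz, yy, yz, zz -> [1, 3, 5, 5, 7, 9].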
# keys to parse
# pointer to function that will parse
# number of lines to read before parsing
keys = {'COORDINATES':(coordinates, 1),
'MONOPOLES':(monopole,1),
'DIPOLES':(dipole,1),
'QUADRUPOLES':(quadrupoles,2),
'OCTUPOLES':(octopoles,3),
'POLARIZABLE POINT':(polarizable_point,4)}
values = {}
for key in keys:
if not values.has_key(key):
values[key] = {}
def main():
read_file()
dump_pe_potential()
def read_file():
parsing = False
parser = None
line_data = []
multiline_counter = 0
multiline_count = 0
for line in fileinput.input():
if parsing and "STOP" in line:
parsing = False
parser = None
multiline_counter = 0
if not parsing:
for key in keys:
if key in line and not 'DYN' in line:
parsing = True
parser = keys[key][0]
multiline_count = keys[key][1]
if parsing: break
if parsing: continue
if parsing and multiline_counter < multiline_count:
multiline_counter += 1
line_data.append(line)
if parsing and multiline_counter == multiline_count:
idx,symbol,data = parser(line_data)
values[key][idx] = data
line_data = []
multiline_counter = 0
def dump_pe_potential():
"""The format for the PE potential file is straightforward
first comes coordinates in xyz-format with a label stating
either AU (atomic units) or AA (Angstrom)
2nd is monopoles
3rd is dipoles
4th is quadrupoles
5th is octopoles
6th is alphas (symmetric!)
7th is exclusion list
"""
    # we have to make sure the order of the data is OK
nat = 3
npol= 4
atomids = range(1,nat+1)
print "coordinates"
print "%i" % (nat+npol)
print "AU"
for idx in atomids:
data = values['COORDINATES'][idx]
print "%i %21.12f %21.12f %21.12f" % (idx, data[0], data[1], data[2])
for idx in range(1,npol+1):
data = values['POLARIZABLE POINT'][idx][0:3]
print "%i" % (idx+nat),
for item in data:
print " %20.12f" % (item),
print
print "monopoles"
print "%i" % nat
for idx in atomids:
data = values['MONOPOLES'][idx]
print "%2i %21.12f" % (idx, data[0])
print "dipoles"
print "%i" % nat
for idx in atomids:
data = values['DIPOLES'][idx]
print "%2i %21.12f %21.12f %21.12f" % (idx, data[0], data[1], data[2])
print "quadrupoles"
print "%i" % nat
for idx in atomids:
data = values['QUADRUPOLES'][idx]
print "%2i" % (idx),
for item in data:
print " %20.12f" % (item),
print
print "octopoles"
print "%i" % nat
for idx in atomids:
data = values['OCTUPOLES'][idx]
print "%2i" % (idx),
for item in data:
print " %20.12f" % (item),
print
print "alphas"
print "%i" % npol
for idx in range(1,npol+1):
data = values['POLARIZABLE POINT'][idx][3:]
print "%2i" % (idx+nat),
for item in data:
print " %20.12f" % (item),
print
# the exclusion list avoids self-interaction
# to make a fragment not interact with it self
# we basically permute all points and print it
print "exclists"
print "%i" % (nat+npol)
datas = range(1,nat+npol+1)
for k in range(nat+npol):
for item in datas:
print "%i" % (item),
print
datas.append(datas.pop(0))
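# For example, with nat + npol == 3 the loop above prints the rotations
#     1 2 3
#     2 3 1
#     3 1 2
# i.e. each line lists one site followed by the other sites of the same
# fragment it must not polarize, so the fragment never interacts with itself.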
if __name__ == '__main__':
main()
|
{
"content_hash": "ec7a0d793a506e84f3bd88713f3c6db9",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 93,
"avg_line_length": 27.848101265822784,
"alnum_prop": 0.5642424242424242,
"repo_name": "cstein/dalton-scripts",
"id": "2332493972900b132559cfe0eb2433a99b7f7e17",
"size": "6622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/efp2pot.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "14015"
}
],
"symlink_target": ""
}
|
from csnAll import *
from csnAPIPublic import GetAPI
import os.path
api = GetAPI("2.5.0")
# define project
dummyLib = api.CreateStandardModuleProject("DummyLib", "library")
# dependencies
# (four depends on three that should be also added implicitly)
dummyLib.AddProjects([two, four])
# source folders
dummyLib.AddLibraryModules(["dummyLib"])
# applications
dummyLib.AddApplications(["myApp"])
# tests
dummyLib.AddTests(["tests/DummyTest/*.h"], cxxTest)
# creates a dependency on thirdParty/cmakeMacros/PCHSupport_26.cmake
found = dummyLib.Glob("*PCH.h")
pchFile = os.path.basename(found[0])
dummyLib.SetPrecompiledHeader(pchFile)
# add compiler definitions
if api.GetCompiler().TargetIsWindows():
dummyLib.AddDefinitions(["-W4 -WX"], private = 1)
else:
dummyLib.AddDefinitions(["-Wall -Werror"], private = 1)
|
{
"content_hash": "1bf95522abee4dbac652f7dadc2201a9",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 68,
"avg_line_length": 32.76,
"alnum_prop": 0.7594627594627594,
"repo_name": "msteghofer/CSnake",
"id": "49e929d94f92f7e4f79821e8741571150f6b57d1",
"size": "848",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/data/src_mix/DummyLib/csnDummyLib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1003"
},
{
"name": "C++",
"bytes": "15104"
},
{
"name": "CMake",
"bytes": "67110"
},
{
"name": "NSIS",
"bytes": "6255"
},
{
"name": "Objective-C",
"bytes": "495"
},
{
"name": "Python",
"bytes": "465230"
},
{
"name": "Shell",
"bytes": "685"
}
],
"symlink_target": ""
}
|
import csv
import os
import pandas as pd
import unittest
from india.geo.states import IndiaStatesMapper
class TestPreprocess(unittest.TestCase):
def test_get_state_name_to_iso_code_mapping(self):
self.assertEqual(
IndiaStatesMapper.get_state_name_to_iso_code_mapping(
"DADRA AND NAGAR HAVELI AND DAMAN AND DIU"), "IN-DH")
self.assertEqual(
IndiaStatesMapper.get_state_name_to_iso_code_mapping(
"ANDHRA PRADESH"), "IN-AP")
self.assertEqual(
IndiaStatesMapper.get_state_name_to_iso_code_mapping(
"DADRA AND NAGAR HAVELI"), "IN-DN")
self.assertEqual(
IndiaStatesMapper.get_state_name_to_iso_code_mapping(
"JAMMU & KASHMIR"), "IN-JK")
def test_get_state_name_to_census2001_code_mapping(self):
        # In 2001, LADAKH was part of JAMMU AND KASHMIR, so it should return the code of JAMMU AND KASHMIR
self.assertEqual(
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"LADAKH", district_name=None),
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"JAMMU AND KASHMIR", district_name=None))
self.assertEqual(
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"JAMMU AND KASHMIR"), "01")
        # In 2001, TELANGANA was part of ANDHRA PRADESH, so it should return the code of ANDHRA PRADESH
self.assertEqual(
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"TELANGANA", district_name=None),
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"ANDHRA PRADESH", district_name=None))
self.assertEqual(
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"TELANGANA"), "28")
        # In 2001, Uttarakhand was known as Uttaranchal
self.assertEqual(
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"UTTARANCHAL", district_name=None),
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"UTTARAKHAND", district_name=None))
self.assertEqual(
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"UTTARANCHAL"), "05")
# In 2001, district DADRA AND NAGAR HAVELI" was part of UT "DADRA AND NAGAR HAVELI"
self.assertEqual(
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"THE DADRA AND NAGAR HAVELI AND DAMAN AND DIU",
district_name="DADRA AND NAGAR HAVELI"),
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"DADRA AND NAGAR HAVELI", district_name=None))
self.assertEqual(
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"DADRA AND NAGAR HAVELI"), "26")
# In 2001, district DAMAN was part of UT "DAMAN AND DIU"
self.assertEqual(
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"THE DADRA AND NAGAR HAVELI AND DAMAN AND DIU",
district_name="DAMAN"),
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"DAMAN AND DIU", district_name=None))
self.assertEqual(
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"DAMAN AND DIU"), "25")
# In 2001, district DIU was part of UT "DAMAN AND DIU"
self.assertEqual(
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"THE DADRA AND NAGAR HAVELI AND DAMAN AND DIU",
district_name="DIU"),
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"DAMAN AND DIU", district_name=None))
self.assertEqual(
IndiaStatesMapper.get_state_name_to_census2001_code_mapping(
"MAHARASHTRA"), "27")
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "eac28b6bcf1af4bbd7a84416816292e5",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 106,
"avg_line_length": 44.72527472527472,
"alnum_prop": 0.6191646191646192,
"repo_name": "datacommonsorg/data",
"id": "73a760d38c1bd8edc970f3e57aba00b71f1510c3",
"size": "4646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/india/geo/states_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "78"
},
{
"name": "Go",
"bytes": "51446"
},
{
"name": "HTML",
"bytes": "32842253"
},
{
"name": "JavaScript",
"bytes": "458"
},
{
"name": "Jupyter Notebook",
"bytes": "5088443"
},
{
"name": "Python",
"bytes": "3723204"
},
{
"name": "R",
"bytes": "28607"
},
{
"name": "Shell",
"bytes": "25468"
},
{
"name": "TypeScript",
"bytes": "13472"
}
],
"symlink_target": ""
}
|
import unittest
import os
# Add parent dir to path to import utils
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
'..')))
from test_cases import utils
class CRMBatch(unittest.TestCase):
@utils.allow(apis=['crm'], capabilities=['can_crm_batch'])
def test_batch(self):
data = {
'requests': [{
'id': '12345',
'method': 'GET',
'url_data': {
'raw_type': 'Account'
}
}]
}
response = self.account.crm_batch.create(data=data)
self.assertTrue(response['responses'] or response['errors'])
def test_cases():
return [utils.create_test_case(acc, CRMBatch) for acc in utils.accounts]
if __name__ == '__main__':
suite = utils.create_suite(test_cases())
unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "42002d3ee06f9c7d5fec6ce071e83ba3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 27.515151515151516,
"alnum_prop": 0.5627753303964758,
"repo_name": "Kloudless/kloudless-python",
"id": "ae06479d86144c4b372e3765d7e3e7bb3dff98ab",
"size": "908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/crm_api/test_batch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "167047"
},
{
"name": "Shell",
"bytes": "121"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2015-2022 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from collections import namedtuple
import json
from pathlib import Path
from typing import Any, Dict
from atomic_reactor.plugins.fetch_docker_archive import FetchDockerArchivePlugin
from atomic_reactor.plugins.add_help import AddHelpPlugin
import koji
import koji_cli.lib
import os
import requests
from atomic_reactor.plugins.gather_builds_metadata import GatherBuildsMetadataPlugin
from atomic_reactor.plugins.koji_import import (
KojiImportPlugin,
KojiImportSourceContainerPlugin,
escape_non_printable_chars,
)
from atomic_reactor.plugins.rpmqa import RPMqaPlugin
from atomic_reactor.plugins.add_filesystem import AddFilesystemPlugin
from atomic_reactor.plugins.fetch_sources import PLUGIN_FETCH_SOURCES_KEY
from atomic_reactor.plugin import PluginFailedException
from atomic_reactor.inner import DockerBuildWorkflow, TagConf
from atomic_reactor.util import (ManifestDigest, DockerfileImages,
get_manifest_media_version,
graceful_chain_get, RegistryClient)
from atomic_reactor.source import GitSource, PathSource
from atomic_reactor.constants import (IMAGE_TYPE_DOCKER_ARCHIVE, KOJI_BTYPE_OPERATOR_MANIFESTS,
PLUGIN_ADD_FILESYSTEM_KEY,
PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY,
PLUGIN_MAVEN_URL_SOURCES_METADATA_KEY,
PLUGIN_GROUP_MANIFESTS_KEY, PLUGIN_KOJI_PARENT_KEY,
PLUGIN_RESOLVE_COMPOSES_KEY, BASE_IMAGE_KOJI_BUILD,
PARENT_IMAGES_KOJI_BUILDS, BASE_IMAGE_BUILD_ID_KEY,
PLUGIN_PIN_OPERATOR_DIGESTS_KEY,
PLUGIN_PUSH_OPERATOR_MANIFESTS_KEY,
PLUGIN_RESOLVE_REMOTE_SOURCE,
PLUGIN_VERIFY_MEDIA_KEY, PARENT_IMAGE_BUILDS_KEY,
PARENT_IMAGES_KEY, OPERATOR_MANIFESTS_ARCHIVE,
REMOTE_SOURCE_TARBALL_FILENAME,
REMOTE_SOURCE_JSON_FILENAME,
MEDIA_TYPE_DOCKER_V2_SCHEMA2,
MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST,
KOJI_BTYPE_REMOTE_SOURCE_FILE,
KOJI_KIND_IMAGE_BUILD,
KOJI_KIND_IMAGE_SOURCE_BUILD,
KOJI_SUBTYPE_OP_APPREGISTRY,
KOJI_SUBTYPE_OP_BUNDLE,
KOJI_SOURCE_ENGINE,
DOCKERFILE_FILENAME,
REPO_CONTAINER_CONFIG, PLUGIN_CHECK_AND_SET_PLATFORMS_KEY,
PLUGIN_FETCH_MAVEN_KEY, KOJI_METADATA_FILENAME)
from atomic_reactor.utils.flatpak_util import FlatpakUtil
from tests.flatpak import (MODULEMD_AVAILABLE,
setup_flatpak_composes,
setup_flatpak_compose_info)
from tests.mock_env import MockEnv
from tests.util import add_koji_map_in_workflow
from tests.constants import OSBS_BUILD_LOG_FILENAME
from flexmock import flexmock
import pytest
import subprocess
from osbs.api import OSBS
from osbs.exceptions import OsbsException
from osbs.utils import ImageName
LogEntry = namedtuple('LogEntry', ['platform', 'line'])
NAMESPACE = 'mynamespace'
PIPELINE_RUN_NAME = 'test-pipeline-run'
SOURCES_FOR_KOJI_NVR = 'component-release-version'
SOURCES_SIGNING_INTENT = 'some_intent'
REMOTE_SOURCE_FILE_FILENAME = 'pnc-sources.tar.gz'
PUSH_OPERATOR_MANIFESTS_RESULTS = {
"endpoint": 'registry.url/endpoint',
"registryNamespace": 'test_org',
"repository": 'test_repo',
"release": 'test_release',
}
TIME = '2022-05-27T01:46:50Z'
class MockedClientSession(object):
TAG_TASK_ID = 1234
DEST_TAG = 'images-candidate'
def __init__(self, hub, opts=None, task_states=None):
self.metadata: Dict[str, Any] = {}
# destination filename on Koji => file content
self.uploaded_files: Dict[str, bytes] = {}
self.build_tags = {}
self.task_states = task_states or ['OPEN']
self.task_states = list(self.task_states)
self.task_states.reverse()
self.tag_task_state = self.task_states.pop()
self.getLoggedInUser = lambda: {'name': 'osbs'}
self.blocksize = None
self.server_dir = None
self.refunded_build = False
self.fail_state = None
def krb_login(self, principal=None, keytab=None, proxyuser=None):
return True
def ssl_login(self, cert, ca, serverca, proxyuser=None):
return True
def logout(self):
pass
def uploadWrapper(self, localfile, path, name=None, callback=None,
blocksize=1048576, overwrite=True):
self.blocksize = blocksize
with open(localfile, 'rb') as fp:
self.uploaded_files[name] = fp.read()
def CGImport(self, metadata, server_dir, token=None):
# metadata cannot be defined in __init__ because tests assume
# the attribute will not be defined unless this method is called
self.metadata = json.loads(
self.uploaded_files[KOJI_METADATA_FILENAME]
) # pylint: disable=attribute-defined-outside-init
self.server_dir = server_dir
return {"id": "123"}
def getBuildTarget(self, target):
return {'dest_tag_name': self.DEST_TAG}
def getTaskInfo(self, task_id, request=False):
assert task_id == self.TAG_TASK_ID
# For extra code coverage, imagine Koji denies the task ever
# existed.
if self.tag_task_state is None:
return None
return {'state': koji.TASK_STATES[self.tag_task_state], 'owner': 1234}
def getUser(self, user_id):
return {'name': 'osbs'}
def taskFinished(self, task_id):
try:
self.tag_task_state = self.task_states.pop()
except IndexError:
# No more state changes
pass
return self.tag_task_state in ['CLOSED', 'FAILED', 'CANCELED', None]
FAKE_SIGMD5 = b'0' * 32
FAKE_RPM_OUTPUT = (
b'name1;1.0;1;x86_64;0;' + FAKE_SIGMD5 + b';(none);'
b'RSA/SHA256, Mon 29 Jun 2015 13:58:22 BST, Key ID abcdef01234567\n'
b'gpg-pubkey;01234567;01234567;(none);(none);(none);(none);(none)\n'
b'gpg-pubkey-doc;01234567;01234567;noarch;(none);' + FAKE_SIGMD5 +
b';(none);(none)\n'
b'name2;2.0;2;x86_64;0;' + FAKE_SIGMD5 + b';' +
b'RSA/SHA256, Mon 29 Jun 2015 13:58:22 BST, Key ID bcdef012345678;(none)\n'
b'\n')
FAKE_OS_OUTPUT = 'fedora-22'
REGISTRY = 'docker.example.com'
def fake_subprocess_output(cmd):
if cmd.startswith('/bin/rpm'):
return FAKE_RPM_OUTPUT
elif 'os-release' in cmd:
return FAKE_OS_OUTPUT
else:
raise RuntimeError
class MockedPopen(object):
def __init__(self, cmd, *args, **kwargs):
self.cmd = cmd
def wait(self):
return 0
def communicate(self):
return (fake_subprocess_output(self.cmd), '')
def fake_Popen(cmd, *args, **kwargs):
return MockedPopen(cmd, *args, **kwargs)
def fake_digest(image):
tag = image.to_str(registry=False)
return 'sha256:{0:032x}'.format(len(tag))
def is_string_type(obj):
return isinstance(obj, str)
class MockResponse(object):
def __init__(self, build_json=None):
self.json = build_json
def get_annotations(self):
return graceful_chain_get(self.json, "metadata", "annotations")
class BuildInfo(object):
def __init__(self, help_file=None, help_valid=True, media_types=None, digests=None):
self.annotations = {}
if media_types:
self.annotations['media-types'] = json.dumps(media_types)
if help_valid:
self.annotations['help_file'] = json.dumps(help_file)
if digests:
digest_annotation = []
for digest_item in digests:
digest_annotation_item = {
"version": get_manifest_media_version(digest_item),
"digest": digest_item.default,
}
digest_annotation.append(digest_annotation_item)
self.annotations['digests'] = json.dumps(digest_annotation)
self.build = MockResponse({'metadata': {'annotations': self.annotations}})
def mock_reactor_config(workflow, allow_multiple_remote_sources=False):
config = {'version': 1, 'koji': {'hub_url': '/',
'root_url': '',
'auth': {}
},
'allow_multiple_remote_sources': allow_multiple_remote_sources,
'openshift': {'url': 'openshift_url'},
'registry': {'url': REGISTRY, 'insecure': False},
}
workflow.conf.conf = config
def mock_environment(workflow: DockerBuildWorkflow, source_dir: Path,
session=None, name=None, oci=False,
component=None, version=None, release=None,
source=None, build_process_failed=False, build_process_canceled=False,
additional_tags=None, has_config=None, add_tag_conf_primaries=True,
container_first=False, yum_repourls=None,
has_op_appregistry_manifests=False,
has_op_bundle_manifests=False,
push_operator_manifests_enabled=False, source_build=False,
has_remote_source=False, has_remote_source_file=False,
has_pnc_build_metadata=False, scratch=False):
if session is None:
session = MockedClientSession('')
if source is None:
source = GitSource('git', 'git://hostname/path')
platforms = ['x86_64']
workflow.data.plugins_results[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY] = platforms
workflow.data.plugins_results[PLUGIN_RESOLVE_COMPOSES_KEY] = {'composes': []}
mock_reactor_config(workflow)
workflow.user_params['scratch'] = scratch
if yum_repourls:
workflow.data.all_yum_repourls = yum_repourls
workflow.data.dockerfile_images = DockerfileImages(['Fedora:22'])
flexmock(workflow.imageutil).should_receive('base_image_inspect').and_return({})
setattr(workflow.data, 'tag_conf', TagConf())
setattr(workflow.data, 'reserved_build_id ', None)
setattr(workflow.data, 'reserved_token', None)
with open(source_dir / DOCKERFILE_FILENAME, 'wt') as df:
df.write('FROM base\n'
'LABEL BZComponent={component} com.redhat.component={component}\n'
'LABEL Version={version} version={version}\n'
'LABEL Release={release} release={release}\n'
.format(component=component, version=version, release=release))
if has_op_appregistry_manifests:
df.write('LABEL com.redhat.delivery.appregistry=true\n')
if has_op_bundle_manifests:
df.write('LABEL com.redhat.delivery.operator.bundle=true\n')
if container_first:
with open(source_dir / REPO_CONTAINER_CONFIG, 'wt') as container_conf:
container_conf.write('go:\n'
' modules:\n'
' - module: example.com/packagename\n')
tag_conf = workflow.data.tag_conf
if name and version:
tag_conf.add_unique_image('{}:{}-timestamp'.format(name, version))
if name and version and release and add_tag_conf_primaries:
tag_conf.add_primary_image("{0}:{1}-{2}".format(name, version, release))
tag_conf.add_floating_image(f"{name}:{version}")
tag_conf.add_floating_image(f"{name}:latest")
if additional_tags:
image: str
for image in [f"{name}:{tag}" for tag in additional_tags]:
tag_conf.add_floating_image(image)
flexmock(subprocess, Popen=fake_Popen)
flexmock(koji, ClientSession=lambda hub, opts: session)
(flexmock(GitSource)
.should_receive('path')
.and_return(str(source_dir)))
logs = {
"taskRun1": {"containerA": "log message A", "containerB": "log message B"},
"taskRun2": {"containerC": "log message C"},
}
(flexmock(OSBS)
.should_receive('get_build_logs')
.with_args(PIPELINE_RUN_NAME)
.and_return(logs))
start_time_json = {'status': {'startTime': TIME}}
(flexmock(OSBS)
.should_receive('get_build')
.with_args(PIPELINE_RUN_NAME)
.and_return(start_time_json))
setattr(workflow, 'source', source)
flexmock(workflow.source).should_receive('commit_id').and_return('123456')
workflow.build_dir.init_build_dirs(platforms, workflow.source)
def custom_get(method, url, headers, **kwargs):
if url == manifest_url:
return manifest_response
if url == config_blob_url:
return config_blob_response
for image in tag_conf.images:
if oci:
digest = ManifestDigest(v1='sha256:not-used',
oci=fake_digest(image))
else:
digest = ManifestDigest(v1='sha256:not-used',
v2=fake_digest(image))
(flexmock(RegistryClient)
.should_receive('get_manifest_digests')
.and_return(digest))
manifest_response = requests.Response()
MEDIA_TYPE = 'application/vnd.oci.image.manifest.v1+json'
manifest_json = {
"schemaVersion": 2,
"mediaType": MEDIA_TYPE,
"config": {
"mediaType": MEDIA_TYPE,
"digest": fake_digest(image),
"size": 314
},
}
(flexmock(manifest_response,
raise_for_status=lambda: None,
json=manifest_json,
headers={
'Content-Type': MEDIA_TYPE,
'Docker-Content-Digest': fake_digest(image)
}))
if oci:
digest_str = digest.oci
else:
digest_str = digest.v2
manifest_url = "https://{}/v2/{}/manifests/{}".format(REGISTRY, image.to_str(tag=False),
digest_str)
config_blob_url = "https://{}/v2/{}/blobs/{}".format(REGISTRY, image.to_str(tag=False),
digest_str)
if has_config:
config_json = {'config': {'architecture': 'x86_64'},
'container_config': {}}
else:
config_json = None
config_blob_response = requests.Response()
(flexmock(config_blob_response, raise_for_status=lambda: None, json=config_json))
(flexmock(requests.Session)
.should_receive('request')
.replace_with(custom_get))
if not source_build:
workflow.data.plugins_results[FetchDockerArchivePlugin.key] = {
workflow.build_dir.any_platform.platform: {"type": IMAGE_TYPE_DOCKER_ARCHIVE}
}
build_dir_path = workflow.build_dir.any_platform.path
image_tar = build_dir_path / 'image.tar.gz'
image_tar.write_text('x' * 2**12, "utf-8")
workflow.data.plugin_failed = build_process_failed
if build_process_failed and build_process_canceled:
workflow.data.task_canceled = True
workflow.prebuild_plugins_conf = {}
workflow.data.plugins_results[PLUGIN_FETCH_SOURCES_KEY] = {
'sources_for_nvr': SOURCES_FOR_KOJI_NVR,
'signing_intent': SOURCES_SIGNING_INTENT,
}
workflow.data.plugins_results[RPMqaPlugin.key] = [
"name1;1.0;1;x86_64;0;2000;" + FAKE_SIGMD5.decode() + ";23000;"
"RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abc;(none)",
"name2;2.0;1;x86_64;0;3000;" + FAKE_SIGMD5.decode() + ";24000"
"RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abd;(none)",
]
workflow.data.plugins_results[GatherBuildsMetadataPlugin.key] = {
'x86_64': {
'buildroots': [
{
'container': {
'type': 'none',
'arch': 'x86_64'
},
'content_generator': {
'version': '1.6.23',
'name': 'atomic-reactor'
},
'host': {
'os': 'Red Hat Enterprise Linux Server 7.3 (Maipo)',
'arch': 'x86_64'
},
'id': 1,
'components': [],
'tools': [],
}
],
'metadata_version': 0,
'output': [
{
'type': 'log',
'arch': 'noarch',
'filename': 'openshift-final.log',
'filesize': 106690,
'checksum': '2efa754467c0d2ea1a98fb8bfe435955',
'checksum_type': 'md5',
'buildroot_id': 1
},
{
'type': 'log',
'arch': 'noarch',
'filename': 'build.log',
'filesize': 1660,
'checksum': '8198de09fc5940cf7495e2657039ee72',
'checksum_type': 'md5',
'buildroot_id': 1
},
{
'extra': {
'image': {
'arch': 'x86_64'
},
'docker': {
'repositories': [
'docker-registry.example.com:8888/myproject/hello-world:unique-tag',
'docker-registry.example.com:8888/myproject/hello-world@sha256:...',
],
'parent_id': 'sha256:bf203442',
'id': '123456',
}
},
'checksum': '58a52e6f3ed52818603c2744b4e2b0a2',
'filename': 'test.x86_64.tar.gz',
'buildroot_id': 1,
'components': [
{
'name': 'tzdata',
'sigmd5': 'd9dc4e4f205428bc08a52e602747c1e9',
'arch': 'noarch',
'epoch': None,
'version': '2016d',
'signature': '199e2f91fd431d51',
'release': '1.el7',
'type': 'rpm'
},
{
'name': 'setup',
'sigmd5': 'b1e5ca72c71f94112cd9fb785b95d4da',
'arch': 'noarch',
'epoch': None,
'version': '2.8.71',
'signature': '199e2f91fd431d51',
'release': '6.el7',
'type': 'rpm'
},
],
'type': 'docker-image',
'checksum_type': 'md5',
'arch': 'x86_64',
'filesize': 71268781
}
]
}
}
if has_op_appregistry_manifests or has_op_bundle_manifests:
build_dir_path = workflow.build_dir.any_platform.path
archive_file = build_dir_path / OPERATOR_MANIFESTS_ARCHIVE
archive_file.write_bytes(b'20220329')
results = workflow.data.plugins_results
results[PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY] = str(archive_file)
if has_remote_source:
source_path = build_dir_path / REMOTE_SOURCE_TARBALL_FILENAME
source_path.write_text('dummy file', 'utf-8')
remote_source_result = [
{
"name": None,
"url": "https://cachito.com/api/v1/requests/21048/download",
"remote_source_json": {
"filename": REMOTE_SOURCE_JSON_FILENAME,
"json": {"stub": "data"},
},
"remote_source_tarball": {
"filename": REMOTE_SOURCE_TARBALL_FILENAME,
"path": str(source_path),
},
}
]
workflow.data.plugins_results[PLUGIN_RESOLVE_REMOTE_SOURCE] = remote_source_result
else:
workflow.data.plugins_results[PLUGIN_RESOLVE_REMOTE_SOURCE] = None
if has_remote_source_file:
filepath = build_dir_path / REMOTE_SOURCE_FILE_FILENAME
filepath.write_text('dummy file', 'utf-8')
workflow.data.plugins_results[PLUGIN_MAVEN_URL_SOURCES_METADATA_KEY] = {
'remote_source_files': [
{
'file': str(filepath),
'metadata': {
'type': KOJI_BTYPE_REMOTE_SOURCE_FILE,
'checksum_type': 'md5',
'checksum': '5151c',
'filename': REMOTE_SOURCE_FILE_FILENAME,
'filesize': os.path.getsize(filepath),
'extra': {
'source-url': 'example.com/dummy.tar.gz',
'artifacts': [
{
'url': 'example.com/dummy.jar',
'checksum_type': 'md5',
'checksum': 'abc',
'filename': 'dummy.jar'
}
],
'typeinfo': {
KOJI_BTYPE_REMOTE_SOURCE_FILE: {}
},
},
}
}
],
}
workflow.data.plugins_results[PLUGIN_FETCH_MAVEN_KEY] = {
'no_source': [{
'url': 'example.com/dummy-no-source.jar',
'checksum_type': 'md5',
'checksum': 'abc',
'filename': 'dummy-no-source.jar'
}],
}
if has_pnc_build_metadata:
workflow.data.plugins_results[PLUGIN_FETCH_MAVEN_KEY] = {
'pnc_build_metadata': {
'builds': [
{'id': 12345},
{'id': 12346}
]
}
}
if push_operator_manifests_enabled:
workflow.data.plugins_results[PLUGIN_PUSH_OPERATOR_MANIFESTS_KEY] = \
PUSH_OPERATOR_MANIFESTS_RESULTS
@pytest.fixture
def workflow(workflow):
"""Add additional data to provide pipeline_run_name specifically."""
workflow.user_params.update({
'koji_task_id': MockedClientSession.TAG_TASK_ID,
})
return workflow
@pytest.fixture
def _os_env(monkeypatch):
monkeypatch.setenv('OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE', 'buildroot:latest')
def create_runner(workflow, ssl_certs=False, principal=None,
keytab=None, target=None, blocksize=None,
upload_plugin_name=KojiImportPlugin.key, userdata=None):
args = {}
if target:
args['target'] = target
args['poll_interval'] = 0
if blocksize:
args['blocksize'] = blocksize
if userdata:
args['userdata'] = userdata
add_koji_map_in_workflow(workflow, hub_url='',
ssl_certs_dir='/' if ssl_certs else None,
krb_principal=principal,
krb_keytab=keytab)
return (MockEnv(workflow)
.for_plugin(upload_plugin_name, args=args)
.create_runner())
@pytest.mark.usefixtures('user_params')
class TestKojiImport(object):
def test_koji_import_get_buildroot(self, workflow, source_dir):
metadatas = {
'ppc64le': {
'buildroots': [
{
'container': {
'type': 'docker',
'arch': 'ppc64le'
},
'id': 1
}
],
},
'x86_64': {
'buildroots': [
{
'container': {
'type': 'docker',
'arch': 'x86_64'
},
'id': 1
}
],
},
}
results = [
{
'container': {
'arch': 'ppc64le',
'type': 'docker',
},
'id': 1,
},
{
'container': {
'arch': 'x86_64',
'type': 'docker',
},
'id': 1,
},
]
session = MockedClientSession('')
mock_environment(workflow, source_dir, session=session, build_process_failed=True,
name='ns/name', version='1.0', release='1')
add_koji_map_in_workflow(workflow, hub_url='')
workflow.data.plugins_results[GatherBuildsMetadataPlugin.key] = metadatas
plugin = KojiImportPlugin(workflow)
assert plugin.get_buildroot() == results
def test_koji_import_no_build_metadata(self, workflow, source_dir):
mock_environment(workflow, source_dir, name='ns/name', version='1.0', release='1')
runner = create_runner(workflow)
# No metadata
workflow.user_params = {}
workflow.pipeline_run_name = None
with pytest.raises(PluginFailedException):
runner.run()
def test_koji_import_wrong_source_type(self, workflow, source_dir):
source = PathSource('path', f'file://{source_dir}')
mock_environment(workflow, source_dir, name='ns/name', version='1.0', release='1')
runner = create_runner(workflow)
setattr(workflow, 'source', source)
with pytest.raises(PluginFailedException) as exc:
runner.run()
assert "plugin 'koji_import' raised an exception: RuntimeError" in str(exc.value)
@pytest.mark.parametrize(('isolated'), [
False,
True,
None
])
def test_isolated_metadata_json(self, workflow, source_dir, isolated):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
session=session, name='ns/name', version='1.0', release='1')
runner = create_runner(workflow)
if isolated is not None:
workflow.user_params['isolated'] = isolated
runner.run()
build_metadata = session.metadata['build']['extra']['image']['isolated']
if isolated:
assert build_metadata is True
else:
assert build_metadata is False
@pytest.mark.parametrize(('userdata'), [
None,
{},
{'custom': 'userdata'},
])
def test_userdata_metadata(self, workflow, source_dir, userdata):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
session=session, name='ns/name',
version='1.0', release='1')
runner = create_runner(workflow, userdata=userdata)
runner.run()
build_extra_metadata = session.metadata['build']['extra']
if userdata:
assert build_extra_metadata['custom_user_metadata'] == userdata
else:
assert 'custom_user_metadata' not in build_extra_metadata
@pytest.mark.parametrize(('koji_task_id', 'expect_success'), [
(12345, True),
('x', False),
])
def test_koji_import_log_task_id(self, workflow, source_dir,
caplog, koji_task_id, expect_success):
session = MockedClientSession('')
session.getTaskInfo = lambda x: {'owner': 1234, 'state': 1}
setattr(session, 'getUser', lambda x: {'name': 'dev1'})
mock_environment(workflow, source_dir,
session=session, name='ns/name', version='1.0', release='1')
runner = create_runner(workflow)
workflow.user_params['koji_task_id'] = koji_task_id
runner.run()
metadata = session.metadata
assert 'build' in metadata
build = metadata['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
if expect_success:
assert "Koji Task ID {}".format(koji_task_id) in caplog.text
assert 'container_koji_task_id' in extra
extra_koji_task_id = extra['container_koji_task_id']
assert isinstance(extra_koji_task_id, int)
assert extra_koji_task_id == koji_task_id
else:
assert "invalid task ID" in caplog.text
assert 'container_koji_task_id' not in extra
@pytest.mark.parametrize('params', [
{
'should_raise': False,
'principal': None,
'keytab': None,
},
{
'should_raise': False,
'principal': '[email protected]',
'keytab': 'FILE:/var/run/secrets/mysecret',
},
{
'should_raise': True,
'principal': '[email protected]',
'keytab': None,
},
{
'should_raise': True,
'principal': None,
'keytab': 'FILE:/var/run/secrets/mysecret',
},
])
def test_koji_import_krb_args(self, workflow, source_dir, params):
session = MockedClientSession('')
expectation = flexmock(session).should_receive('krb_login').and_return(True)
name = 'name'
version = '1.0'
release = '1'
mock_environment(workflow, source_dir,
session=session, name=name, version=version, release=release)
runner = create_runner(workflow,
principal=params['principal'],
keytab=params['keytab'])
if params['should_raise']:
expectation.never()
with pytest.raises(PluginFailedException):
runner.run()
else:
expectation.once()
runner.run()
def test_koji_import_krb_fail(self, workflow, source_dir):
session = MockedClientSession('')
(flexmock(session)
.should_receive('krb_login')
.and_raise(RuntimeError)
.once())
mock_environment(workflow, source_dir,
session=session, name='ns/name', version='1.0', release='1')
runner = create_runner(workflow)
with pytest.raises(PluginFailedException):
runner.run()
def test_koji_import_ssl_fail(self, workflow, source_dir):
session = MockedClientSession('')
(flexmock(session)
.should_receive('ssl_login')
.and_raise(RuntimeError)
.once())
mock_environment(workflow, source_dir,
session=session, name='ns/name', version='1.0', release='1')
runner = create_runner(workflow, ssl_certs=True)
with pytest.raises(PluginFailedException):
runner.run()
@pytest.mark.parametrize('fail_method', [
'get_build_logs',
])
def test_koji_import_osbs_fail(self, workflow, source_dir, fail_method):
mock_environment(workflow, source_dir, name='name', version='1.0', release='1')
(flexmock(OSBS)
.should_receive(fail_method)
.and_raise(OsbsException))
runner = create_runner(workflow)
runner.run()
@staticmethod
def check_components(components):
assert isinstance(components, list)
assert len(components) > 0
for component_rpm in components:
assert isinstance(component_rpm, dict)
assert set(component_rpm.keys()) == {
'type',
'name',
'version',
'release',
'epoch',
'arch',
'sigmd5',
'signature',
}
assert component_rpm['type'] == 'rpm'
assert component_rpm['name']
assert is_string_type(component_rpm['name'])
assert component_rpm['name'] != 'gpg-pubkey'
assert component_rpm['version']
assert is_string_type(component_rpm['version'])
assert component_rpm['release']
epoch = component_rpm['epoch']
assert epoch is None or isinstance(epoch, int)
assert is_string_type(component_rpm['arch'])
assert component_rpm['signature'] != '(none)'
def validate_buildroot(self, buildroot, source=False):
assert isinstance(buildroot, dict)
assert set(buildroot.keys()) == {
'id',
'host',
'content_generator',
'container',
'components',
'tools',
}
host = buildroot['host']
assert isinstance(host, dict)
assert set(host.keys()) == {'os', 'arch'}
# assert host['os']
# assert is_string_type(host['os'])
assert host['arch']
assert is_string_type(host['arch'])
assert host['arch'] != 'amd64'
content_generator = buildroot['content_generator']
assert isinstance(content_generator, dict)
assert set(content_generator.keys()) == {'name', 'version'}
assert content_generator['name']
assert is_string_type(content_generator['name'])
assert content_generator['version']
assert is_string_type(content_generator['version'])
container = buildroot['container']
assert isinstance(container, dict)
assert set(container.keys()) == {'type', 'arch'}
assert container['type'] == 'none'
assert container['arch']
assert is_string_type(container['arch'])
def validate_output(self, output, has_config, source=False):
assert isinstance(output, dict)
assert 'buildroot_id' in output
assert 'filename' in output
assert output['filename']
assert is_string_type(output['filename'])
assert 'filesize' in output
assert int(output['filesize']) > 0
assert 'arch' in output
assert output['arch']
assert is_string_type(output['arch'])
assert 'checksum' in output
assert output['checksum']
assert is_string_type(output['checksum'])
assert 'checksum_type' in output
assert output['checksum_type'] == 'md5'
assert is_string_type(output['checksum_type'])
assert 'type' in output
if output['type'] == 'log':
assert set(output.keys()) == {
'buildroot_id',
'filename',
'filesize',
'arch',
'checksum',
'checksum_type',
'type',
}
assert output['arch'] == 'noarch'
else:
assert set(output.keys()) == {
'buildroot_id',
'filename',
'filesize',
'arch',
'checksum',
'checksum_type',
'type',
'components',
'extra',
}
assert output['type'] == 'docker-image'
assert is_string_type(output['arch'])
assert output['arch'] != 'noarch'
assert output['arch'] in output['filename']
if not source:
self.check_components(output['components'])
else:
assert output['components'] == []
extra = output['extra']
assert isinstance(extra, dict)
assert set(extra.keys()) == {'image', 'docker'}
image = extra['image']
assert isinstance(image, dict)
assert set(image.keys()) == {'arch'}
assert image['arch'] == output['arch'] # what else?
assert 'docker' in extra
docker = extra['docker']
assert isinstance(docker, dict)
if source:
expected_keys_set = {
'tags',
'digests',
'layer_sizes',
'repositories',
'id',
}
else:
expected_keys_set = {
'parent_id',
'id',
'repositories',
'tags',
'floating_tags',
'unique_tags',
}
if has_config:
expected_keys_set.add('config')
assert set(docker.keys()) == expected_keys_set
if not source:
assert is_string_type(docker['parent_id'])
assert is_string_type(docker['id'])
repositories = docker['repositories']
assert isinstance(repositories, list)
repositories_digest = list(filter(lambda repo: '@sha256' in repo, repositories))
assert sorted(repositories_digest) == sorted(set(repositories_digest))
def test_koji_import_import_fail(self, workflow, source_dir, caplog):
session = MockedClientSession('')
(flexmock(session)
.should_receive('CGImport')
.and_raise(RuntimeError))
name = 'ns/name'
version = '1.0'
release = '1'
target = 'images-docker-candidate'
mock_environment(workflow, source_dir,
name=name, version=version, release=release, session=session)
runner = create_runner(workflow, target=target)
with pytest.raises(PluginFailedException):
runner.run()
assert 'metadata:' in caplog.text
@pytest.mark.parametrize(('parent_id', 'expect_success', 'expect_error'), [
(1234, True, False),
(None, False, False),
('x', False, True),
('NO-RESULT', False, False),
])
def test_koji_import_parent_id(self, parent_id, expect_success, expect_error,
workflow, source_dir, caplog):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1', session=session)
koji_parent_result = None
if parent_id != 'NO-RESULT':
koji_parent_result = {
BASE_IMAGE_KOJI_BUILD: {'id': parent_id},
}
workflow.data.plugins_results[PLUGIN_KOJI_PARENT_KEY] = koji_parent_result
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
if expect_error:
assert 'invalid koji parent id' in caplog.text
if expect_success:
image = extra['image']
assert isinstance(image, dict)
assert BASE_IMAGE_BUILD_ID_KEY in image
parent_image_koji_build_id = image[BASE_IMAGE_BUILD_ID_KEY]
assert isinstance(parent_image_koji_build_id, int)
assert parent_image_koji_build_id == parent_id
else:
if 'image' in extra:
assert BASE_IMAGE_BUILD_ID_KEY not in extra['image']
@pytest.mark.parametrize('base_from_scratch', [True, False]) # noqa: F811
def test_produces_metadata_for_parent_images(
self, workflow, source_dir, base_from_scratch
):
koji_session = MockedClientSession('')
mock_environment(workflow, source_dir,
session=koji_session, name='ns/name', version='1.0', release='1')
koji_parent_result = {
BASE_IMAGE_KOJI_BUILD: dict(id=16, extra='build info'),
PARENT_IMAGES_KOJI_BUILDS: {
str(ImageName.parse('base')): dict(nvr='base-16.0-1', id=16, extra='build_info'),
},
}
workflow.data.plugins_results[PLUGIN_KOJI_PARENT_KEY] = koji_parent_result
dockerfile_images = ['base:latest', 'scratch', 'some:1.0']
if base_from_scratch:
dockerfile_images.append('scratch')
workflow.data.dockerfile_images = DockerfileImages(dockerfile_images)
runner = create_runner(workflow)
runner.run()
image_metadata = koji_session.metadata['build']['extra']['image']
key = PARENT_IMAGE_BUILDS_KEY
assert key in image_metadata
assert image_metadata[key]['base:latest'] == dict(nvr='base-16.0-1', id=16)
assert 'extra' not in image_metadata[key]['base:latest']
key = BASE_IMAGE_BUILD_ID_KEY
if base_from_scratch:
assert key not in image_metadata
else:
assert key in image_metadata
assert image_metadata[key] == 16
key = PARENT_IMAGES_KEY
assert key in image_metadata
assert image_metadata[key] == dockerfile_images
@pytest.mark.parametrize(('task_id', 'expect_success'), [
(1234, True),
('x', False),
])
def test_koji_import_filesystem_koji_task_id(
self, task_id, expect_success, workflow, source_dir, caplog
):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1', session=session)
workflow.data.plugins_results[AddFilesystemPlugin.key] = {
'base-image-id': 'abcd',
'filesystem-koji-task-id': task_id,
}
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
if expect_success:
assert 'filesystem_koji_task_id' in extra
filesystem_koji_task_id = extra['filesystem_koji_task_id']
assert isinstance(filesystem_koji_task_id, int)
assert filesystem_koji_task_id == task_id
else:
assert 'invalid task ID' in caplog.text
assert 'filesystem_koji_task_id' not in extra
def test_koji_import_filesystem_koji_task_id_missing(
self, workflow, source_dir, caplog
):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1', session=session)
workflow.data.plugins_results[AddFilesystemPlugin.key] = {
'base-image-id': 'abcd',
}
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'filesystem_koji_task_id' not in extra
assert AddFilesystemPlugin.key in caplog.text
@pytest.mark.parametrize('blocksize', (None, 1048576))
@pytest.mark.parametrize('verify_media', (
['v1', 'v2', 'v2_list'],
['v1'],
False)
)
@pytest.mark.parametrize('has_reserved_build', (True, False))
@pytest.mark.parametrize(('task_states', 'skip_import'), [
(['OPEN'], False),
(['FAILED'], True),
])
def test_koji_import_success(self, workflow, source_dir, caplog,
blocksize, verify_media, has_reserved_build, task_states,
skip_import):
session = MockedClientSession('', task_states=task_states)
component = 'component'
name = 'ns/name'
version = '1.0'
release = '1'
mock_environment(workflow, source_dir,
session=session, component=component,
name=name, version=version, release=release)
if verify_media:
workflow.data.plugins_results[PLUGIN_VERIFY_MEDIA_KEY] = verify_media
expected_media_types = verify_media or []
build_token = 'token_12345'
build_id = '123'
if has_reserved_build:
workflow.data.reserved_build_id = build_id
workflow.data.reserved_token = build_token
if has_reserved_build:
(flexmock(session)
.should_call('CGImport')
.with_args(KOJI_METADATA_FILENAME, str, token=build_token)
)
else:
(flexmock(session)
.should_call('CGImport')
.with_args(KOJI_METADATA_FILENAME, str, token=None)
)
target = 'images-docker-candidate'
runner = create_runner(workflow, target=target, blocksize=blocksize)
runner.run()
if skip_import:
log_msg = "Koji task is not in Open state, but in {}, not importing build".\
format(task_states[0])
assert log_msg in caplog.text
return
data = session.metadata
assert set(data.keys()) == {
'metadata_version',
'build',
'buildroots',
'output',
}
assert data['metadata_version'] in ['0', 0]
build = data['build']
assert isinstance(build, dict)
buildroots = data['buildroots']
assert isinstance(buildroots, list)
assert len(buildroots) > 0
output_files = data['output']
assert isinstance(output_files, list)
expected_keys = {
'name',
'version',
'release',
'source',
'start_time',
'end_time',
'extra', # optional but always supplied
'owner',
}
if has_reserved_build:
expected_keys.add('build_id')
assert set(build.keys()) == expected_keys
if has_reserved_build:
assert build['build_id'] == build_id
assert build['name'] == component
assert build['version'] == version
assert build['release'] == release
assert build['source'] == 'git://hostname/path#123456'
start_time = build['start_time']
assert isinstance(start_time, int) and start_time
end_time = build['end_time']
assert isinstance(end_time, int) and end_time
extra = build['extra']
assert isinstance(extra, dict)
assert 'osbs_build' in extra
osbs_build = extra['osbs_build']
assert isinstance(osbs_build, dict)
assert 'kind' in osbs_build
assert osbs_build['kind'] == KOJI_KIND_IMAGE_BUILD
assert 'subtypes' in osbs_build
assert osbs_build['subtypes'] == []
assert 'engine' in osbs_build
assert osbs_build['engine'] == 'podman'
assert 'image' in extra
image = extra['image']
assert isinstance(image, dict)
if expected_media_types:
media_types = image['media_types']
assert isinstance(media_types, list)
assert sorted(media_types) == sorted(expected_media_types)
for buildroot in buildroots:
self.validate_buildroot(buildroot)
# Unique within buildroots in this metadata
assert len([b for b in buildroots
if b['id'] == buildroot['id']]) == 1
for output in output_files:
self.validate_output(output, False)
buildroot_id = output['buildroot_id']
# References one of the buildroots
assert len([buildroot for buildroot in buildroots
if buildroot['id'] == buildroot_id]) == 1
build_id = runner.plugins_results[KojiImportPlugin.key]
assert build_id == "123"
assert set(session.uploaded_files.keys()) == {
OSBS_BUILD_LOG_FILENAME,
KOJI_METADATA_FILENAME,
}
osbs_build_log = session.uploaded_files[OSBS_BUILD_LOG_FILENAME]
assert osbs_build_log == b"log message A\nlog message B\nlog message C\n"
assert workflow.data.annotations['koji-build-id'] == '123'
def test_koji_import_owner_submitter(self, workflow, source_dir):
session = MockedClientSession('')
session.getTaskInfo = lambda x: {'owner': 1234, 'state': 1}
setattr(session, 'getUser', lambda x: {'name': 'dev1'})
mock_environment(workflow, source_dir,
session=session, name='ns/name', version='1.0', release='1')
runner = create_runner(workflow)
workflow.user_params['koji_task_id'] = 1234
runner.run()
metadata = session.metadata
assert metadata['build']['extra']['submitter'] == 'osbs'
assert metadata['build']['owner'] == 'dev1'
def test_koji_import_pullspec(self, workflow, source_dir):
session = MockedClientSession('')
name = 'myproject/hello-world'
version = '1.0'
release = '1'
mock_environment(workflow, source_dir,
session=session, name=name, version=version, release=release)
runner = create_runner(workflow)
runner.run()
log_outputs = [
output
for output in session.metadata['output']
if output['type'] == 'log'
]
assert log_outputs
docker_outputs = [
output
for output in session.metadata['output']
if output['type'] == 'docker-image'
]
assert len(docker_outputs) == 1
docker_output = docker_outputs[0]
digest_pullspecs = [
repo
for repo in docker_output['extra']['docker']['repositories']
if '@sha256' in repo
]
assert len(digest_pullspecs) == 1
# Check registry
reg = set(ImageName.parse(repo).registry
for repo in docker_output['extra']['docker']['repositories'])
assert len(reg) == 1
assert reg == {'docker-registry.example.com:8888'}
def test_koji_import_without_build_info(self, workflow, source_dir):
class LegacyCGImport(MockedClientSession):
def CGImport(self, metadata, server_dir, token=None):
super(LegacyCGImport, self).CGImport(metadata, server_dir, token)
return
session = LegacyCGImport('')
name = 'ns/name'
version = '1.0'
release = '1'
mock_environment(workflow, source_dir,
session=session, name=name, version=version, release=release)
runner = create_runner(workflow)
runner.run()
assert runner.plugins_results[KojiImportPlugin.key] is None
@pytest.mark.parametrize('add_help_results,expected_help_file', [
[{}, None],
[{AddHelpPlugin.key: {}}, None],
[{AddHelpPlugin.key: {'help_file': None}}, None],
[{AddHelpPlugin.key: {'help_file': 'help.md'}}, 'help.md'],
])
def test_koji_import_add_help(self, add_help_results, expected_help_file, workflow, source_dir):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1', session=session)
workflow.data.plugins_results.update(add_help_results)
runner = create_runner(workflow)
runner.run()
data = session.metadata
extra_image = data['build']['extra']['image']
if not add_help_results or not add_help_results[AddHelpPlugin.key]:
assert 'help' not in extra_image
else:
assert expected_help_file == extra_image['help']
@pytest.mark.skipif(not MODULEMD_AVAILABLE,
reason="libmodulemd not available")
def test_koji_import_flatpak(self, workflow, source_dir):
workflow.user_params['flatpak'] = True
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1', session=session)
setup_flatpak_composes(workflow)
(flexmock(FlatpakUtil)
.should_receive('get_flatpak_compose_info')
.replace_with(setup_flatpak_compose_info))
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'image' in extra
image = extra['image']
assert isinstance(image, dict)
assert 'osbs_build' in extra
osbs_build = extra['osbs_build']
assert osbs_build['subtypes'] == ['flatpak']
assert image.get('flatpak') is True
assert image.get('modules') == ['eog-f28-20170629213428',
'flatpak-runtime-f28-20170701152209']
assert image.get('source_modules') == ['eog:f28']
assert image.get('odcs') == {
'compose_ids': [22422, 42],
'signing_intent': 'unsigned',
'signing_intent_overridden': False,
}
@pytest.mark.parametrize('build_result,expected', [
[{}, None],
[{PLUGIN_VERIFY_MEDIA_KEY: []}, []],
[
{PLUGIN_VERIFY_MEDIA_KEY: [MEDIA_TYPE_DOCKER_V2_SCHEMA2]},
[MEDIA_TYPE_DOCKER_V2_SCHEMA2],
],
])
def test_koji_import_set_media_types(
self, workflow, source_dir, build_result, expected
):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1', session=session)
workflow.data.plugins_results.update(build_result)
runner = create_runner(workflow)
runner.run()
data = session.metadata
image = data['build']['extra']['image']
if not build_result or not build_result[PLUGIN_VERIFY_MEDIA_KEY]:
assert 'media_types' not in image
else:
assert expected == image['media_types']
@pytest.mark.parametrize('is_scratch', [True, False])
@pytest.mark.parametrize('digest', [
None,
ManifestDigest(v2_list='sha256:e6593f3e'),
])
def test_koji_import_set_manifest_list_info(self, caplog, workflow, source_dir,
is_scratch, digest):
session = MockedClientSession('')
version = '1.0'
release = '1'
name = 'ns/name'
unique_tag = "{}-timestamp".format(version)
mock_environment(workflow, source_dir,
name=name, version=version, release=release,
session=session, add_tag_conf_primaries=not is_scratch, scratch=is_scratch)
group_manifest_result = {"media_type": MEDIA_TYPE_DOCKER_V2_SCHEMA2}
if digest:
group_manifest_result = {
'media_type': MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST,
'manifest_digest': digest
}
workflow.data.plugins_results[PLUGIN_GROUP_MANIFESTS_KEY] = group_manifest_result
flexmock(koji_cli.lib).should_receive('unique_path').and_return('upload-dir')
runner = create_runner(workflow)
runner.run()
if is_scratch:
            metadata_tag = 'platform:_metadata_'
metadata_file = KOJI_METADATA_FILENAME
assert metadata_file in session.uploaded_files
data = json.loads(session.uploaded_files[metadata_file])
meta_record = ''
for rec in caplog.records:
                if metadata_tag in rec.message:
_, meta_record = rec.message.rsplit(' ', 1)
break
assert os.path.join('upload-dir', metadata_file) == meta_record
else:
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'image' in extra
image = extra['image']
assert isinstance(image, dict)
expected_results = {'unique_tags': [unique_tag]}
expected_results['floating_tags'] = [
tag.tag for tag in workflow.data.tag_conf.floating_images
]
if is_scratch:
expected_results['tags'] = [
tag.tag for tag in workflow.data.tag_conf.images
]
else:
expected_results['tags'] = [
tag.tag for tag in workflow.data.tag_conf.primary_images
]
for tag in expected_results['tags']:
if '-' in tag:
version_release = tag
break
else:
raise RuntimeError("incorrect test data")
if digest:
assert 'index' in image.keys()
pullspec = "{}/{}@{}".format(REGISTRY, name, digest.v2_list)
expected_results['pull'] = [pullspec]
pullspec = "{}/{}:{}".format(REGISTRY, name, version_release)
expected_results['pull'].append(pullspec)
expected_results['digests'] = {
'application/vnd.docker.distribution.manifest.list.v2+json': digest.v2_list}
assert image['index'] == expected_results
else:
assert 'index' not in image.keys()
assert 'output' in data
for output in data['output']:
if output['type'] == 'log':
continue
assert 'extra' in output
extra = output['extra']
assert 'docker' in extra
assert 'tags' in extra['docker']
assert 'floating_tags' in extra['docker']
assert 'unique_tags' in extra['docker']
assert sorted(expected_results['tags']) == sorted(extra['docker']['tags'])
assert (sorted(expected_results['floating_tags']) ==
sorted(extra['docker']['floating_tags']))
assert (sorted(expected_results['unique_tags']) ==
sorted(extra['docker']['unique_tags']))
repositories = extra['docker']['repositories']
assert len(repositories) == 2
assert len([pullspec for pullspec in repositories
if '@' in pullspec]) == 1
by_tags = [pullspec for pullspec in repositories
if '@' not in pullspec]
assert len(by_tags) == 1
by_tag = by_tags[0]
            # This test uses a metadata fragment which reports the
            # following registry. In real use this would be a Crane
            # registry URI.
registry = 'docker-registry.example.com:8888'
assert by_tag == '%s/myproject/hello-world:%s' % (registry,
version_release)
@pytest.mark.parametrize(('add_tag_conf_primaries', 'success'), (
(False, False),
(True, True),
))
def test_koji_import_primary_images(self, workflow, source_dir,
add_tag_conf_primaries, success):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1',
add_tag_conf_primaries=add_tag_conf_primaries, session=session)
runner = create_runner(workflow)
if not success:
with pytest.raises(PluginFailedException) as exc_info:
runner.run()
assert 'Unable to find version-release image' in str(exc_info.value)
return
runner.run()
@pytest.mark.parametrize(('comp', 'sign_int', 'override'), [
([{'id': 1}, {'id': 2}, {'id': 3}], "beta", True),
([{'id': 2}, {'id': 3}, {'id': 4}], "release", True),
([{'id': 3}, {'id': 4}, {'id': 5}], "beta", False),
([{'id': 4}, {'id': 5}, {'id': 6}], "release", False),
(None, None, None)
])
def test_odcs_metadata_koji(self, workflow, source_dir, comp, sign_int, override):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1', session=session)
workflow.data.plugins_results[PLUGIN_RESOLVE_COMPOSES_KEY] = {
'composes': comp,
'yum_repourls': {'x86_64': []},
'include_koji_repo': False,
'signing_intent': sign_int,
'signing_intent_overridden': override,
}
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'image' in extra
image = extra['image']
assert isinstance(image, dict)
if comp:
comp_ids = [item['id'] for item in comp]
assert 'odcs' in image
odcs = image['odcs']
assert isinstance(odcs, dict)
assert odcs['compose_ids'] == comp_ids
assert odcs['signing_intent'] == sign_int
assert odcs['signing_intent_overridden'] == override
else:
assert 'odcs' not in image
@pytest.mark.parametrize('resolve_run', [
True,
False,
])
def test_odcs_metadata_koji_plugin_run(self, workflow, source_dir, resolve_run):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1', session=session)
if resolve_run:
workflow.data.plugins_results[PLUGIN_RESOLVE_COMPOSES_KEY] = {'composes': []}
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'image' in extra
image = extra['image']
assert isinstance(image, dict)
assert 'odcs' not in image
@pytest.mark.parametrize('container_first', [True, False])
def test_go_metadata(self, workflow, source_dir, container_first):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1',
session=session, container_first=container_first)
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'image' in extra
image = extra['image']
assert isinstance(image, dict)
if container_first:
assert 'go' in image
go = image['go']
assert isinstance(go, dict)
assert 'modules' in go
modules = go['modules']
assert isinstance(modules, list)
assert len(modules) == 1
module = modules[0]
assert module['module'] == 'example.com/packagename'
else:
assert 'go' not in image
@pytest.mark.parametrize('yum_repourl', [
None,
[],
["http://example.com/my.repo", ],
["http://example.com/my.repo", "http://example.com/other.repo"],
])
def test_yum_repourls_metadata(self, workflow, source_dir, yum_repourl):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1',
session=session, yum_repourls=yum_repourl)
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'image' in extra
image = extra['image']
assert isinstance(image, dict)
if yum_repourl:
assert 'yum_repourls' in image
repourls = image['yum_repourls']
assert isinstance(repourls, list)
assert repourls == yum_repourl
else:
assert 'yum_repourls' not in image
@pytest.mark.parametrize('has_appregistry_manifests', [True, False])
@pytest.mark.parametrize('has_bundle_manifests', [True, False])
@pytest.mark.parametrize('push_operator_manifests', [True, False])
def test_set_operators_metadata(
self, workflow, source_dir,
has_appregistry_manifests, has_bundle_manifests,
push_operator_manifests):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1',
session=session,
has_op_appregistry_manifests=has_appregistry_manifests,
has_op_bundle_manifests=has_bundle_manifests,
push_operator_manifests_enabled=push_operator_manifests)
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'osbs_build' in extra
osbs_build = extra['osbs_build']
if has_appregistry_manifests or has_bundle_manifests:
assert 'operator_manifests_archive' in extra
operator_manifests = extra['operator_manifests_archive']
assert isinstance(operator_manifests, str)
assert operator_manifests == OPERATOR_MANIFESTS_ARCHIVE
assert 'typeinfo' in extra
assert 'operator-manifests' in extra['typeinfo']
operator_typeinfo = extra['typeinfo']['operator-manifests']
assert isinstance(operator_typeinfo, dict)
assert operator_typeinfo['archive'] == OPERATOR_MANIFESTS_ARCHIVE
else:
assert 'operator_manifests_archive' not in extra
assert 'typeinfo' not in extra
        # Having manifests pushed without extraction cannot happen, but the
        # plugin handles the results independently, so test it this way.
if push_operator_manifests:
assert extra['operator_manifests']['appregistry'] == PUSH_OPERATOR_MANIFESTS_RESULTS
else:
assert 'operator_manifests' not in extra
assert osbs_build['subtypes'] == [
stype for yes, stype in [
(has_appregistry_manifests, KOJI_SUBTYPE_OP_APPREGISTRY),
(has_bundle_manifests, KOJI_SUBTYPE_OP_BUNDLE)
] if yes
]
@pytest.mark.usefixtures('_os_env')
@pytest.mark.parametrize('has_bundle_manifests', [True, False])
def test_operators_bundle_metadata(
self, workflow, source_dir, has_bundle_manifests):
"""Test if metadata (extra.image.operator_manifests) about operator
bundles are properly exported"""
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1',
session=session, has_op_bundle_manifests=has_bundle_manifests)
if has_bundle_manifests:
workflow.data.plugins_results[PLUGIN_PIN_OPERATOR_DIGESTS_KEY] = {
'custom_csv_modifications_applied': False,
'related_images': {
'pullspecs': [
{
'original': ImageName.parse('old-registry/ns/spam:1'),
'new': ImageName.parse('new-registry/new-ns/new-spam@sha256:4'),
'pinned': True,
'replaced': True
}, {
'original': ImageName.parse('old-registry/ns/spam@sha256:4'),
'new': ImageName.parse('new-registry/new-ns/new-spam@sha256:4'),
'pinned': False,
'replaced': True
}, {
'original': ImageName.parse(
'registry.private.example.com/ns/foo@sha256:1'),
'new': ImageName.parse('registry.private.example.com/ns/foo@sha256:1'),
'pinned': False,
'replaced': False
},
],
'created_by_osbs': True,
}
}
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
if has_bundle_manifests:
assert 'operator_manifests' in extra['image']
expected = {
'custom_csv_modifications_applied': False,
'related_images': {
'pullspecs': [
{
'original': 'old-registry/ns/spam:1',
'new': 'new-registry/new-ns/new-spam@sha256:4',
'pinned': True,
}, {
'original': 'old-registry/ns/spam@sha256:4',
'new': 'new-registry/new-ns/new-spam@sha256:4',
'pinned': False,
}, {
'original': 'registry.private.example.com/ns/foo@sha256:1',
'new': 'registry.private.example.com/ns/foo@sha256:1',
'pinned': False,
},
],
'created_by_osbs': True,
}
}
assert extra['image']['operator_manifests'] == expected
else:
assert 'operator_manifests' not in extra['image']
@pytest.mark.usefixtures('_os_env')
@pytest.mark.parametrize('has_op_csv_modifications', [True, False])
def test_operators_bundle_metadata_csv_modifications(
self, workflow, source_dir, has_op_csv_modifications):
"""Test if metadata (extra.image.operator_manifests.custom_csv_modifications_applied)
about operator bundles are properly exported"""
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1',
session=session, has_op_bundle_manifests=True)
plugin_res = {
'custom_csv_modifications_applied': has_op_csv_modifications,
'related_images': {
'pullspecs': [],
'created_by_osbs': True,
}
}
workflow.data.plugins_results[PLUGIN_PIN_OPERATOR_DIGESTS_KEY] = plugin_res
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'operator_manifests' in extra['image']
expected = {
'custom_csv_modifications_applied': has_op_csv_modifications,
'related_images': {
'pullspecs': [],
'created_by_osbs': True,
}
}
assert extra['image']['operator_manifests'] == expected
@pytest.mark.parametrize('has_remote_source', [True, False])
@pytest.mark.parametrize('allow_multiple_remote_sources', [True, False])
def test_remote_sources(self, workflow, source_dir,
has_remote_source, allow_multiple_remote_sources):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1',
session=session, has_remote_source=has_remote_source)
mock_reactor_config(workflow, allow_multiple_remote_sources)
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
# https://github.com/PyCQA/pylint/issues/2186
# pylint: disable=W1655
if has_remote_source:
if allow_multiple_remote_sources:
assert extra['image']['remote_sources'] == [
{
'name': None,
'url': 'https://cachito.com/api/v1/requests/21048',
}
]
assert 'typeinfo' in extra
assert 'remote-sources' in extra['typeinfo']
assert extra['typeinfo']['remote-sources'] == [
{
'name': None,
'url': 'https://cachito.com/api/v1/requests/21048',
'archives': ['remote-source.json', 'remote-source.tar.gz'],
}
]
assert REMOTE_SOURCE_TARBALL_FILENAME in session.uploaded_files.keys()
assert REMOTE_SOURCE_JSON_FILENAME in session.uploaded_files.keys()
else:
assert (
extra['image']['remote_source_url']
== 'https://cachito.com/api/v1/requests/21048/download'
)
assert extra['typeinfo']['remote-sources'] == {
"remote_source_url": "https://cachito.com/api/v1/requests/21048/download"
}
assert REMOTE_SOURCE_TARBALL_FILENAME in session.uploaded_files.keys()
assert REMOTE_SOURCE_JSON_FILENAME in session.uploaded_files.keys()
else:
assert 'remote_source_url' not in extra['image']
assert 'typeinfo' not in extra
assert REMOTE_SOURCE_TARBALL_FILENAME not in session.uploaded_files.keys()
assert REMOTE_SOURCE_JSON_FILENAME not in session.uploaded_files.keys()
@pytest.mark.parametrize('has_remote_source_file', [True, False])
def test_remote_source_files(self, workflow, source_dir, has_remote_source_file):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1',
session=session, has_remote_source_file=has_remote_source_file)
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
# https://github.com/PyCQA/pylint/issues/2186
# pylint: disable=W1655
if has_remote_source_file:
assert 'typeinfo' in extra
assert 'remote-source-file' in extra['typeinfo']
assert REMOTE_SOURCE_FILE_FILENAME in session.uploaded_files.keys()
else:
assert 'typeinfo' not in extra
assert REMOTE_SOURCE_FILE_FILENAME not in session.uploaded_files.keys()
@pytest.mark.parametrize('has_pnc_build_metadata', [True, False])
def test_pnc_build_metadata(self, workflow, source_dir, has_pnc_build_metadata):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
name='ns/name', version='1.0', release='1',
session=session, has_pnc_build_metadata=has_pnc_build_metadata)
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
# https://github.com/PyCQA/pylint/issues/2186
# pylint: disable=W1655
if has_pnc_build_metadata:
assert 'pnc' in extra['image']
assert 'builds' in extra['image']['pnc']
for build in extra['image']['pnc']['builds']:
assert 'id' in build
else:
assert 'pnc' not in extra['image']
@pytest.mark.parametrize('blocksize', (None, 1048576))
@pytest.mark.parametrize(('has_config', 'oci'), ((True, False), (False, True)))
@pytest.mark.parametrize(('verify_media', 'expect_id'), (
(['v1', 'v2', 'v2_list'], 'ab12'),
(['v1'], 'ab12'),
(False, 'ab12')
))
@pytest.mark.parametrize('has_reserved_build', (True, False))
@pytest.mark.parametrize(('task_states', 'skip_import'), [
(['OPEN'], False),
(['FAILED'], True),
])
@pytest.mark.parametrize(('userdata'), [
None,
{},
{'custom': 'userdata'},
])
def test_koji_import_success_source(self, workflow, source_dir, caplog, blocksize,
has_config, oci,
verify_media, expect_id, has_reserved_build, task_states,
skip_import, userdata):
session = MockedClientSession('', task_states=task_states)
        # When a target is provided, the koji build will always be tagged,
# either by koji_import or koji_tag_build.
component = 'component'
name = 'ns/name'
version = '1.0'
release = '1'
mock_environment(workflow, source_dir, oci=oci,
session=session, name=name, component=component,
version=version, release=release, has_config=has_config,
source_build=True)
workflow.data.koji_source_nvr = {'name': component, 'version': version, 'release': release}
workflow.data.koji_source_source_url = 'git://hostname/path#123456'
if verify_media:
workflow.data.plugins_results[PLUGIN_VERIFY_MEDIA_KEY] = verify_media
expected_media_types = verify_media or []
build_token = 'token_12345'
build_id = '123'
if has_reserved_build:
workflow.data.reserved_build_id = build_id
workflow.data.reserved_token = build_token
if has_reserved_build:
(flexmock(session)
.should_call('CGImport')
.with_args(KOJI_METADATA_FILENAME, str, token=build_token)
)
else:
(flexmock(session)
.should_call('CGImport')
.with_args(KOJI_METADATA_FILENAME, str, token=None)
)
target = 'images-docker-candidate'
source_manifest = {
'config': {
'digest': expect_id,
},
'layers': [
{'size': 20000,
'digest': 'sha256:123456789'},
{'size': 30000,
'digest': 'sha256:987654321'},
]
}
workflow.data.koji_source_manifest = source_manifest
runner = create_runner(workflow, target=target, blocksize=blocksize,
upload_plugin_name=KojiImportSourceContainerPlugin.key,
userdata=userdata)
runner.run()
if skip_import:
log_msg = "Koji task is not in Open state, but in {}, not importing build".\
format(task_states[0])
assert log_msg in caplog.text
return
data = session.metadata
assert set(data.keys()) == {
'metadata_version',
'build',
'buildroots',
'output',
}
assert data['metadata_version'] in ['0', 0]
build = data['build']
assert isinstance(build, dict)
buildroots = data['buildroots']
assert isinstance(buildroots, list)
assert len(buildroots) > 0
output_files = data['output']
assert isinstance(output_files, list)
expected_keys = {
'name',
'version',
'release',
'source',
'start_time',
'end_time',
'extra', # optional but always supplied
'owner',
}
if has_reserved_build:
expected_keys.add('build_id')
assert set(build.keys()) == expected_keys
if has_reserved_build:
assert build['build_id'] == build_id
assert build['name'] == component
assert build['version'] == version
assert build['release'] == release
assert build['source'] == 'git://hostname/path#123456'
start_time = build['start_time']
assert isinstance(start_time, int) and start_time
end_time = build['end_time']
assert isinstance(end_time, int) and end_time
extra = build['extra']
assert isinstance(extra, dict)
if userdata:
assert extra['custom_user_metadata'] == userdata
else:
assert 'custom_user_metadata' not in extra
assert 'osbs_build' in extra
osbs_build = extra['osbs_build']
assert isinstance(osbs_build, dict)
assert 'kind' in osbs_build
assert osbs_build['kind'] == KOJI_KIND_IMAGE_SOURCE_BUILD
assert 'subtypes' in osbs_build
assert osbs_build['subtypes'] == []
assert 'engine' in osbs_build
assert osbs_build['engine'] == KOJI_SOURCE_ENGINE
assert 'image' in extra
image = extra['image']
assert isinstance(image, dict)
assert image['sources_for_nvr'] == SOURCES_FOR_KOJI_NVR
assert image['sources_signing_intent'] == SOURCES_SIGNING_INTENT
if expected_media_types:
media_types = image['media_types']
assert isinstance(media_types, list)
assert sorted(media_types) == sorted(expected_media_types)
for buildroot in buildroots:
self.validate_buildroot(buildroot, source=True)
# Unique within buildroots in this metadata
assert len([b for b in buildroots
if b['id'] == buildroot['id']]) == 1
for output in output_files:
self.validate_output(output, has_config, source=True)
buildroot_id = output['buildroot_id']
# References one of the buildroots
assert len([buildroot for buildroot in buildroots
if buildroot['id'] == buildroot_id]) == 1
build_id = runner.plugins_results[KojiImportSourceContainerPlugin.key]
assert build_id == "123"
uploaded_filename = 'docker-image-{}.{}.tar.gz'.format(expect_id, os.uname()[4])
assert set(session.uploaded_files.keys()) == {
OSBS_BUILD_LOG_FILENAME,
uploaded_filename,
KOJI_METADATA_FILENAME
}
osbs_build_log = session.uploaded_files[OSBS_BUILD_LOG_FILENAME]
assert osbs_build_log == b"log message A\nlog message B\nlog message C\n"
assert workflow.data.annotations['koji-build-id'] == '123'
@pytest.mark.parametrize('build_metadatas,platform,_filter,expected', [
[{}, None, None, []],
[{}, None, {}, []],
[{}, None, {"type": "docker-image"}, []],
[{"x86_64": {"output": []}}, None, {"type": "docker-image"}, []],
# No output is found by non-existing type.
[{"x86_64": {"output": [{"type": "log"}]}}, None, {"type": "docker-image"}, []],
# No output is found by unknown key.
[{"x86_64": {"output": [{"type": "log"}]}}, None, {"filename": "file.tar.gz"}, []],
[
{"x86_64": {"output": [{"type": "log"}]}},
None,
{"type": "log", "filename": "file.tar.gz"},
[],
],
# Output is found by type.
[
{"x86_64": {"output": [{"type": "log"}, {"type": "docker-image"}]}},
None,
{"type": "docker-image"},
[("x86_64", {"type": "docker-image"})],
],
# No output is found with multiple filters.
[
{
"x86_64": {
"output": [
{"type": "log", "filename": "build.log"},
{"type": "docker-image", "filename": "img.tar.gz"},
],
},
},
None,
{"type": "docker-image", "filename": "img-file"},
[],
],
# Find out output with multiple filters
[
{
"x86_64": {
"output": [
{"type": "log", "filename": "build.log"},
{"type": "docker-image", "filename": "img.tar.gz"},
],
},
},
None,
{"type": "docker-image", "filename": "img.tar.gz"},
[("x86_64", {"type": "docker-image", "filename": "img.tar.gz"})],
],
# No output if platform does not exist.
[{"x86_64": {"output": [{"type": "log", "filename": "build.log"}]}}, "s390x", None, []],
# Filter outputs by platform
[
{"x86_64": {"output": [{"type": "log", "filename": "build.log"}]}},
"x86_64",
None,
[("x86_64", {"type": "log", "filename": "build.log"})],
],
# Filter outputs by combination of platform and filter
[
{
"x86_64": {
"output": [
{"type": "log", "filename": "build.log"},
{"type": "docker-image", "filename": "img.tar.gz"},
],
},
},
"x86_64",
{"type": "docker-image"},
[("x86_64", {"type": "docker-image", "filename": "img.tar.gz"})],
],
# Iterator outputs from multiple platforms
[
{
"x86_64": {
"output": [
{"type": "log", "filename": "build.log"},
{"type": "docker-image", "filename": "img.tar.gz"},
],
},
"s390x": {
"output": [
{"type": "log", "filename": "build.log"},
{"type": "docker-image", "filename": "img.tar.gz"},
],
},
},
None,
{"type": "docker-image"},
[
("x86_64", {"type": "docker-image", "filename": "img.tar.gz"}),
("s390x", {"type": "docker-image", "filename": "img.tar.gz"}),
],
],
[
{
"x86_64": {
"output": [
{"type": "log", "filename": "build.log"},
{"type": "docker-image", "filename": "img.tar.gz"},
],
},
"s390x": {
                    # s390x will not be included since it has no output of docker-image type.
"output": [{"type": "log", "filename": "build.log"}],
},
},
None,
{"type": "docker-image"},
[("x86_64", {"type": "docker-image", "filename": "img.tar.gz"})],
],
])
def test_iter_build_metadata_outputs(
self, build_metadatas, platform, _filter, expected, workflow
):
mock_reactor_config(workflow)
workflow.data.plugins_results[GatherBuildsMetadataPlugin.key] = build_metadatas
plugin = KojiImportPlugin(workflow)
outputs = list(plugin._iter_build_metadata_outputs(platform, _filter=_filter))
assert expected == outputs
@pytest.mark.parametrize("fs_result,expected,log", [
[None, None, None],
[{}, None, "expected filesystem-koji-task-id in result"],
[{"other-result": 1234}, None, "expected filesystem-koji-task-id in result"],
[{"filesystem-koji-task-id": "task_id"}, None, f"invalid task ID {'task_id'!r}"],
[{"filesystem-koji-task-id": 1}, 1, None],
[{"filesystem-koji-task-id": "1"}, 1, None],
])
def test_property_filesystem_koji_task_id(self, fs_result, expected, log, workflow, caplog):
mock_reactor_config(workflow)
workflow.data.plugins_results[PLUGIN_ADD_FILESYSTEM_KEY] = fs_result
plugin = KojiImportPlugin(workflow)
assert expected == plugin._filesystem_koji_task_id
if log is not None:
assert log in caplog.text
@pytest.mark.parametrize('has_op_appregistry_manifests', [True, False])
@pytest.mark.parametrize('has_op_bundle_manifests', [True, False])
def test_binary_build_metadata_includes_exported_operator_manifests(
self, has_op_appregistry_manifests, has_op_bundle_manifests, workflow, source_dir
):
session = MockedClientSession('')
mock_environment(workflow, source_dir,
has_op_appregistry_manifests=has_op_appregistry_manifests,
has_op_bundle_manifests=has_op_bundle_manifests,
name='ns/name', version='1.0', release='1',
session=session)
runner = create_runner(workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'osbs_build' in extra
osbs_build = extra['osbs_build']
if has_op_appregistry_manifests or has_op_bundle_manifests:
assert 'operator_manifests_archive' in extra
operator_manifests = extra['operator_manifests_archive']
assert isinstance(operator_manifests, str)
assert operator_manifests == OPERATOR_MANIFESTS_ARCHIVE
assert 'typeinfo' in extra
assert 'operator-manifests' in extra['typeinfo']
operator_typeinfo = extra['typeinfo']['operator-manifests']
assert isinstance(operator_typeinfo, dict)
assert operator_typeinfo['archive'] == OPERATOR_MANIFESTS_ARCHIVE
else:
assert 'operator_manifests_archive' not in extra
assert 'typeinfo' not in extra
assert osbs_build['subtypes'] == [
stype for yes, stype in [
(has_op_appregistry_manifests, KOJI_SUBTYPE_OP_APPREGISTRY),
(has_op_bundle_manifests, KOJI_SUBTYPE_OP_BUNDLE)
] if yes
]
# Find the operator manifests output
output = None
for item in session.metadata['output']:
if item['type'] == KOJI_BTYPE_OPERATOR_MANIFESTS:
output = item
break
if not has_op_bundle_manifests and not has_op_appregistry_manifests:
assert output is None, \
'Metadata output should not have exported operator manifests.'
return
assert output is not None, 'Missing output of exported operator manifests'
expected_buildroot_id = session.metadata['buildroots'][0]['id']
assert expected_buildroot_id == output['buildroot_id']
assert OPERATOR_MANIFESTS_ARCHIVE in session.uploaded_files
@pytest.mark.parametrize(
"koji_metadata,expected_metadata",
[
(
{"metadata": {"config": {"env": "\x1F"}}},
{"metadata": {"config": {"env": "\\x1f"}}},
),
(
{"metadata": {"config": {"env": "value\x1Fvalue\x1F\n\r\t\x1F\x03"}}},
{"metadata": {"config": {"env": "value\\x1fvalue\\x1f\n\r\t\\x1f\\x03"}}},
),
],
)
    def test_escape_non_printable_chars(self, koji_metadata, expected_metadata):
actual_metadata = escape_non_printable_chars(koji_metadata)
assert expected_metadata == actual_metadata
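For context, the parametrized cases above exercise escaping of non-printable characters in Koji metadata: \x1f and \x03 become the literal text \x1f and \x03 (backslash spelled out), while \n, \r and \t are preserved. The sketch below is purely illustrative of that behaviour and is not the atomic-reactor implementation of escape_non_printable_chars, which may differ.
def _escape_non_printable_chars_sketch(data):
    # Recursively walk dicts/lists and rewrite string values so that any
    # non-printable character other than \n, \r and \t is replaced by its
    # literal \xNN spelling, matching the expectations in the cases above.
    if isinstance(data, dict):
        return {k: _escape_non_printable_chars_sketch(v) for k, v in data.items()}
    if isinstance(data, list):
        return [_escape_non_printable_chars_sketch(v) for v in data]
    if isinstance(data, str):
        return ''.join(
            c if c.isprintable() or c in '\n\r\t' else '\\x%02x' % ord(c)
            for c in data
        )
    return data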
|
{
"content_hash": "b958b5635b35d7eb6e6fe0e7885dcc68",
"timestamp": "",
"source": "github",
"line_count": 2470,
"max_line_length": 100,
"avg_line_length": 38.000404858299596,
"alnum_prop": 0.5322977594528079,
"repo_name": "fr34k8/atomic-reactor",
"id": "8266363c345db8222b67464a05e23c1bbd252f0a",
"size": "93861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/plugins/test_koji_import.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1203"
},
{
"name": "Makefile",
"bytes": "868"
},
{
"name": "Python",
"bytes": "2045752"
},
{
"name": "Shell",
"bytes": "3892"
}
],
"symlink_target": ""
}
|
import pika
import sys
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='task_queue', durable=True)
message = ' '.join(sys.argv[1:]) or "Hello World!"
channel.basic_publish(exchange='',
routing_key='task_queue',
body=message,
properties=pika.BasicProperties(
delivery_mode = 2, # make message persistent
))
print(" [x] Sent %r" % message)
connection.close()
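The script above only publishes persistent messages to the durable task_queue; it assumes a separate worker is consuming them. A minimal consumer sketch is shown below for completeness; it is not part of this repository and assumes pika >= 1.0 for the basic_consume keyword arguments.
import time
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
# The queue declaration must match the producer's (durable=True).
channel.queue_declare(queue='task_queue', durable=True)
def callback(ch, method, properties, body):
    print(" [x] Received %r" % body)
    time.sleep(body.count(b'.'))  # pretend each '.' is one second of work
    ch.basic_ack(delivery_tag=method.delivery_tag)  # ack so the broker can discard the message
channel.basic_qos(prefetch_count=1)  # hand out one unacknowledged message at a time
channel.basic_consume(queue='task_queue', on_message_callback=callback)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()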
|
{
"content_hash": "06a2d63e792bfa327ee25544a2c2b302",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 69,
"avg_line_length": 31.944444444444443,
"alnum_prop": 0.591304347826087,
"repo_name": "maxis1314/pyutils",
"id": "2193d63a1c7ead0f4a34a4f5ae0b0beb60efdde4",
"size": "597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rabbitmq/new_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "438"
},
{
"name": "CSS",
"bytes": "24813"
},
{
"name": "HTML",
"bytes": "116149"
},
{
"name": "JavaScript",
"bytes": "77412"
},
{
"name": "PLSQL",
"bytes": "585"
},
{
"name": "Python",
"bytes": "386483"
}
],
"symlink_target": ""
}
|
"""
Scan picamera settings to see how they affect image and select prefered settings.
"""
import picamera
scan_settings = {'sharpness': range(0, 125, 25),
'contrast': range(0, 125, 25),
'brightness': range(0, 125, 25)
'saturation': range(0, 125, 25)}
for setting in scan_settings.keys():
for values in settings:
camera = picamera.PiCamera()
camera.sharpness = 0
camera.contrast = 0
camera.brightness = 50
camera.saturation = 0
camera.ISO = 0
camera.video_stabilization = False
camera.exposure_compensation = 0
camera.exposure_mode = 'auto'
camera.meter_mode = 'average'
camera.awb_mode = 'auto'
camera.image_effect = 'none'
camera.color_effects = None
camera.rotation = 0
camera.hflip = False
camera.vflip = False
camera.crop = (0.0, 0.0, 1.0, 1.0)
|
{
"content_hash": "174b1f85b3608483ee309f43a95223fc",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 81,
"avg_line_length": 24.78787878787879,
"alnum_prop": 0.6662591687041565,
"repo_name": "kbsezginel/raspberry-pi",
"id": "6f027d6081d954215ab5adb36870d81fd5c90296",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/rpi/picamera/picamera_scan_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "2462"
},
{
"name": "CSS",
"bytes": "4232"
},
{
"name": "HTML",
"bytes": "63964"
},
{
"name": "JavaScript",
"bytes": "537"
},
{
"name": "Python",
"bytes": "60961"
}
],
"symlink_target": ""
}
|
import json
from flask import Flask
from flask_track_usage import TrackUsage
from flask_track_usage.storage.redis_db import RedisStorage
from datetime import datetime
app = Flask(__name__)
redis = RedisStorage()
track = TrackUsage(app, redis)
@app.route('/')
def index():
return "ok"
@track.exclude
@app.route('/usage')
def usage():
now = datetime.now()
yesterday = datetime.fromtimestamp(1421111101) # 2015-1-13 02:05:01
res = redis.get_usage(now, yesterday)
# res = redis.get_usage()
    print(res)
return json.dumps(res)
app.config['PROPAGATE_EXCEPTIONS'] = True
app.run(port=8081, use_reloader=True)
|
{
"content_hash": "12d919a3134b7fccfdc2d3ae1875e44b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 72,
"avg_line_length": 21.896551724137932,
"alnum_prop": 0.7070866141732284,
"repo_name": "ipinak/flask-track-usage",
"id": "8eb99da8329c79eb1496bf800ec8ca6afbbf908f",
"size": "681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test_app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "71743"
}
],
"symlink_target": ""
}
|
"""Controllers for task queue handlers."""
from __future__ import annotations
import json
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import email_manager
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import question_services
from core.domain import stats_services
from core.domain import suggestion_registry
from core.domain import taskqueue_services
from core.domain import wipeout_service
from typing import Callable, Dict
class UnsentFeedbackEmailHandler(
base.BaseHandler[Dict[str, str], Dict[str, str]]
):
"""Handler task of sending emails of feedback messages."""
@acl_decorators.can_perform_tasks_in_taskqueue
def post(self) -> None:
payload = json.loads(self.request.body)
user_id = payload['user_id']
references = feedback_services.get_feedback_message_references(user_id)
if not references:
# Model may not exist if user has already attended to the feedback.
return
feedback_services.update_feedback_email_retries_transactional(user_id)
messages: Dict[str, email_manager.FeedbackMessagesDict] = {}
for reference in references:
message = feedback_services.get_message(
reference.thread_id, reference.message_id)
exploration = exp_fetchers.get_exploration_by_id(
reference.entity_id)
message_text = message.text
if len(message_text) > 200:
message_text = message_text[:200] + '...'
if exploration.id in messages:
messages[exploration.id]['messages'].append(message_text)
else:
messages[exploration.id] = {
'title': exploration.title,
'messages': [message_text]
}
email_manager.send_feedback_message_email(user_id, messages)
feedback_services.pop_feedback_message_references_transactional(
user_id, len(references))
self.render_json({})
class ContributorDashboardAchievementEmailHandler(
base.BaseHandler[Dict[str, str], Dict[str, str]]
):
"""Handler task of sending email of contributor dashboard achievements."""
URL_PATH_ARGS_SCHEMAS: Dict[str, str] = {}
HANDLER_ARGS_SCHEMAS = {
'POST': {
'contributor_user_id': {
'schema': {
'type': 'basestring'
},
'default_value': None
},
'contribution_type': {
'schema': {
'type': 'basestring'
},
'default_value': None
},
'contribution_sub_type': {
'schema': {
'type': 'basestring'
},
'default_value': None
},
'language_code': {
'schema': {
'type': 'basestring'
},
'default_value': None
},
'rank_name': {
'schema': {
'type': 'basestring'
},
'default_value': None
}
}
}
@acl_decorators.can_perform_tasks_in_taskqueue
def post(self) -> None:
payload = json.loads(self.request.body)
contributor_user_id = payload['contributor_user_id']
contribution_type = payload['contribution_type']
contribution_sub_type = payload['contribution_sub_type']
language_code = payload['language_code']
rank_name = payload['rank_name']
email_info = suggestion_registry.ContributorMilestoneEmailInfo(
contributor_user_id, contribution_type, contribution_sub_type,
language_code, rank_name)
email_manager.send_mail_to_notify_contributor_ranking_achievement(
email_info)
self.render_json({})
class InstantFeedbackMessageEmailHandler(
base.BaseHandler[Dict[str, str], Dict[str, str]]
):
"""Handles task of sending feedback message emails instantly."""
@acl_decorators.can_perform_tasks_in_taskqueue
def post(self) -> None:
payload = json.loads(self.request.body)
user_id = payload['user_id']
reference_dict = payload['reference_dict']
message = feedback_services.get_message(
reference_dict['thread_id'], reference_dict['message_id'])
exploration = exp_fetchers.get_exploration_by_id(
reference_dict['entity_id'])
thread = feedback_services.get_thread(reference_dict['thread_id'])
subject = 'New Oppia message in "%s"' % thread.subject
email_manager.send_instant_feedback_message_email(
user_id, message.author_id, message.text, subject,
exploration.title, reference_dict['entity_id'], thread.subject)
self.render_json({})
class FeedbackThreadStatusChangeEmailHandler(
base.BaseHandler[Dict[str, str], Dict[str, str]]
):
"""Handles task of sending email instantly when feedback thread status is
changed.
"""
@acl_decorators.can_perform_tasks_in_taskqueue
def post(self) -> None:
payload = json.loads(self.request.body)
user_id = payload['user_id']
reference_dict = payload['reference_dict']
old_status = payload['old_status']
new_status = payload['new_status']
message = feedback_services.get_message(
reference_dict['thread_id'], reference_dict['message_id'])
exploration = exp_fetchers.get_exploration_by_id(
reference_dict['entity_id'])
thread = feedback_services.get_thread(reference_dict['thread_id'])
text = 'changed status from %s to %s' % (old_status, new_status)
subject = 'Oppia thread status change: "%s"' % thread.subject
email_manager.send_instant_feedback_message_email(
user_id, message.author_id, text, subject, exploration.title,
reference_dict['entity_id'], thread.subject)
self.render_json({})
class FlagExplorationEmailHandler(
base.BaseHandler[Dict[str, str], Dict[str, str]]
):
"""Handles task of sending emails about flagged explorations
to moderators.
"""
@acl_decorators.can_perform_tasks_in_taskqueue
def post(self) -> None:
payload = json.loads(self.request.body)
exploration_id = payload['exploration_id']
report_text = payload['report_text']
reporter_id = payload['reporter_id']
exploration = exp_fetchers.get_exploration_by_id(exploration_id)
email_manager.send_flag_exploration_email(
exploration.title, exploration_id, reporter_id, report_text)
self.render_json({})
class DeferredTasksHandler(
base.BaseHandler[Dict[str, str], Dict[str, str]]
):
"""This task handler handles special tasks that make single asynchronous
function calls. For more complex tasks that require a large number of
function calls, the correct approach is to create a special url handler that
handles that specific task. However, it doesn't make sense to create a url
handler for single function calls. This handler handles those cases.
The convention of function ids and an explanation of the different queue
names exists in 'core/domain/taskqueue_services.py' file.
"""
DEFERRED_TASK_FUNCTIONS: Dict[str, Callable[..., None]] = {
taskqueue_services.FUNCTION_ID_DELETE_EXPS_FROM_USER_MODELS: (
exp_services.delete_explorations_from_user_models),
taskqueue_services.FUNCTION_ID_DELETE_EXPS_FROM_ACTIVITIES: (
exp_services.delete_explorations_from_activities),
taskqueue_services.FUNCTION_ID_DELETE_USERS_PENDING_TO_BE_DELETED: (
wipeout_service.delete_users_pending_to_be_deleted),
taskqueue_services.FUNCTION_ID_CHECK_COMPLETION_OF_USER_DELETION: (
wipeout_service.check_completion_of_user_deletion),
taskqueue_services.FUNCTION_ID_REGENERATE_EXPLORATION_SUMMARY: (
exp_services.regenerate_exploration_summary_with_new_contributor),
taskqueue_services.FUNCTION_ID_UPDATE_STATS: (
stats_services.update_stats),
taskqueue_services.FUNCTION_ID_UNTAG_DELETED_MISCONCEPTIONS: (
question_services.untag_deleted_misconceptions),
taskqueue_services.FUNCTION_ID_REMOVE_USER_FROM_RIGHTS_MODELS: (
wipeout_service
.remove_user_from_activities_with_associated_rights_models)
}
@acl_decorators.can_perform_tasks_in_taskqueue
def post(self) -> None:
# The request body has bytes type, thus we need to decode it first.
payload = json.loads(self.request.body.decode('utf-8'))
if 'fn_identifier' not in payload:
raise Exception(
'This request cannot defer tasks because it does not contain a '
'function identifier attribute (fn_identifier). Deferred tasks '
'must contain a function_identifier in the payload.')
if payload['fn_identifier'] not in self.DEFERRED_TASK_FUNCTIONS:
raise Exception(
'The function id, %s, is not valid.' % payload['fn_identifier'])
deferred_task_function = self.DEFERRED_TASK_FUNCTIONS[
payload['fn_identifier']]
deferred_task_function(*payload['args'], **payload['kwargs'])
self.render_json({})
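# Illustrative sketch only (not used by the handler): the request body is a
# JSON object naming one of the registered function ids above together with
# the positional and keyword arguments to forward to it. The argument values
# below are hypothetical placeholders, not real signatures from the domain
# services.
#
#     {
#         "fn_identifier": "<one of the DEFERRED_TASK_FUNCTIONS keys>",
#         "args": ["<positional arguments for that function>"],
#         "kwargs": {"<keyword argument>": "<value>"}
#     }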
|
{
"content_hash": "e28b6a99123088538ea250fb988b8ad6",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 80,
"avg_line_length": 38.78455284552845,
"alnum_prop": 0.6292841421234672,
"repo_name": "oppia/oppia",
"id": "2d027a76d9cf22e028ed0765e99ae489a479e3fc",
"size": "10146",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/controllers/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "476480"
},
{
"name": "HTML",
"bytes": "2092923"
},
{
"name": "JavaScript",
"bytes": "1247116"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "17628953"
},
{
"name": "Shell",
"bytes": "2240"
},
{
"name": "TypeScript",
"bytes": "15541372"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from __future__ import absolute_import
from django.contrib import admin
from .models import Gallery, GalleryItem
admin.site.register(Gallery)
admin.site.register(GalleryItem)
|
{
"content_hash": "0d22ff70dc5d35a3ed96e772de26d4e6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 40,
"avg_line_length": 21.9,
"alnum_prop": 0.7990867579908676,
"repo_name": "DjenieLabs/django-magic-gallery",
"id": "056fa3e0776d66bf9f6abbf03bf35ef9a7b3d98c",
"size": "243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magicgallery/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "23349"
},
{
"name": "HTML",
"bytes": "13092"
},
{
"name": "JavaScript",
"bytes": "68325"
},
{
"name": "Makefile",
"bytes": "1253"
},
{
"name": "Python",
"bytes": "28388"
}
],
"symlink_target": ""
}
|
'''
Tests Plaid
'''
import datetime
from unittest import mock
import pytest
import authentication.plaid_wrapper as PlaidMiddleware
import plaid
from plaid.api.transactions import Transactions
from plaid_test_decorators import mock_plaid_balance, \
mock_plaid_accounts, mock_plaid_transactions
def setup_module(cls):
'''Setting up testing'''
cls.original_init_method = plaid.__init__
plaid.__init__ = mock.Mock(return_value=None)
plaid.__call__ = lambda self, request: self.get_response(request)
def teardown_module(cls):
'''Teardown testing'''
plaid.__init__ = cls.original_init_method
@mock_plaid_balance
@pytest.mark.django_db(transaction=True)
def test_current_balance():
'''
Testing PlaidMiddleware.PlaidAPI.current_balance()
'''
client = plaid.Client(client_id='', secret='', public_key='', environment='')
user = PlaidMiddleware.PlaidAPI(access_token='', client=client)
balance = user.current_balance()
assert balance == -9.0
user.balance = 10
balance = user.current_balance()
assert balance == 10
@mock_plaid_accounts
@pytest.mark.django_db(transaction=True)
def test_account_name():
'''
Testing PlaidMiddleware.PlaidAPI.account_name()
'''
client = plaid.Client(client_id='', secret='', public_key='', environment='')
user = PlaidMiddleware.PlaidAPI(access_token='', client=client)
account_name = user.account_name()
assert account_name == 'Test Account'
@mock.patch.object(
Transactions,
'get',
mock.MagicMock(return_value={
'transactions': [
{
'date': (
datetime.datetime.now() - datetime.timedelta(days=10)
).strftime("%Y-%m-%d"),
'amount': 100,
},
{
'date': (
datetime.datetime.now() - datetime.timedelta(days=13)
).strftime("%Y-%m-%d"),
'amount': 1000,
}
]
})
)
@mock_plaid_balance
@pytest.mark.django_db(transaction=True)
def test_historical_data():
'''
Testing PlaidMiddleware.PlaidAPI.historical_data()
'''
client = plaid.Client(client_id='', secret='', public_key='', environment='')
user = PlaidMiddleware.PlaidAPI(access_token='', client=client)
start_date = datetime.datetime.now() - datetime.timedelta(days=365)
data = user.historical_data(start_date)
end = datetime.datetime.now().strftime("%Y-%m-%d")
mock_data = [
(end, -9.0),
(
(datetime.datetime.now() - datetime.timedelta(days=10)).strftime("%Y-%m-%d"),
-109.0
),
(
(datetime.datetime.now() - datetime.timedelta(days=13)).strftime("%Y-%m-%d"),
-1109.0
),
]
assert len(data) == len(mock_data)
assert data == mock_data
@mock_plaid_transactions
@pytest.mark.django_db(transaction=True)
def test_income():
'''
Testing PlaidMiddleware.PlaidAPI.income()
'''
client = plaid.Client(client_id='', secret='', public_key='', environment='')
user = PlaidMiddleware.PlaidAPI(access_token='', client=client)
income = user.income()
assert income == 1135.0
income2 = user.income(days=13)
assert income2 == 1125.0
income3 = user.income(days=11)
assert income3 == 0
@mock_plaid_transactions
@pytest.mark.django_db(transaction=True)
def test_expenditure():
'''
Testing PlaidMiddleware.PlaidAPI.expenditure()
'''
client = plaid.Client(client_id='', secret='', public_key='', environment='')
user = PlaidMiddleware.PlaidAPI(access_token='', client=client)
expenditure = user.expenditure()
assert expenditure == -150
expenditure2 = user.expenditure(days=5)
assert expenditure2 == 0.0
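# Worked example of the figures asserted in test_historical_data above: the
# mocked current balance is -9.0, and the history is reconstructed backwards
# through the mocked transactions, so the balance 10 days ago is
# -9.0 - 100 = -109.0 and 13 days ago is -109.0 - 1000 = -1109.0.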
|
{
"content_hash": "8bf09955a9ef2cf46f3723a06e29753a",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 89,
"avg_line_length": 29.546875,
"alnum_prop": 0.6213643574828134,
"repo_name": "Neitsch/ASE4156",
"id": "bba6645e19c7d4684af3a474b9f3f14cf119350a",
"size": "3782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_plaid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "52"
},
{
"name": "HTML",
"bytes": "6962"
},
{
"name": "JavaScript",
"bytes": "148908"
},
{
"name": "PHP",
"bytes": "219"
},
{
"name": "Python",
"bytes": "175771"
}
],
"symlink_target": ""
}
|
import string
import os
import struct
import e32
from audio import *
def load_voice(vofilename):
    # Extract one clip from voice.pak into temp.wav, using the (offset, length)
    # pair recorded for it by load_voice_list().
    global voice, voicefile
    if voice.has_key(vofilename):
        tempfile=file(THIS_PATH+u'temp.wav','wb')
        voicefile.seek(voice[vofilename][0])
        tempfile.seek(0)
        tempfile.write(voicefile.read(voice[vofilename][1]))
        tempfile.close()
def remove_null_end(srcstring):
pos=srcstring.find('\0')
if pos==-1:
return srcstring
else:
return srcstring[:pos]
def load_voice_list():
    # Read the voice.pak directory: a 4-byte entry count followed, for each
    # clip, by a 32-byte NUL-padded file name, a 4-byte offset and a 4-byte
    # length. The result is stored in the global dict as name -> (offset, length).
    global voice,voicefile
    voicefilecount=struct.unpack('i',voicefile.read(4))[0]
    voice={}
    i=0
    while i< voicefilecount:
        filename=remove_null_end(voicefile.read(32))
        fileoffset=struct.unpack('i',voicefile.read(4))[0]
        filelength=struct.unpack('i',voicefile.read(4))[0]
        voice[filename]=(fileoffset,filelength)
        i+=1
if e32.in_emulator():
THIS_PATH=u'c:\\data\\python\\'
else:
THIS_PATH=u'e:\\python\\'
vo=None
voice={}
voicefile=file(THIS_PATH+u'voice.pak','rb')
load_voice_list()
for i in voice:
if vo:
vo.stop()
vo.close()
print i
load_voice(i)
lowername=i.lower()
vo=Sound.open(THIS_PATH+u'temp.wav')
vo.set_volume(1)
if not e32.in_emulator():
vo.play()
e32.ao_sleep(5)
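# Illustrative sketch only (never called here): build a voice.pak with the same
# layout that load_voice_list() expects -- a 4-byte entry count, then one
# 32-byte NUL-padded name plus a 4-byte offset and a 4-byte length per clip,
# followed by the raw wav data at those offsets. The wav paths are hypothetical.
def pack_voice(out_path, wav_paths):
    header_size = 4 + len(wav_paths) * (32 + 4 + 4)
    entries = []
    blobs = []
    offset = header_size
    for path in wav_paths:
        data = file(path, 'rb').read()
        name = os.path.basename(path)[:32]
        entries.append((name, offset, len(data)))
        blobs.append(data)
        offset += len(data)
    out = file(out_path, 'wb')
    out.write(struct.pack('i', len(entries)))
    for name, entry_offset, entry_length in entries:
        out.write(name + '\0' * (32 - len(name)))
        out.write(struct.pack('i', entry_offset))
        out.write(struct.pack('i', entry_length))
    for data in blobs:
        out.write(data)
    out.close()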
|
{
"content_hash": "7e512defa4ddf88d110d992bb127897b",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 60,
"avg_line_length": 23.859649122807017,
"alnum_prop": 0.6242647058823529,
"repo_name": "flyher/pymo",
"id": "232710db6bca5f7beaacc40a15b25f8ee1ec0da7",
"size": "1987",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/unpackS60.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "247"
},
{
"name": "C",
"bytes": "581274"
},
{
"name": "C++",
"bytes": "151108"
},
{
"name": "Clarion",
"bytes": "2743"
},
{
"name": "Groff",
"bytes": "13374"
},
{
"name": "HTML",
"bytes": "240526"
},
{
"name": "Java",
"bytes": "149837"
},
{
"name": "Makefile",
"bytes": "144854"
},
{
"name": "Python",
"bytes": "16929339"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "29384"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import random
# shuffled_deck: will return a shuffled deck to the user
# input:
# output: a list representing a shuffled deck
def shuffled_deck():
basic_deck = range(2, 15) * 4
random.shuffle(basic_deck)
return basic_deck
# player_turn: takes in a player name, player_name, and draws/removes a card from the deck,
# prints "user drew card x", and returns the value
# input: player_name, string
# output: int
def player_turn(player_name):
    card = deck.pop()
    print player_name, 'drew card', card
    # Return the numeric card value; returning a string here would make
    # compare_scores() rank cards lexicographically (e.g. '10' < '9').
    return card
# p1_card: player 1's card
# p2_card: player 2's card
# Returns the winner's name
# input: p1_card, int
# p2_card, int
# output: string
def compare_scores(p1_card, p2_card):
if p1_card > p2_card:
return player1
if p2_card > p1_card:
return player2
# The following are declared in the global scope to allow all functions to reference them
deck = []
player1 = raw_input("What is player 1's name?")
player2 = raw_input("What is player 2's name?")
def play():
cards = shuffled_deck()
deck.extend(cards) # since the deck is empty, we add a new set of shuffled cards each time we play
p1_score = 0
p2_score = 0
while len(deck):
card1 = player_turn(player1)
card2 = player_turn(player2)
winner = compare_scores(card1, card2)
if winner is player1:
p1_score += 1
if winner is player2:
p2_score += 1
# the deck is now empty again
print '------'
print player1, ': score of ', p1_score
print player2, ': score of ', p2_score
if p1_score > p2_score:
print player1 + ' wins!'
if p2_score > p1_score:
print player2 + ' wins!'
if p2_score == p1_score:
print 'Players tied.'
print '------'
redo = raw_input('Type "redo" and hit enter to restart the game, or anything else to quit: ')
if redo == 'redo':
play() # restart the game
play() # start the game for the first time
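# Worked example of the comparison above: with numeric card values,
# compare_scores(10, 9) returns player1's name, compare_scores(9, 10) returns
# player2's name, and a tie such as compare_scores(7, 7) returns None, so
# neither score changes for that round.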
|
{
"content_hash": "309d721757f823f4d155a79e23baeb85",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 103,
"avg_line_length": 26.76,
"alnum_prop": 0.6243148978574987,
"repo_name": "bensk/CS9",
"id": "e492e674b86dae23986a6d3162577835b8b65c3e",
"size": "2007",
"binary": false,
"copies": "3",
"ref": "refs/heads/gh-pages",
"path": "Code Examples/War.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "223049"
},
{
"name": "HTML",
"bytes": "42810"
},
{
"name": "JavaScript",
"bytes": "3384"
},
{
"name": "Jupyter Notebook",
"bytes": "3824"
},
{
"name": "Python",
"bytes": "65943"
},
{
"name": "Ruby",
"bytes": "1728"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from fossevents.events import services as event_services
def homepage(request):
ctx = {
'events': event_services.get_public_event_listings()
}
return render(request, 'pages/home.html', ctx)
|
{
"content_hash": "46f7c42aa18415e4cafdd7602af7f99e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 27.555555555555557,
"alnum_prop": 0.7137096774193549,
"repo_name": "vipul-sharma20/fossevents.in",
"id": "dd8eb3d2ffdb0150765ea420dd95d93aa78a8292",
"size": "271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fossevents/pages/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1234"
},
{
"name": "HTML",
"bytes": "6001"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "43719"
},
{
"name": "Shell",
"bytes": "2183"
}
],
"symlink_target": ""
}
|
logprint ("Starting EVLA_pipe_fluxflag.py", logfileout='logs/fluxflag.log')
#time_list=runtiming('fluxflag', 'start')
default('plotcal')
caltable='fluxgaincal.g'
xaxis='time'
yaxis='amp'
subplot=111
iteration=''
plotcal()
logprint ("Finished EVLA_pipe_fluxflag.py", logfileout='logs/fluxflag.log')
#time_list=runtiming('fluxflag', 'end')
pipeline_save()
|
{
"content_hash": "757c7d7c39027e0dd29ef663b30f2240",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 22.375,
"alnum_prop": 0.7402234636871509,
"repo_name": "e-koch/VLA_Lband",
"id": "fe0af85f92166356feca752fbdfa5566ea4df872",
"size": "1863",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "16B/pipeline4.7.1_custom/EVLA_pipe_fluxflag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2740022"
},
{
"name": "Shell",
"bytes": "98570"
}
],
"symlink_target": ""
}
|
__author__ = 'jhala'
import re
import Helpers
import logging
import logging.config
import json
logging.config.fileConfig('logging.conf')
logger = logging.getLogger(__name__)
def ToDict(matlabFeatureOutputFile):
    # Parse the MATLAB feature dump, one "name: value" pair per line, into
    # {'data': {featureName: str(float(value))}}. The imgName line is read but
    # not stored, and NaN statistics are recorded as 0.
    imgDict = {}
    for line in open(matlabFeatureOutputFile):
        lineStr = line.strip().replace(" ", "").replace("'", "")
        if not re.search("^.*:.*$", lineStr):
            continue
        lineArr = lineStr.split(":")
        featureName = lineArr[0]
        if featureName == 'imgName':
            # The image name is parsed but intentionally not stored.
            continue
        stat = lineArr[1]
        if stat == "NaN":
            stat = 0
        if 'data' not in imgDict:
            imgDict['data'] = {}
        imgDict['data'][featureName] = str(float(stat))
return imgDict
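# Illustrative example only (hypothetical file contents): given a MATLAB dump
# containing the lines
#
#     imgName: sample.jpg
#     contrast: 0.484
#     energy: NaN
#
# ToDict() returns {'data': {'contrast': '0.484', 'energy': '0.0'}} -- the
# image name is read but not stored, and NaN values collapse to '0.0'.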
|
{
"content_hash": "bf29b47f949ff26db4b68420e8b201a1",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 80,
"avg_line_length": 23.233333333333334,
"alnum_prop": 0.4878048780487805,
"repo_name": "dbk138/ImageRegionRecognition-FrontEnd",
"id": "d17341026f1f5b417c09836fa4024d38c4bcc69c",
"size": "1394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/python - Copy/SerializeImageFeatures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2883"
},
{
"name": "JavaScript",
"bytes": "21327"
},
{
"name": "M",
"bytes": "647"
},
{
"name": "Matlab",
"bytes": "39453"
},
{
"name": "Python",
"bytes": "62891"
},
{
"name": "R",
"bytes": "192"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6845"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import unittest
from unittest import mock
import pytest
from docker import APIClient, types
from docker.constants import DEFAULT_TIMEOUT_SECONDS
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.providers.docker.operators.docker_swarm import DockerSwarmOperator
class TestDockerSwarmOperator(unittest.TestCase):
@mock.patch('airflow.providers.docker.operators.docker.APIClient')
@mock.patch('airflow.providers.docker.operators.docker_swarm.types')
def test_execute(self, types_mock, client_class_mock):
mock_obj = mock.Mock()
def _client_tasks_side_effect():
for _ in range(2):
yield [{'Status': {'State': 'pending'}}]
while True:
yield [{'Status': {'State': 'complete'}}]
def _client_service_logs_effect():
yield b'Testing is awesome.'
client_mock = mock.Mock(spec=APIClient)
client_mock.create_service.return_value = {'ID': 'some_id'}
client_mock.service_logs.return_value = _client_service_logs_effect()
client_mock.images.return_value = []
client_mock.pull.return_value = [b'{"status":"pull log"}']
client_mock.tasks.side_effect = _client_tasks_side_effect()
types_mock.TaskTemplate.return_value = mock_obj
types_mock.ContainerSpec.return_value = mock_obj
types_mock.RestartPolicy.return_value = mock_obj
types_mock.Resources.return_value = mock_obj
client_class_mock.return_value = client_mock
operator = DockerSwarmOperator(
api_version='1.19',
command='env',
environment={'UNIT': 'TEST'},
image='ubuntu:latest',
mem_limit='128m',
user='unittest',
task_id='unittest',
mounts=[types.Mount(source='/host/path', target='/container/path', type='bind')],
auto_remove=True,
tty=True,
configs=[types.ConfigReference(config_id="dummy_cfg_id", config_name="dummy_cfg_name")],
secrets=[types.SecretReference(secret_id="dummy_secret_id", secret_name="dummy_secret_name")],
mode=types.ServiceMode(mode="replicated", replicas=3),
networks=["dummy_network"],
placement=types.Placement(constraints=["node.labels.region==east"]),
)
operator.execute(None)
types_mock.TaskTemplate.assert_called_once_with(
container_spec=mock_obj,
restart_policy=mock_obj,
resources=mock_obj,
networks=["dummy_network"],
placement=types.Placement(constraints=["node.labels.region==east"]),
)
types_mock.ContainerSpec.assert_called_once_with(
image='ubuntu:latest',
command='env',
user='unittest',
mounts=[types.Mount(source='/host/path', target='/container/path', type='bind')],
tty=True,
env={'UNIT': 'TEST', 'AIRFLOW_TMP_DIR': '/tmp/airflow'},
configs=[types.ConfigReference(config_id="dummy_cfg_id", config_name="dummy_cfg_name")],
secrets=[types.SecretReference(secret_id="dummy_secret_id", secret_name="dummy_secret_name")],
)
types_mock.RestartPolicy.assert_called_once_with(condition='none')
types_mock.Resources.assert_called_once_with(mem_limit='128m')
client_class_mock.assert_called_once_with(
base_url='unix://var/run/docker.sock', tls=None, version='1.19', timeout=DEFAULT_TIMEOUT_SECONDS
)
client_mock.service_logs.assert_called_once_with(
'some_id', follow=True, stdout=True, stderr=True, is_tty=True
)
csargs, cskwargs = client_mock.create_service.call_args_list[0]
assert len(csargs) == 1, 'create_service called with different number of arguments than expected'
assert csargs == (mock_obj,)
assert cskwargs['labels'] == {'name': 'airflow__adhoc_airflow__unittest'}
assert cskwargs['name'].startswith('airflow-')
assert cskwargs['mode'] == types.ServiceMode(mode="replicated", replicas=3)
assert client_mock.tasks.call_count == 5
client_mock.remove_service.assert_called_once_with('some_id')
@mock.patch('airflow.providers.docker.operators.docker.APIClient')
@mock.patch('airflow.providers.docker.operators.docker_swarm.types')
def test_auto_remove(self, types_mock, client_class_mock):
mock_obj = mock.Mock()
client_mock = mock.Mock(spec=APIClient)
client_mock.create_service.return_value = {'ID': 'some_id'}
client_mock.images.return_value = []
client_mock.pull.return_value = [b'{"status":"pull log"}']
client_mock.tasks.return_value = [{'Status': {'State': 'complete'}}]
types_mock.TaskTemplate.return_value = mock_obj
types_mock.ContainerSpec.return_value = mock_obj
types_mock.RestartPolicy.return_value = mock_obj
types_mock.Resources.return_value = mock_obj
client_class_mock.return_value = client_mock
operator = DockerSwarmOperator(image='', auto_remove=True, task_id='unittest', enable_logging=False)
operator.execute(None)
client_mock.remove_service.assert_called_once_with('some_id')
@mock.patch('airflow.providers.docker.operators.docker.APIClient')
@mock.patch('airflow.providers.docker.operators.docker_swarm.types')
def test_no_auto_remove(self, types_mock, client_class_mock):
mock_obj = mock.Mock()
client_mock = mock.Mock(spec=APIClient)
client_mock.create_service.return_value = {'ID': 'some_id'}
client_mock.images.return_value = []
client_mock.pull.return_value = [b'{"status":"pull log"}']
client_mock.tasks.return_value = [{'Status': {'State': 'complete'}}]
types_mock.TaskTemplate.return_value = mock_obj
types_mock.ContainerSpec.return_value = mock_obj
types_mock.RestartPolicy.return_value = mock_obj
types_mock.Resources.return_value = mock_obj
client_class_mock.return_value = client_mock
operator = DockerSwarmOperator(image='', auto_remove=False, task_id='unittest', enable_logging=False)
operator.execute(None)
assert (
client_mock.remove_service.call_count == 0
), 'Docker service being removed even when `auto_remove` set to `False`'
@parameterized.expand([('failed',), ('shutdown',), ('rejected',), ('orphaned',), ('remove',)])
@mock.patch('airflow.providers.docker.operators.docker.APIClient')
@mock.patch('airflow.providers.docker.operators.docker_swarm.types')
def test_non_complete_service_raises_error(self, status, types_mock, client_class_mock):
mock_obj = mock.Mock()
client_mock = mock.Mock(spec=APIClient)
client_mock.create_service.return_value = {'ID': 'some_id'}
client_mock.images.return_value = []
client_mock.pull.return_value = [b'{"status":"pull log"}']
client_mock.tasks.return_value = [{'Status': {'State': status}}]
types_mock.TaskTemplate.return_value = mock_obj
types_mock.ContainerSpec.return_value = mock_obj
types_mock.RestartPolicy.return_value = mock_obj
types_mock.Resources.return_value = mock_obj
client_class_mock.return_value = client_mock
operator = DockerSwarmOperator(image='', auto_remove=False, task_id='unittest', enable_logging=False)
msg = "Service did not complete: {'ID': 'some_id'}"
with pytest.raises(AirflowException) as ctx:
operator.execute(None)
assert str(ctx.value) == msg
def test_on_kill(self):
client_mock = mock.Mock(spec=APIClient)
operator = DockerSwarmOperator(image='', auto_remove=False, task_id='unittest', enable_logging=False)
operator.cli = client_mock
operator.service = {'ID': 'some_id'}
operator.on_kill()
client_mock.remove_service.assert_called_once_with('some_id')
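# Illustrative sketch only (not executed by these tests): a minimal DAG wiring
# up DockerSwarmOperator with the parameters exercised above. The dag_id,
# image, command and start date are hypothetical placeholders.
#
#     from datetime import datetime
#     from airflow import DAG
#     from airflow.providers.docker.operators.docker_swarm import DockerSwarmOperator
#
#     with DAG('docker_swarm_example', start_date=datetime(2022, 1, 1),
#              schedule_interval=None) as dag:
#         DockerSwarmOperator(
#             task_id='run_env_in_swarm',
#             image='ubuntu:latest',
#             command='env',
#             auto_remove=True,
#             enable_logging=True,
#         )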
|
{
"content_hash": "4bc3c9d8aef4e216d0ba4cbe8980f1c4",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 109,
"avg_line_length": 44.33516483516483,
"alnum_prop": 0.6435741727599454,
"repo_name": "cfei18/incubator-airflow",
"id": "4751445f58493353c234553e36fb0eed486735bb",
"size": "8856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/docker/operators/test_docker_swarm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from pmsal.bid.views import EntryListView
urlpatterns = patterns(
'pmsal.bid.views',
url(r'^$', EntryListView.as_view(), name='home'),
)
|
{
"content_hash": "56227955e1f51ea82eb19a94056b4b55",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 53,
"avg_line_length": 21.22222222222222,
"alnum_prop": 0.7015706806282722,
"repo_name": "klebercode/pmsal",
"id": "15d8bf16223a0ba8463a1885930678b4390108c3",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pmsal/bid/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "129165"
},
{
"name": "HTML",
"bytes": "78095"
},
{
"name": "JavaScript",
"bytes": "204743"
},
{
"name": "Python",
"bytes": "52737"
}
],
"symlink_target": ""
}
|
from xml.auto_gen import XMLDAOListBase
from sql.auto_gen import SQLDAOListBase
from vistrails.core.system import get_elementtree_library
from vistrails.db import VistrailsDBException
from vistrails.db.versions.v0_9_3 import version as my_version
from vistrails.db.versions.v0_9_3.domain import DBVistrail, DBWorkflow, DBLog, \
DBAbstraction, DBGroup
ElementTree = get_elementtree_library()
class DAOList(dict):
def __init__(self):
self['xml'] = XMLDAOListBase()
self['sql'] = SQLDAOListBase()
def parse_xml_file(self, filename):
return ElementTree.parse(filename)
def write_xml_file(self, filename, tree):
tree.write(filename)
def read_xml_object(self, vtType, node):
return self['xml'][vtType].fromXML(node)
def write_xml_object(self, obj, node=None):
res_node = self['xml'][obj.vtType].toXML(obj, node)
return res_node
def open_from_xml(self, filename, vtType, tree=None):
"""open_from_xml(filename) -> DBVistrail"""
if tree is None:
tree = self.parse_xml_file(filename)
vistrail = self.read_xml_object(vtType, tree.getroot())
return vistrail
def save_to_xml(self, obj, filename, tags, version=None):
"""save_to_xml(obj : object, filename: str, tags: dict,
version: str) -> None
"""
root = self.write_xml_object(obj)
if version is None:
version = my_version
root.set('version', version)
for k, v in tags.iteritems():
root.set(k, v)
tree = ElementTree.ElementTree(root)
self.write_xml_file(filename, tree)
def open_from_db(self, db_connection, vtType, id, lock=False):
all_objects = {}
global_props = {'id': id}
# print global_props
res_objects = self['sql'][vtType].get_sql_columns(db_connection,
global_props,
lock)
if len(res_objects) > 1:
raise VistrailsDBException("More than object of type '%s' and "
"id '%s' exist in the database" % \
(vtType, id))
elif len(res_objects) <= 0:
raise VistrailsDBException("No objects of type '%s' and "
"id '%s' exist in the database" % \
(vtType, id))
all_objects.update(res_objects)
res = res_objects.values()[0]
del global_props['id']
for dao in self['sql'].itervalues():
if (dao == self['sql'][DBVistrail.vtType] or
# dao == self['sql'][DBWorkflow.vtType] or
dao == self['sql'][DBLog.vtType] or
dao == self['sql'][DBAbstraction.vtType]):
continue
current_objs = dao.get_sql_columns(db_connection, global_props,
lock)
if dao == self['sql'][DBWorkflow.vtType]:
for key, obj in current_objs.iteritems():
if key[0] == vtType and key[1] == id:
continue
elif key[0] == DBWorkflow.vtType:
res_obj = self.open_from_db(db_connection, key[0],
key[1], lock)
res_dict = {}
res_dict[(res_obj.vtType, res_obj.db_id)] = res_obj
all_objects.update(res_dict)
else:
all_objects.update(current_objs)
for key, obj in all_objects.iteritems():
if key[0] == vtType and key[1] == id:
continue
self['sql'][obj.vtType].from_sql_fast(obj, all_objects)
for obj in all_objects.itervalues():
obj.is_dirty = False
obj.is_new = False
return res
def save_to_db(self, db_connection, obj, do_copy=False, global_props=None):
if do_copy and obj.db_id is not None:
obj.db_id = None
children = obj.db_children() # forSQL=True)
children.reverse()
if global_props is None:
global_props = {'entity_type': obj.vtType}
# print 'global_props:', global_props
# assumes not deleting entire thing
(child, _, _) = children[0]
self['sql'][child.vtType].set_sql_columns(db_connection, child,
global_props, do_copy)
self['sql'][child.vtType].to_sql_fast(child, do_copy)
global_props = {'entity_id': child.db_id,
'entity_type': child.vtType}
if not do_copy:
for (child, _, _) in children:
for c in child.db_deleted_children(True):
self['sql'][c.vtType].delete_sql_column(db_connection,
c,
global_props)
(child, _, _) = children.pop(0)
child.is_dirty = False
child.is_new = False
for (child, _, _) in children:
# print "child:", child.vtType, child.db_id
self['sql'][child.vtType].set_sql_columns(db_connection, child,
global_props, do_copy)
self['sql'][child.vtType].to_sql_fast(child, do_copy)
if child.vtType == DBGroup.vtType:
if child.db_workflow:
# print '*** entity_type:', global_props['entity_type']
self.save_to_db(db_connection, child.db_workflow,
do_copy,
{'entity_id': global_props['entity_id'],
'entity_type': \
global_props['entity_type']}
)
child.is_dirty = False
child.is_new = False
def serialize(self, object):
root = self.write_xml_object(object)
return ElementTree.tostring(root)
def unserialize(self, str, obj_type):
def set_dirty(obj):
for child, _, _ in obj.db_children():
if child.vtType == DBGroup.vtType:
if child.db_workflow:
set_dirty(child.db_workflow)
child.is_dirty = True
child.is_new = True
try:
root = ElementTree.fromstring(str)
obj = self.read_xml_object(obj_type, root)
set_dirty(obj)
return obj
except SyntaxError, e:
msg = "Invalid VisTrails serialized object %s" % str
raise VistrailsDBException(msg)
return None
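# Illustrative usage only (file names are hypothetical): a DAOList round-trips
# a vistrail through XML with open_from_xml()/save_to_xml(), e.g.
#
#     dao_list = DAOList()
#     vistrail = dao_list.open_from_xml('example.xml', DBVistrail.vtType)
#     dao_list.save_to_xml(vistrail, 'copy.xml', {}, my_version)
#
# serialize()/unserialize() provide the same conversion to and from strings.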
|
{
"content_hash": "3d14b1db351df340e549671e6e1e3805",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 80,
"avg_line_length": 40.96470588235294,
"alnum_prop": 0.4922458357265939,
"repo_name": "Nikea/VisTrails",
"id": "ce764d581d33bf57afe5262987370d92fce12e7a",
"size": "8844",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vistrails/db/versions/v0_9_3/persistence/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19611"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66415"
},
{
"name": "PHP",
"bytes": "49038"
},
{
"name": "Python",
"bytes": "19674395"
},
{
"name": "R",
"bytes": "778864"
},
{
"name": "Rebol",
"bytes": "3972"
},
{
"name": "Shell",
"bytes": "34182"
},
{
"name": "TeX",
"bytes": "145219"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
import json
import logging
import StringIO
import unittest
from telemetry.internal.backends.remote import trybot_browser_finder
from telemetry.internal.browser import browser_options
from telemetry.testing import simple_mock
from telemetry.testing import system_stub
class TrybotBrowserFinderTest(unittest.TestCase):
def setUp(self):
self.log_output = StringIO.StringIO()
self.stream_handler = logging.StreamHandler(self.log_output)
logging.getLogger().addHandler(self.stream_handler)
self._real_subprocess = trybot_browser_finder.subprocess
self._real_urllib2 = trybot_browser_finder.urllib2
self._stubs = system_stub.Override(trybot_browser_finder,
['sys', 'open', 'os'])
def tearDown(self):
logging.getLogger().removeHandler(self.stream_handler)
self.log_output.close()
trybot_browser_finder.subprocess = self._real_subprocess
trybot_browser_finder.urllib2 = self._real_urllib2
self._stubs.Restore()
def _ExpectProcesses(self, args):
mock_subprocess = simple_mock.MockObject()
mock_subprocess.SetAttribute('PIPE', simple_mock.MockObject())
for arg in args:
mock_popen = simple_mock.MockObject()
mock_popen.ExpectCall('communicate').WillReturn(arg[1][1:])
mock_popen.ExpectCall('poll').WillReturn(arg[1][0])
mock_subprocess.ExpectCall(
'Popen').WithArgs(arg[0]).WillReturn(mock_popen)
trybot_browser_finder.subprocess = mock_subprocess
def _MockTryserverJson(self, bots_dict):
trybot_browser_finder.urllib2 = simple_mock.MockObject()
trybot_browser_finder.urllib2.ExpectCall('urlopen').WithArgs(
'http://build.chromium.org/p/tryserver.chromium.perf/json').WillReturn(
StringIO.StringIO(json.dumps({'builders': bots_dict})))
def test_find_all_browser_types_list(self):
finder_options = browser_options.BrowserFinderOptions(browser_type='list')
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'mac_10_9_perf_bisect': 'otherstuff',
'win_perf_bisect_builder': 'not a trybot',
})
expected_trybots_list = [
'trybot-all',
'trybot-all-android',
'trybot-all-linux',
'trybot-all-mac',
'trybot-all-win',
'trybot-android-nexus4',
'trybot-mac-10-9'
]
self.assertEquals(
expected_trybots_list,
sorted(trybot_browser_finder.FindAllBrowserTypes(finder_options)))
def test_find_all_browser_types_trybot(self):
finder_options = browser_options.BrowserFinderOptions(
browser_type='trybot-win')
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'mac_10_9_perf_bisect': 'otherstuff',
'win_perf_bisect_builder': 'not a trybot',
})
expected_trybots_list = [
'trybot-all',
'trybot-all-android',
'trybot-all-linux',
'trybot-all-mac',
'trybot-all-win',
'trybot-android-nexus4',
'trybot-mac-10-9'
]
self.assertEquals(
expected_trybots_list,
sorted(trybot_browser_finder.FindAllBrowserTypes(finder_options)))
def test_find_all_browser_types_non_trybot_browser(self):
finder_options = browser_options.BrowserFinderOptions(
browser_type='release')
trybot_browser_finder.urllib2 = simple_mock.MockObject()
self.assertEquals(
[],
# pylint: disable=protected-access
sorted(trybot_browser_finder.FindAllBrowserTypes(finder_options)))
def test_constructor(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'mac_10_9_perf_bisect': 'otherstuff',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self.assertEquals('android', browser.target_os)
# pylint: disable=protected-access
self.assertTrue('android' in browser._builder_names)
self.assertEquals(['android_nexus4_perf_bisect'],
browser._builder_names.get('android'))
def test_support_with_chrome_root(self):
finder_options = browser_options.BrowserFinderOptions()
finder_options.profile_dir = None
finder_options.chrome_root = '/a/b/c'
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'mac_10_9_perf_bisect': 'otherstuff',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self.assertTrue(browser.SupportsOptions(finder_options))
def test_constructor_trybot_all(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'android_nexus5_perf_bisect': 'stuff2',
'mac_10_9_perf_bisect': 'otherstuff',
'mac_perf_bisect': 'otherstuff1',
'win_perf_bisect': 'otherstuff2',
'linux_perf_bisect': 'otherstuff3',
'win_x64_perf_bisect': 'otherstuff4',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-all', finder_options)
self.assertEquals('all', browser.target_os)
# pylint: disable=protected-access
self.assertEquals(
['android', 'linux', 'mac', 'win', 'win-x64'],
sorted(browser._builder_names))
self.assertEquals(
['android_nexus4_perf_bisect', 'android_nexus5_perf_bisect'],
sorted(browser._builder_names.get('android')))
self.assertEquals(
['mac_10_9_perf_bisect', 'mac_perf_bisect'],
sorted(browser._builder_names.get('mac')))
self.assertEquals(
['linux_perf_bisect'], sorted(browser._builder_names.get('linux')))
self.assertEquals(
['win_perf_bisect'], sorted(browser._builder_names.get('win')))
self.assertEquals(
['win_x64_perf_bisect'], sorted(browser._builder_names.get('win-x64')))
def test_constructor_trybot_all_win(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'android_nexus5_perf_bisect': 'stuff2',
'win_8_perf_bisect': 'otherstuff',
'win_perf_bisect': 'otherstuff2',
'linux_perf_bisect': 'otherstuff3',
'win_x64_perf_bisect': 'otherstuff4',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-all-win', finder_options)
self.assertEquals('all', browser.target_os)
# pylint: disable=protected-access
self.assertEquals(
['win', 'win-x64'],
sorted(browser._builder_names))
self.assertEquals(
['win_8_perf_bisect', 'win_perf_bisect'],
sorted(browser._builder_names.get('win')))
self.assertEquals(
['win_x64_perf_bisect'], sorted(browser._builder_names.get('win-x64')))
def test_constructor_trybot_all_android(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'android_nexus5_perf_bisect': 'stuff2',
'win_8_perf_bisect': 'otherstuff',
'win_perf_bisect': 'otherstuff2',
'linux_perf_bisect': 'otherstuff3',
'win_x64_perf_bisect': 'otherstuff4',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-all-android', finder_options)
self.assertEquals(
['android_nexus4_perf_bisect', 'android_nexus5_perf_bisect'],
sorted(browser._builder_names.get('android')))
def test_constructor_trybot_all_mac(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'win_8_perf_bisect': 'otherstuff',
'mac_perf_bisect': 'otherstuff2',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-all-mac', finder_options)
self.assertEquals('all', browser.target_os)
# pylint: disable=protected-access
self.assertEquals(
['mac'],
sorted(browser._builder_names))
self.assertEquals(
['mac_perf_bisect'],
sorted(browser._builder_names.get('mac')))
def test_constructor_trybot_all_linux(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'linux_perf_bisect': 'stuff1',
'win_8_perf_bisect': 'otherstuff',
'mac_perf_bisect': 'otherstuff2',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-all-linux', finder_options)
self.assertEquals('all', browser.target_os)
# pylint: disable=protected-access
self.assertEquals(
['linux'],
sorted(browser._builder_names))
self.assertEquals(
['linux_perf_bisect'],
sorted(browser._builder_names.get('linux')))
def test_no_git(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (128, None, None)),
))
browser.RunRemote()
self.assertEquals(
'Must be in a git repository to send changes to trybots.\n',
self.log_output.getvalue())
def test_dirty_tree(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (0, 'br', None)),
(['git', 'update-index', '--refresh', '-q'], (0, None, None,)),
(['git', 'diff-index', 'HEAD'], (0, 'dirty tree', None)),
))
browser.RunRemote()
self.assertEquals(
'Cannot send a try job with a dirty tree. Commit locally first.\n',
self.log_output.getvalue())
def test_no_local_commits(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (0, 'br', None)),
(['git', 'update-index', '--refresh', '-q'], (0, None, None,)),
(['git', 'diff-index', 'HEAD'], (0, '', None)),
(['git', 'log', 'origin/master..HEAD'], (0, '', None)),
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (0, 'br', None)),
(['git', 'update-index', '--refresh', '-q'], (0, None, None,)),
(['git', 'diff-index', 'HEAD'], (0, '', None)),
(['git', 'log', 'origin/master..HEAD'], (0, '', None)),
))
browser.RunRemote()
self.assertEquals(
('No local changes found in chromium or blink trees. '
'browser=trybot-android-nexus4 argument sends local changes to the '
'perf trybot(s): '
'[[\'android_nexus4_perf_bisect\']].\n'),
self.log_output.getvalue())
def test_branch_checkout_fails(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (0, 'br', None)),
(['git', 'update-index', '--refresh', '-q'], (0, None, None,)),
(['git', 'diff-index', 'HEAD'], (0, '', None)),
(['git', 'log', 'origin/master..HEAD'], (0, 'logs here', None)),
(['git', 'checkout', '-b', 'telemetry-tryjob'],
(1, None, 'fatal: A branch named \'telemetry-try\' already exists.')),
))
browser.RunRemote()
self.assertEquals(
('Error creating branch telemetry-tryjob. '
'Please delete it if it exists.\n'
'fatal: A branch named \'telemetry-try\' already exists.\n'),
self.log_output.getvalue())
def _GetConfigForBrowser(self, name, platform, branch, cfg_filename,
is_blink=False):
finder_options = browser_options.BrowserFinderOptions()
bot = '%s_perf_bisect' % name.replace('trybot-', '').replace('-', '_')
self._MockTryserverJson({bot: 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(name, finder_options)
first_processes = ()
if is_blink:
first_processes = (
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (0, 'br', None)),
(['git', 'update-index', '--refresh', '-q'], (0, None, None,)),
(['git', 'diff-index', 'HEAD'], (0, '', None)),
(['git', 'log', 'origin/master..HEAD'], (0, '', None))
)
self._ExpectProcesses(first_processes + (
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (0, branch, None)),
(['git', 'update-index', '--refresh', '-q'], (0, None, None,)),
(['git', 'diff-index', 'HEAD'], (0, '', None)),
(['git', 'log', 'origin/master..HEAD'], (0, 'logs here', None)),
(['git', 'checkout', '-b', 'telemetry-tryjob'], (0, None, None)),
(['git', 'branch', '--set-upstream-to', 'origin/master'],
(0, None, None)),
(['git', 'commit', '-a', '-m', 'bisect config: %s' % platform],
(0, None, None)),
(['git', 'cl', 'upload', '-f', '--bypass-hooks', '-m',
'CL for perf tryjob on %s' % platform],
(0, 'stuff https://codereview.chromium.org/12345 stuff', None)),
(['git', 'cl', 'try', '-m', 'tryserver.chromium.perf', '-b', bot],
(0, None, None)),
(['git', 'checkout', branch], (0, None, None)),
(['git', 'branch', '-D', 'telemetry-tryjob'], (0, None, None))
))
self._stubs.sys.argv = [
'tools/perf/run_benchmark',
'--browser=%s' % browser,
'sunspider']
cfg = StringIO.StringIO()
self._stubs.open.files = {cfg_filename: cfg}
browser.RunRemote()
return cfg.getvalue()
def test_config_android(self):
config = self._GetConfigForBrowser(
'trybot-android-nexus4', 'android', 'somebranch',
'tools/run-perf-test.cfg')
self.assertEquals(
('config = {\n'
' "command": "./tools/perf/run_benchmark '
'--browser=android-chromium sunspider",\n'
' "max_time_minutes": "120",\n'
' "repeat_count": "1",\n'
' "target_arch": "ia32",\n'
' "truncate_percent": "0"\n'
'}'), config)
def test_config_mac(self):
config = self._GetConfigForBrowser(
'trybot-mac-10-9', 'mac', 'currentwork', 'tools/run-perf-test.cfg')
self.assertEquals(
('config = {\n'
' "command": "./tools/perf/run_benchmark '
'--browser=release sunspider",\n'
' "max_time_minutes": "120",\n'
' "repeat_count": "1",\n'
' "target_arch": "ia32",\n'
' "truncate_percent": "0"\n'
'}'), config)
def test_config_win_x64(self):
config = self._GetConfigForBrowser(
'trybot-win-x64', 'win-x64', 'currentwork', 'tools/run-perf-test.cfg')
self.assertEquals(
('config = {\n'
' "command": "python tools\\\\perf\\\\run_benchmark '
'--browser=release_x64 sunspider",\n'
' "max_time_minutes": "120",\n'
' "repeat_count": "1",\n'
' "target_arch": "x64",\n'
' "truncate_percent": "0"\n'
'}'), config)
def test_config_blink(self):
config = self._GetConfigForBrowser(
'trybot-mac-10-9', 'mac', 'blinkbranch',
'Tools/run-perf-test.cfg', True)
self.assertEquals(
('config = {\n'
' "command": "./tools/perf/run_benchmark '
'--browser=release sunspider",\n'
' "max_time_minutes": "120",\n'
' "repeat_count": "1",\n'
' "target_arch": "ia32",\n'
' "truncate_percent": "0"\n'
'}'), config)
def test_update_config_git_commit_tryboterror(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'commit', '-a', '-m', 'bisect config: android'],
(128, 'None', 'commit failed')),
(['git', 'cl', 'upload', '-f', '--bypass-hooks', '-m',
'CL for perf tryjob on android'],
(0, 'stuff https://codereview.chromium.org/12345 stuff', None)),
(['git', 'cl', 'try', '-m', 'tryserver.chromium.perf', '-b',
'android_nexus4_perf_bisect'], (0, None, None))))
self._stubs.sys.argv = [
'tools/perf/run_benchmark',
'--browser=%s' % browser,
'sunspider']
cfg_filename = 'tools/run-perf-test.cfg'
cfg = StringIO.StringIO()
self._stubs.open.files = {cfg_filename: cfg}
self.assertRaises(trybot_browser_finder.TrybotError,
browser._UpdateConfigAndRunTryjob, 'android', cfg_filename)
def test_update_config_git_upload_tryboterror(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'commit', '-a', '-m', 'bisect config: android'],
(0, 'None', None)),
(['git', 'cl', 'upload', '-f', '--bypass-hooks', '-m',
'CL for perf tryjob on android'],
(128, None, 'error')),
(['git', 'cl', 'try', '-m', 'tryserver.chromium.perf', '-b',
'android_nexus4_perf_bisect'], (0, None, None))))
self._stubs.sys.argv = [
'tools/perf/run_benchmark',
'--browser=%s' % browser,
'sunspider']
cfg_filename = 'tools/run-perf-test.cfg'
cfg = StringIO.StringIO()
self._stubs.open.files = {cfg_filename: cfg}
self.assertRaises(trybot_browser_finder.TrybotError,
browser._UpdateConfigAndRunTryjob, 'android', cfg_filename)
def test_update_config_git_try_tryboterror(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'commit', '-a', '-m', 'bisect config: android'],
(0, 'None', None)),
(['git', 'cl', 'upload', '-f', '--bypass-hooks', '-m',
'CL for perf tryjob on android'],
(0, 'stuff https://codereview.chromium.org/12345 stuff', None)),
(['git', 'cl', 'try', '-m', 'tryserver.chromium.perf', '-b',
'android_nexus4_perf_bisect'], (128, None, None))))
self._stubs.sys.argv = [
'tools/perf/run_benchmark',
'--browser=%s' % browser,
'sunspider']
cfg_filename = 'tools/run-perf-test.cfg'
cfg = StringIO.StringIO()
self._stubs.open.files = {cfg_filename: cfg}
self.assertRaises(trybot_browser_finder.TrybotError,
browser._UpdateConfigAndRunTryjob, 'android', cfg_filename)
def test_update_config_git_try(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'commit', '-a', '-m', 'bisect config: android'],
(0, 'None', None)),
(['git', 'cl', 'upload', '-f', '--bypass-hooks', '-m',
'CL for perf tryjob on android'],
(0, 'stuff https://codereview.chromium.org/12345 stuff', None)),
(['git', 'cl', 'try', '-m', 'tryserver.chromium.perf', '-b',
'android_nexus4_perf_bisect'], (0, None, None))))
self._stubs.sys.argv = [
'tools/perf/run_benchmark',
'--browser=%s' % browser,
'sunspider']
cfg_filename = 'tools/run-perf-test.cfg'
cfg = StringIO.StringIO()
self._stubs.open.files = {cfg_filename: cfg}
self.assertEquals((0, 'https://codereview.chromium.org/12345'),
browser._UpdateConfigAndRunTryjob('android', cfg_filename))
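# Worked example of the flow exercised above: running
#
#     tools/perf/run_benchmark --browser=trybot-android-nexus4 sunspider
#
# from a clean local branch writes a bisect config (tools/run-perf-test.cfg for
# chromium, Tools/run-perf-test.cfg for blink-only changes), uploads a CL, and
# then issues `git cl try -m tryserver.chromium.perf -b
# android_nexus4_perf_bisect`, which is exactly the subprocess sequence the
# mocks in these tests assert on.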
|
{
"content_hash": "b1a1565d0a897b7e842b1bb34e38c2de",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 79,
"avg_line_length": 41.589178356713425,
"alnum_prop": 0.6090685683997494,
"repo_name": "Workday/OpenFrame",
"id": "faf2060c9e0152f1d010376e88a921d6438e3366",
"size": "20916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/internal/backends/remote/trybot_browser_finder_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.contrib.contenttypes import generic
from mezzanine.pages.models import Page, RichText
from hs_core.models import AbstractResource, resource_processor, ResourceFile
from mezzanine.pages.page_processors import processor_for
from django.db import models
from django.contrib.contenttypes.models import ContentType
class ResourceAggregation(Page, AbstractResource, RichText):
resources = generic.GenericRelation('resource_aggregation.Resource')
def can_add(self, request):
return AbstractResource.can_add(self, request)
def can_change(self, request):
return AbstractResource.can_change(self, request)
def can_delete(self, request):
return AbstractResource.can_delete(self, request)
def can_view(self, request):
return AbstractResource.can_view(self, request)
processor_for(ResourceAggregation)(resource_processor)
class Resource(models.Model):
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType)
content_object = generic.GenericForeignKey('content_type', 'object_id')
resource_short_id = models.CharField(max_length=32, db_index=True) # the short_id of the resource
resource_description = models.CharField(max_length=5000, blank=True, default='')
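# Illustrative sketch only (instance values are hypothetical): a Resource row
# points at its owning aggregation through the generic foreign key, so linking
# an existing resource looks roughly like
#
#     aggregation = ResourceAggregation.objects.first()
#     Resource.objects.create(
#         content_object=aggregation,
#         resource_short_id='abc123def456',
#         resource_description='a linked resource',
#     )
#
# after which aggregation.resources.all() returns the linked Resource rows.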
|
{
"content_hash": "3bf5d211da9d44c7ce2355f3a5e3388b",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 102,
"avg_line_length": 40.15151515151515,
"alnum_prop": 0.7358490566037735,
"repo_name": "hydroshare/hydroshare_temp",
"id": "4a3361ded02f5c208ff4ebd7ae41942c1abd4f75",
"size": "1325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resource_aggregation/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "173515"
},
{
"name": "C++",
"bytes": "4136"
},
{
"name": "CSS",
"bytes": "228598"
},
{
"name": "CoffeeScript",
"bytes": "34267"
},
{
"name": "JavaScript",
"bytes": "736373"
},
{
"name": "Python",
"bytes": "1870088"
},
{
"name": "Shell",
"bytes": "5335"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from builtins import object
from proteus import *
from proteus.default_p import *
from proteus.ctransportCoefficients import smoothedHeaviside
from math import *
try:
from .multiphase import *
except:
from multiphase import *
from proteus.mprans import CLSVOF
LevelModelType = CLSVOF.LevelModel
coefficients = CLSVOF.Coefficients(V_model=V_model,
ME_model=CLSVOF_model,
useMetrics=useMetrics,
epsFactHeaviside=epsFactHeaviside_clsvof,
epsFactDirac=epsFactHeaviside_clsvof,
lambdaFact=lambdaFact_clsvof,
outputQuantDOFs=True,
computeMetrics=computeMetrics_clsvof)
coefficients.variableNames=['phi']
name="clsvof"
#####################
# INITIAL CONDITION #
#####################
class init_cond(object):
def uOfXT(self,x,t):
return signedDistance(x)
initialConditions = {0:init_cond()}
#######################
# BOUNDARY CONDITIONS #
#######################
def getDBC_vof(x,flag):
if flag == boundaryTags['top'] and openTop:
return lambda x,t: 1.0
#
def getAFBC_vof(x,flag):
if flag != boundaryTags['top'] or not openTop:
return lambda x,t: 0.0
dirichletConditions = {0:getDBC_vof}
advectiveFluxBoundaryConditions = {0:getAFBC_vof}
diffusiveFluxBoundaryConditions = {0:{}}
|
{
"content_hash": "4c4b9919d93517644a111e14374af18f",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 76,
"avg_line_length": 32.73913043478261,
"alnum_prop": 0.5916334661354582,
"repo_name": "erdc/proteus",
"id": "a0ed519a6face2dd5c771dfd7e5d2e650dffdc2c",
"size": "1506",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "proteus/tests/CLSVOF/with_RANS3PF/clsvof_p.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "2790"
},
{
"name": "Asymptote",
"bytes": "1569"
},
{
"name": "C",
"bytes": "2827957"
},
{
"name": "C++",
"bytes": "7262408"
},
{
"name": "Cython",
"bytes": "154607"
},
{
"name": "Dockerfile",
"bytes": "2738"
},
{
"name": "Fortran",
"bytes": "51671"
},
{
"name": "Jupyter Notebook",
"bytes": "33357"
},
{
"name": "Makefile",
"bytes": "19043"
},
{
"name": "Python",
"bytes": "12534530"
},
{
"name": "Roff",
"bytes": "322"
},
{
"name": "Shell",
"bytes": "14084"
}
],
"symlink_target": ""
}
|
import decimal
import datetime
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.tests.data_type_ops.testing_utils import OpsTestBase
class ComplexOpsTest(OpsTestBase):
@property
def pser(self):
return pd.Series([[1, 2, 3]])
@property
def psser(self):
return ps.from_pandas(self.pser)
@property
def numeric_array_pdf(self):
psers = {
"int": pd.Series([[1, 2, 3]]),
"float": pd.Series([[0.1, 0.2, 0.3]]),
"decimal": pd.Series([[decimal.Decimal(1), decimal.Decimal(2), decimal.Decimal(3)]]),
}
return pd.concat(psers, axis=1)
@property
def numeric_array_psdf(self):
return ps.from_pandas(self.numeric_array_pdf)
@property
def numeric_array_df_cols(self):
return self.numeric_array_pdf.columns
@property
def non_numeric_array_pdf(self):
psers = {
"string": pd.Series([["x", "y", "z"]]),
"date": pd.Series(
[[datetime.date(1994, 1, 1), datetime.date(1994, 1, 2), datetime.date(1994, 1, 3)]]
),
"bool": pd.Series([[True, True, False]]),
}
return pd.concat(psers, axis=1)
@property
def non_numeric_array_psdf(self):
return ps.from_pandas(self.non_numeric_array_pdf)
@property
def non_numeric_array_df_cols(self):
return self.non_numeric_array_pdf.columns
@property
def array_pdf(self):
return pd.concat([self.numeric_array_pdf, self.non_numeric_array_pdf], axis=1)
@property
def array_psdf(self):
return ps.from_pandas(self.array_pdf)
@property
def array_df_cols(self):
return self.array_pdf.columns
@property
def complex_pdf(self):
psers = {
"this_array": self.pser,
"that_array": pd.Series([[2, 3, 4]]),
"this_struct": pd.Series([("x", 1)]),
"that_struct": pd.Series([("a", 2)]),
}
return pd.concat(psers, axis=1)
@property
def complex_psdf(self):
pssers = {
"this_array": self.psser,
"that_array": ps.Series([[2, 3, 4]]),
"this_struct": ps.Index([("x", 1)]).to_series().reset_index(drop=True),
"that_struct": ps.Index([("a", 2)]).to_series().reset_index(drop=True),
}
return ps.concat(pssers, axis=1)
def test_add(self):
pdf, psdf = self.array_pdf, self.array_psdf
for col in self.array_df_cols:
self.assert_eq(pdf[col] + pdf[col], psdf[col] + psdf[col])
# Numeric array + Numeric array
for col in self.numeric_array_df_cols:
pser1, psser1 = pdf[col], psdf[col]
for other_col in self.numeric_array_df_cols:
pser2, psser2 = pdf[other_col], psdf[other_col]
self.assert_eq((pser1 + pser2).sort_values(), (psser1 + psser2).sort_values())
# Non-numeric array + Non-numeric array
self.assertRaises(
TypeError,
lambda: psdf["string"] + psdf["bool"],
)
self.assertRaises(
TypeError,
lambda: psdf["string"] + psdf["date"],
)
self.assertRaises(
TypeError,
lambda: psdf["bool"] + psdf["date"],
)
for col in self.non_numeric_array_df_cols:
pser, psser = pdf[col], psdf[col]
self.assert_eq(pser + pser, psser + psser)
# Numeric array + Non-numeric array
for numeric_col in self.numeric_array_df_cols:
for non_numeric_col in self.non_numeric_array_df_cols:
self.assertRaises(TypeError, lambda: psdf[numeric_col] + psdf[non_numeric_col])
def test_sub(self):
self.assertRaises(TypeError, lambda: self.psser - "x")
self.assertRaises(TypeError, lambda: self.psser - 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] - psdf[other_col])
def test_mul(self):
self.assertRaises(TypeError, lambda: self.psser * "x")
self.assertRaises(TypeError, lambda: self.psser * 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] * psdf[other_col])
def test_truediv(self):
self.assertRaises(TypeError, lambda: self.psser / "x")
self.assertRaises(TypeError, lambda: self.psser / 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] / psdf[other_col])
def test_floordiv(self):
self.assertRaises(TypeError, lambda: self.psser // "x")
self.assertRaises(TypeError, lambda: self.psser // 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] // psdf[other_col])
def test_mod(self):
self.assertRaises(TypeError, lambda: self.psser % "x")
self.assertRaises(TypeError, lambda: self.psser % 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] % psdf[other_col])
def test_pow(self):
self.assertRaises(TypeError, lambda: self.psser ** "x")
self.assertRaises(TypeError, lambda: self.psser ** 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] ** psdf[other_col])
def test_radd(self):
self.assertRaises(TypeError, lambda: "x" + self.psser)
self.assertRaises(TypeError, lambda: 1 + self.psser)
def test_rsub(self):
self.assertRaises(TypeError, lambda: "x" - self.psser)
self.assertRaises(TypeError, lambda: 1 - self.psser)
def test_rmul(self):
self.assertRaises(TypeError, lambda: "x" * self.psser)
self.assertRaises(TypeError, lambda: 2 * self.psser)
def test_rtruediv(self):
self.assertRaises(TypeError, lambda: "x" / self.psser)
self.assertRaises(TypeError, lambda: 1 / self.psser)
def test_rfloordiv(self):
self.assertRaises(TypeError, lambda: "x" // self.psser)
self.assertRaises(TypeError, lambda: 1 // self.psser)
def test_rmod(self):
self.assertRaises(TypeError, lambda: 1 % self.psser)
def test_rpow(self):
self.assertRaises(TypeError, lambda: "x" ** self.psser)
self.assertRaises(TypeError, lambda: 1 ** self.psser)
def test_and(self):
self.assertRaises(TypeError, lambda: self.psser & True)
self.assertRaises(TypeError, lambda: self.psser & False)
self.assertRaises(TypeError, lambda: self.psser & self.psser)
def test_rand(self):
self.assertRaises(TypeError, lambda: True & self.psser)
self.assertRaises(TypeError, lambda: False & self.psser)
def test_or(self):
self.assertRaises(TypeError, lambda: self.psser | True)
self.assertRaises(TypeError, lambda: self.psser | False)
self.assertRaises(TypeError, lambda: self.psser | self.psser)
def test_ror(self):
self.assertRaises(TypeError, lambda: True | self.psser)
self.assertRaises(TypeError, lambda: False | self.psser)
def test_from_to_pandas(self):
pdf, psdf = self.array_pdf, self.array_psdf
for col in self.array_df_cols:
pser, psser = pdf[col], psdf[col]
self.assert_eq(pser, psser.to_pandas())
self.assert_eq(ps.from_pandas(pser), psser)
def test_isnull(self):
pdf, psdf = self.array_pdf, self.array_psdf
for col in self.array_df_cols:
pser, psser = pdf[col], psdf[col]
self.assert_eq(pser.isnull(), psser.isnull())
def test_astype(self):
self.assert_eq(self.pser.astype(str), self.psser.astype(str))
def test_neg(self):
self.assertRaises(TypeError, lambda: -self.psser)
def test_abs(self):
self.assertRaises(TypeError, lambda: abs(self.psser))
def test_invert(self):
self.assertRaises(TypeError, lambda: ~self.psser)
def test_eq(self):
        pdf, psdf = self.complex_pdf, self.complex_psdf
self.assert_eq(
pdf["this_array"] == pdf["that_array"], psdf["this_array"] == psdf["that_array"]
)
self.assert_eq(
pdf["this_struct"] == pdf["that_struct"], psdf["this_struct"] == psdf["that_struct"]
)
self.assert_eq(
pdf["this_array"] == pdf["this_array"], psdf["this_array"] == psdf["this_array"]
)
self.assert_eq(
pdf["this_struct"] == pdf["this_struct"], psdf["this_struct"] == psdf["this_struct"]
)
def test_ne(self):
        pdf, psdf = self.complex_pdf, self.complex_psdf
self.assert_eq(
pdf["this_array"] != pdf["that_array"], psdf["this_array"] != psdf["that_array"]
)
self.assert_eq(
pdf["this_struct"] != pdf["that_struct"], psdf["this_struct"] != psdf["that_struct"]
)
self.assert_eq(
pdf["this_array"] != pdf["this_array"], psdf["this_array"] != psdf["this_array"]
)
self.assert_eq(
pdf["this_struct"] != pdf["this_struct"], psdf["this_struct"] != psdf["this_struct"]
)
def test_lt(self):
        pdf, psdf = self.complex_pdf, self.complex_psdf
self.assert_eq(
pdf["this_array"] < pdf["that_array"], psdf["this_array"] < psdf["that_array"]
)
self.assert_eq(
pdf["this_struct"] < pdf["that_struct"], psdf["this_struct"] < psdf["that_struct"]
)
self.assert_eq(
pdf["this_array"] < pdf["this_array"], psdf["this_array"] < psdf["this_array"]
)
self.assert_eq(
pdf["this_struct"] < pdf["this_struct"], psdf["this_struct"] < psdf["this_struct"]
)
def test_le(self):
        pdf, psdf = self.complex_pdf, self.complex_psdf
self.assert_eq(
pdf["this_array"] <= pdf["that_array"], psdf["this_array"] <= psdf["that_array"]
)
self.assert_eq(
pdf["this_struct"] <= pdf["that_struct"], psdf["this_struct"] <= psdf["that_struct"]
)
self.assert_eq(
pdf["this_array"] <= pdf["this_array"], psdf["this_array"] <= psdf["this_array"]
)
self.assert_eq(
pdf["this_struct"] <= pdf["this_struct"], psdf["this_struct"] <= psdf["this_struct"]
)
def test_gt(self):
        pdf, psdf = self.complex_pdf, self.complex_psdf
self.assert_eq(
pdf["this_array"] > pdf["that_array"], psdf["this_array"] > psdf["that_array"]
)
self.assert_eq(
pdf["this_struct"] > pdf["that_struct"], psdf["this_struct"] > psdf["that_struct"]
)
self.assert_eq(
pdf["this_array"] > pdf["this_array"], psdf["this_array"] > psdf["this_array"]
)
self.assert_eq(
pdf["this_struct"] > pdf["this_struct"], psdf["this_struct"] > psdf["this_struct"]
)
def test_ge(self):
        pdf, psdf = self.complex_pdf, self.complex_psdf
self.assert_eq(
pdf["this_array"] >= pdf["that_array"], psdf["this_array"] >= psdf["that_array"]
)
self.assert_eq(
pdf["this_struct"] >= pdf["that_struct"], psdf["this_struct"] >= psdf["that_struct"]
)
self.assert_eq(
pdf["this_array"] >= pdf["this_array"], psdf["this_array"] >= psdf["this_array"]
)
self.assert_eq(
pdf["this_struct"] >= pdf["this_struct"], psdf["this_struct"] >= psdf["this_struct"]
)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.data_type_ops.test_complex_ops import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
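# Illustrative sketch, not part of the test suite: the behaviour pinned down by
# test_add/test_sub above, shown directly. Running it requires an active Spark
# session provided by pyspark.pandas; the expected results follow the pandas
# semantics these tests compare against.
#
#     import pyspark.pandas as ps
#     s = ps.Series([[1, 2, 3]])
#     (s + s).to_pandas()   # element-wise list concatenation: [[1, 2, 3, 1, 2, 3]]
#     s - s                 # raises TypeError, as asserted in test_sub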
|
{
"content_hash": "3433c47c855f4e83a9667d38f10617a2",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 99,
"avg_line_length": 35.77521613832853,
"alnum_prop": 0.5749154180763654,
"repo_name": "cloud-fan/spark",
"id": "cc9a0bf4a7430c38449efcbe03fd96b01fa47a5b",
"size": "13199",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/pyspark/pandas/tests/data_type_ops/test_complex_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "58061"
},
{
"name": "Batchfile",
"bytes": "27405"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26338"
},
{
"name": "Dockerfile",
"bytes": "12711"
},
{
"name": "HTML",
"bytes": "42080"
},
{
"name": "HiveQL",
"bytes": "1872438"
},
{
"name": "Java",
"bytes": "4612204"
},
{
"name": "JavaScript",
"bytes": "222761"
},
{
"name": "Jupyter Notebook",
"bytes": "4310522"
},
{
"name": "Makefile",
"bytes": "2379"
},
{
"name": "PLpgSQL",
"bytes": "352609"
},
{
"name": "PowerShell",
"bytes": "4221"
},
{
"name": "Python",
"bytes": "7583330"
},
{
"name": "R",
"bytes": "1273234"
},
{
"name": "ReScript",
"bytes": "240"
},
{
"name": "Roff",
"bytes": "31808"
},
{
"name": "Scala",
"bytes": "42842603"
},
{
"name": "Shell",
"bytes": "240768"
},
{
"name": "Thrift",
"bytes": "2016"
},
{
"name": "q",
"bytes": "98156"
}
],
"symlink_target": ""
}
|
"""Support for powering relays in a DoorBird video doorbell."""
import datetime
import logging
from homeassistant.components.switch import SwitchEntity
import homeassistant.util.dt as dt_util
from .const import DOMAIN, DOOR_STATION, DOOR_STATION_INFO
from .entity import DoorBirdEntity
_LOGGER = logging.getLogger(__name__)
IR_RELAY = "__ir_light__"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the DoorBird switch platform."""
entities = []
config_entry_id = config_entry.entry_id
doorstation = hass.data[DOMAIN][config_entry_id][DOOR_STATION]
doorstation_info = hass.data[DOMAIN][config_entry_id][DOOR_STATION_INFO]
relays = doorstation_info["RELAYS"]
relays.append(IR_RELAY)
for relay in relays:
switch = DoorBirdSwitch(doorstation, doorstation_info, relay)
entities.append(switch)
async_add_entities(entities)
class DoorBirdSwitch(DoorBirdEntity, SwitchEntity):
"""A relay in a DoorBird device."""
def __init__(self, doorstation, doorstation_info, relay):
"""Initialize a relay in a DoorBird device."""
super().__init__(doorstation, doorstation_info)
self._doorstation = doorstation
self._relay = relay
self._state = False
self._assume_off = datetime.datetime.min
if relay == IR_RELAY:
self._time = datetime.timedelta(minutes=5)
else:
self._time = datetime.timedelta(seconds=5)
self._unique_id = f"{self._mac_addr}_{self._relay}"
@property
def unique_id(self):
"""Switch unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the switch."""
if self._relay == IR_RELAY:
return f"{self._doorstation.name} IR"
return f"{self._doorstation.name} Relay {self._relay}"
@property
def icon(self):
"""Return the icon to display."""
return "mdi:lightbulb" if self._relay == IR_RELAY else "mdi:dip-switch"
@property
def is_on(self):
"""Get the assumed state of the relay."""
return self._state
def turn_on(self, **kwargs):
"""Power the relay."""
if self._relay == IR_RELAY:
self._state = self._doorstation.device.turn_light_on()
else:
self._state = self._doorstation.device.energize_relay(self._relay)
now = dt_util.utcnow()
self._assume_off = now + self._time
def turn_off(self, **kwargs):
"""Turn off the relays is not needed. They are time-based."""
raise NotImplementedError("DoorBird relays cannot be manually turned off.")
def update(self):
"""Wait for the correct amount of assumed time to pass."""
if self._state and self._assume_off <= dt_util.utcnow():
self._state = False
self._assume_off = datetime.datetime.min
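# Illustrative sketch (not part of the integration): the time-based assumed
# state used by DoorBirdSwitch above, reduced to a self-contained class so the
# pattern is easy to follow in isolation. It reuses the datetime import at the
# top of this file; the five-second window mirrors the default for ordinary
# relays.
class _AssumedStateExample:
    """Tracks an 'on' state that is assumed to expire after a fixed window."""

    def __init__(self, window=datetime.timedelta(seconds=5)):
        self._window = window
        self._on = False
        self._assume_off = datetime.datetime.min

    def energize(self, now):
        """Mark the relay as on and remember when to assume it is off again."""
        self._on = True
        self._assume_off = now + self._window

    def refresh(self, now):
        """Drop the assumed 'on' state once the window has elapsed."""
        if self._on and self._assume_off <= now:
            self._on = False
            self._assume_off = datetime.datetime.min
        return self._on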
|
{
"content_hash": "443632cc5a6e7f58c791988b212c452c",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 83,
"avg_line_length": 31.597826086956523,
"alnum_prop": 0.6308909528723771,
"repo_name": "tchellomello/home-assistant",
"id": "1e4cb81a5eb67cf0023697807175ed4e0377a669",
"size": "2907",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/doorbird/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
}
|
import logging
import os
import sys
from leapp.compat import string_types
from leapp.dialogs import Dialog
from leapp.exceptions import MissingActorAttributeError, WrongAttributeTypeError
from leapp.models import Model
from leapp.tags import Tag
from leapp.utils.meta import get_flattened_subclasses
from leapp.models.error_severity import ErrorSeverity
class Actor(object):
"""
The Actor class represents the smallest step in the workflow. It defines what kind
of data it expects, it consumes (processes) the given data, and it produces data for other
actors in the workflow.
"""
ErrorSeverity = ErrorSeverity
""" Convenience forward for the :py:class:`leapp.models.error_severity.ErrorSeverity` constants. """
name = None
""" Name of the actor that is used to identify data or messages created by the actor. """
description = None
""" More verbose actor's description."""
consumes = ()
"""
Tuple of :py:class:`leapp.models.Model` derived classes defined in the :ref:`repositories <terminology:repository>`
that define :ref:`messages <terminology:message>` the actor consumes.
"""
produces = ()
"""
Tuple of :py:class:`leapp.models.Model` derived classes defined in the :ref:`repositories <terminology:repository>`
that define :ref:`messages <terminology:message>` the actor produces.
"""
tags = ()
"""
Tuple of :py:class:`leapp.tags.Tag` derived classes by which :ref:`workflow <terminology:workflow>`
:ref:`phases <terminology:phase>` select actors for execution.
"""
dialogs = ()
"""
Tuple of :py:class:`leapp.dialogs.dialog.Dialog` derived classes that define questions to ask the user.
Dialogs that are added to this list allow for persisting answers the user has given in the answer file storage.
"""
def __init__(self, messaging=None, logger=None):
self._messaging = messaging
self.log = (logger or logging.getLogger('leapp.actors')).getChild(self.name)
""" A configured logger instance for the current actor. """
def request_answers(self, dialog):
"""
:param dialog: Dialog instance to show
:return: dictionary with the requested answers, None if not a defined dialog
"""
if dialog in type(self).dialogs:
return self._messaging.request_answers(dialog)
return None
@property
def actor_files_paths(self):
"""
Returns the file paths that are bundled with the actor. (Path to the content of the actor's file directory).
"""
return os.getenv("LEAPP_FILES", "").split(":")
@property
def files_paths(self):
""" Returns all actor file paths related to the actor and common actors file paths. """
return self.actor_files_paths + self.common_files_paths
@property
def common_files_paths(self):
""" Returns all common repository file paths. """
return os.getenv("LEAPP_COMMON_FILES", "").split(":")
def get_folder_path(self, name):
"""
Finds the first matching folder path within :py:attr:`files_paths`.
:param name: Name of the folder
:type name: str
:return: Found folder path
:rtype: str or None
"""
for path in self.files_paths:
path = os.path.join(path, name)
if os.path.isdir(path):
return path
return None
def get_file_path(self, name):
"""
Finds the first matching file path within :py:attr:`files_paths`.
:param name: Name of the file
:type name: str
:return: Found file path
:rtype: str or None
"""
for path in self.files_paths:
path = os.path.join(path, name)
if os.path.isfile(path):
return path
return None
def run(self, *args):
""" Runs the actor calling the method :py:func:`process`. """
os.environ['LEAPP_CURRENT_ACTOR'] = self.name
try:
self.process(*args)
finally:
os.environ.pop('LEAPP_CURRENT_ACTOR', None)
def process(self, *args, **kwargs):
""" Main processing method. In inherited actors, the function needs to be defined to be able to be processed."""
raise NotImplementedError()
def produce(self, *models):
"""
        By calling produce, model instances are stored as messages. Those messages can then be consumed by other actors.
        :param models: Messages to be sent (these model types have to be specified in :py:attr:`produces`)
:type models: Variable number of the derived classes from :py:class:`leapp.models.Model`
"""
if self._messaging:
for model in models:
if isinstance(model, type(self).produces):
self._messaging.produce(model, self)
def consume(self, *models):
"""
        Retrieve messages specified in the actor's :py:attr:`consumes` attribute, and filter message types by
models.
:param models: Models to use as a filter for the messages to return
:type models: Variable number of the derived classes from :py:class:`leapp.models.Model`
"""
if self._messaging:
return self._messaging.consume(self, *models)
return ()
def report_error(self, message, severity=ErrorSeverity.ERROR, details=None):
"""
Reports an execution error
        :param message: A message describing the error
        :type message: str
        :param severity: Severity of the error; defaults to :py:attr:`leapp.messaging.errors.ErrorSeverity.ERROR`
        :type severity: str with defined values from :py:attr:`leapp.messaging.errors.ErrorSeverity.ERROR`
:param details: A dictionary where additional context information is passed along with the error
:type details: dict
:return: None
"""
if self._messaging:
if not ErrorSeverity.validate(severity):
self.log.warning("report_error: Unknown severity value %s was passed - Falling back to ERROR", severity)
severity = ErrorSeverity.ERROR
self._messaging.report_error(
message=message,
severity=severity,
actor=self,
details=details)
def _is_type(value_type):
def validate(actor, name, value):
if not isinstance(value, value_type):
raise WrongAttributeTypeError('Actor {} attribute {} should be of the type {}'.format(actor, name,
value_type))
return value
return validate
def _is_tuple_of(value_type):
def validate(actor, name, value):
_is_type(tuple)(actor, name, value)
if not value:
raise WrongAttributeTypeError(
'Actor {} attribute {} should contain at least one item of the type {}'.format(actor, name, value_type))
if not all(map(lambda item: isinstance(item, value_type), value)):
raise WrongAttributeTypeError(
'Actor {} attribute {} should contain only values of the type {}'.format(actor, name, value_type))
return value
return validate
def _lint_warn(actor, name, type_name):
warnings = getattr(actor, '_warnings', {})
if not warnings.get(name + '_tuple'):
warnings[name + '_tuple'] = True
setattr(actor, '_warnings', warnings)
logging.getLogger("leapp.linter").warning("Actor %s field %s should be a tuple of %s", actor, name, type_name)
def _is_model_tuple(actor, name, value):
if isinstance(value, type) and issubclass(value, Model):
_lint_warn(actor, name, "Models")
value = value,
_is_type(tuple)(actor, name, value)
if not all([True] + list(map(lambda item: isinstance(item, type) and issubclass(item, Model), value))):
raise WrongAttributeTypeError(
'Actor {} attribute {} should contain only Models'.format(actor, name))
return value
def _is_dialog_tuple(actor, name, value):
if isinstance(value, Dialog):
_lint_warn(actor, name, "Dialogs")
value = value,
_is_type(tuple)(actor, name, value)
if not all([True] + list(map(lambda item: isinstance(item, Dialog), value))):
raise WrongAttributeTypeError(
'Actor {} attribute {} should contain only Dialogs'.format(actor, name))
return value
def _is_tag_tuple(actor, name, value):
if isinstance(value, type) and issubclass(value, Tag):
_lint_warn(actor, name, "Tags")
value = value,
_is_type(tuple)(actor, name, value)
if not all([True] + list(map(lambda item: isinstance(item, type) and issubclass(item, Tag), value))):
raise WrongAttributeTypeError(
'Actor {} attribute {} should contain only Tags'.format(actor, name))
return value
def _get_attribute(actor, name, validator, required=False, default_value=None, additional_info=''):
value = getattr(actor, name, None)
if not value and required:
raise MissingActorAttributeError('Actor {} is missing attribute {}.{}'.format(actor, name, additional_info))
value = validator(actor, name, value)
if not value and default_value is not None:
value = default_value
return name, value
def get_actor_metadata(actor):
"""
Creates Actor's metadata dictionary
:param actor: Actor whose metadata are needed
:type actor: derived class from :py:class:`leapp.actors.Actor`
:return: Dictionary with the name, tags, consumes, produces, and description of the actor
"""
additional_tag_info = ' At least one tag is required for actors. Please fill the tags field'
return dict([
('class_name', actor.__name__),
('path', os.path.dirname(sys.modules[actor.__module__].__file__)),
_get_attribute(actor, 'name', _is_type(string_types), required=True),
_get_attribute(actor, 'tags', _is_tag_tuple, required=True, additional_info=additional_tag_info),
_get_attribute(actor, 'consumes', _is_model_tuple, required=False, default_value=()),
_get_attribute(actor, 'produces', _is_model_tuple, required=False, default_value=()),
_get_attribute(actor, 'dialogs', _is_dialog_tuple, required=False, default_value=()),
_get_attribute(actor, 'description', _is_type(string_types), required=False,
default_value='There has been no description provided for this actor.')
])
def get_actors():
"""
:return: All registered actors with their metadata
"""
actors = get_flattened_subclasses(Actor)
for actor in actors:
get_actor_metadata(actor)
return actors
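# ------------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: a minimal actor written
# against the API documented above. `ExampleModel` and `ExampleTag` are
# hypothetical placeholders for Model/Tag subclasses a real repository would
# define; only attributes and methods defined in this file are used.
#
#     class ExampleActor(Actor):
#         name = 'example_actor'
#         description = 'Logs every ExampleModel message it receives.'
#         consumes = (ExampleModel,)
#         produces = ()
#         tags = (ExampleTag,)
#
#         def process(self):
#             for message in self.consume(ExampleModel):
#                 self.log.info('received message: %s', message)
#             if self.get_file_path('example.txt') is None:
#                 self.report_error('example.txt not found in files paths',
#                                   severity=self.ErrorSeverity.ERROR)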
|
{
"content_hash": "abf098ae5a8e9ef7c1eb94f7a4289b51",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 120,
"avg_line_length": 38.881294964028775,
"alnum_prop": 0.6345637894347304,
"repo_name": "vinzenz/prototype",
"id": "015b011dea9bca00f766dd72eff408c4a7967d46",
"size": "10809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leapp/actors/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1688"
},
{
"name": "HTML",
"bytes": "35793"
},
{
"name": "Makefile",
"bytes": "927"
},
{
"name": "PLpgSQL",
"bytes": "4262"
},
{
"name": "Python",
"bytes": "290041"
},
{
"name": "Ruby",
"bytes": "1363"
},
{
"name": "Shell",
"bytes": "1416"
}
],
"symlink_target": ""
}
|
import subprocess
import json
import requests
from config import get_value, VALUE_HEROKU_TOKEN
def is_toolbelt_installed(
default_command=["heroku", "--version"],
default_test_string="heroku-toolbelt"
):
try:
p = subprocess.Popen(
default_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
except OSError:
return False
version_info = p.stdout.readlines()
p.kill()
if len(version_info):
return version_info[0].startswith(default_test_string)
return False
def get_token():
return get_value(VALUE_HEROKU_TOKEN)
def get_apps():
result = requests.get(
'https://api.heroku.com/apps',
headers={'Accept': 'application/json'}
)
if result.status_code != 200:
raise IOError(
"No Status OK returned by heroku (got status=%d) " %
result.status_code
)
return json.loads(result.text)
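# Illustrative usage sketch, not part of the original module: how the helpers
# above might be combined by a caller. It assumes a Heroku API token has
# already been stored under VALUE_HEROKU_TOKEN and that each entry returned by
# get_apps() carries a "name" field, as the Heroku apps API does.
if __name__ == "__main__":
    if not is_toolbelt_installed():
        print("heroku toolbelt not found on PATH")
    elif not get_token():
        print("no heroku token stored")
    else:
        for app in get_apps():
            print(app.get("name"))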
|
{
"content_hash": "d9ee8d888b9749e6662ac6fa1e266842",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 64,
"avg_line_length": 19.73469387755102,
"alnum_prop": 0.6080661840744571,
"repo_name": "wercker/wercker-cli",
"id": "4bae68209b11308448139b76b811c0915fc3b9dc",
"size": "967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "werckercli/heroku.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "149113"
},
{
"name": "Shell",
"bytes": "67349"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
from provy import __version__
setup(
name='provy',
version=__version__,
description="provy is an easy-to-use server provisioning tool.",
long_description="provy is an easy-to-use server provisioning tool.",
keywords='provisioning devops infrastructure server',
author='Bernardo Heynemann',
author_email='[email protected]',
url='https://provy.readthedocs.org',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Topic :: System :: Installation/Setup'
],
packages=find_packages(exclude=['tests']),
include_package_data=True,
package_data={
'': ['*.template'],
},
install_requires=[
"fabric",
"jinja2",
"configobj",
],
entry_points={
'console_scripts': [
'provy = provy.console:main',
],
},
)
|
{
"content_hash": "9e0211e507506949515b3860f6c4f1c6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 73,
"avg_line_length": 28.441860465116278,
"alnum_prop": 0.5911692559280458,
"repo_name": "python-provy/provy",
"id": "bce6fab84b564db95ece139e367389f9ecb0198e",
"size": "1248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "608167"
},
{
"name": "Ruby",
"bytes": "912"
}
],
"symlink_target": ""
}
|