code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k)
---|---|---|
def solver_configuration(A, B=None, verb=True):
"""Generate a dictionary of SA parameters for an arbitray matrix A.
Parameters
----------
A : array, matrix, csr_matrix, bsr_matrix
(n x n) matrix to invert, CSR or BSR format preferred for efficiency
B : None, array
Near null-space modes used to construct the smoothed aggregation solver
If None, the constant vector is used
If (n x m) array, then B is passed to smoothed_aggregation_solver
verb : bool
If True, print verbose output during runtime
Returns
-------
config : dict
A dictionary of solver configuration parameters that one uses to
generate a smoothed aggregation solver
Notes
-----
The config dictionary contains the following parameter entries: symmetry,
smooth, presmoother, postsmoother, B, strength, max_levels, max_coarse,
coarse_solver, aggregate, keep. See smoothed_aggregation_solver for each
parameter's description.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg import solver_configuration
>>> A = poisson((40,40),format='csr')
>>> solver_config = solver_configuration(A,verb=False)
"""
# Ensure acceptable format of A
A = make_csr(A)
config = {}
# Detect symmetry
if ishermitian(A, fast_check=True):
config['symmetry'] = 'hermitian'
if verb:
print(" Detected a Hermitian matrix")
else:
config['symmetry'] = 'nonsymmetric'
if verb:
print(" Detected a non-Hermitian matrix")
# Symmetry dependent parameters
if config['symmetry'] == 'hermitian':
config['smooth'] = ('energy', {'krylov': 'cg', 'maxiter': 3,
'degree': 2, 'weighting': 'local'})
config['presmoother'] = ('block_gauss_seidel',
{'sweep': 'symmetric', 'iterations': 1})
config['postsmoother'] = ('block_gauss_seidel',
{'sweep': 'symmetric', 'iterations': 1})
else:
config['smooth'] = ('energy', {'krylov': 'gmres', 'maxiter': 3,
'degree': 2, 'weighting': 'local'})
config['presmoother'] = ('gauss_seidel_nr',
{'sweep': 'symmetric', 'iterations': 2})
config['postsmoother'] = ('gauss_seidel_nr',
{'sweep': 'symmetric', 'iterations': 2})
# Determine near null-space modes B
if B is None:
# B is the constant for each variable in a node
if isspmatrix_bsr(A) and A.blocksize[0] > 1:
bsize = A.blocksize[0]
config['B'] = np.kron(np.ones((int(A.shape[0] / bsize), 1),
dtype=A.dtype), np.eye(bsize))
else:
config['B'] = np.ones((A.shape[0], 1), dtype=A.dtype)
elif (isinstance(B, type(np.zeros((1,)))) or
isinstance(B, type(sp.mat(np.zeros((1,)))))):
if len(B.shape) == 1:
B = B.reshape(-1, 1)
if (B.shape[0] != A.shape[0]) or (B.shape[1] == 0):
raise TypeError('Invalid dimensions of B, B.shape[0] must equal \
A.shape[0]')
else:
config['B'] = np.array(B, dtype=A.dtype)
else:
raise TypeError('Invalid B')
if config['symmetry'] == 'hermitian':
config['BH'] = None
else:
config['BH'] = config['B'].copy()
# Set non-symmetry related parameters
config['strength'] = ('evolution', {'k': 2, 'proj_type': 'l2',
'epsilon': 3.0})
config['max_levels'] = 15
config['max_coarse'] = 500
config['coarse_solver'] = 'pinv'
config['aggregate'] = 'standard'
config['keep'] = False
return config | Generate a dictionary of SA parameters for an arbitrary matrix A.
Parameters
----------
A : array, matrix, csr_matrix, bsr_matrix
(n x n) matrix to invert, CSR or BSR format preferred for efficiency
B : None, array
Near null-space modes used to construct the smoothed aggregation solver
If None, the constant vector is used
If (n x m) array, then B is passed to smoothed_aggregation_solver
verb : bool
If True, print verbose output during runtime
Returns
-------
config : dict
A dictionary of solver configuration parameters that one uses to
generate a smoothed aggregation solver
Notes
-----
The config dictionary contains the following parameter entries: symmetry,
smooth, presmoother, postsmoother, B, strength, max_levels, max_coarse,
coarse_solver, aggregate, keep. See smoothed_aggregation_solver for each
parameter's description.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg import solver_configuration
>>> A = poisson((40,40),format='csr')
>>> solver_config = solver_configuration(A,verb=False) | Below is the instruction that describes the task:
### Input:
Generate a dictionary of SA parameters for an arbitrary matrix A.
Parameters
----------
A : array, matrix, csr_matrix, bsr_matrix
(n x n) matrix to invert, CSR or BSR format preferred for efficiency
B : None, array
Near null-space modes used to construct the smoothed aggregation solver
If None, the constant vector is used
If (n x m) array, then B is passed to smoothed_aggregation_solver
verb : bool
If True, print verbose output during runtime
Returns
-------
config : dict
A dictionary of solver configuration parameters that one uses to
generate a smoothed aggregation solver
Notes
-----
The config dictionary contains the following parameter entries: symmetry,
smooth, presmoother, postsmoother, B, strength, max_levels, max_coarse,
coarse_solver, aggregate, keep. See smoothed_aggregation_solver for each
parameter's description.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg import solver_configuration
>>> A = poisson((40,40),format='csr')
>>> solver_config = solver_configuration(A,verb=False)
### Response:
def solver_configuration(A, B=None, verb=True):
"""Generate a dictionary of SA parameters for an arbitray matrix A.
Parameters
----------
A : array, matrix, csr_matrix, bsr_matrix
(n x n) matrix to invert, CSR or BSR format preferred for efficiency
B : None, array
Near null-space modes used to construct the smoothed aggregation solver
If None, the constant vector is used
If (n x m) array, then B is passed to smoothed_aggregation_solver
verb : bool
If True, print verbose output during runtime
Returns
-------
config : dict
A dictionary of solver configuration parameters that one uses to
generate a smoothed aggregation solver
Notes
-----
The config dictionary contains the following parameter entries: symmetry,
smooth, presmoother, postsmoother, B, strength, max_levels, max_coarse,
coarse_solver, aggregate, keep. See smoothed_aggregation_solver for each
parameter's description.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg import solver_configuration
>>> A = poisson((40,40),format='csr')
>>> solver_config = solver_configuration(A,verb=False)
"""
# Ensure acceptable format of A
A = make_csr(A)
config = {}
# Detect symmetry
if ishermitian(A, fast_check=True):
config['symmetry'] = 'hermitian'
if verb:
print(" Detected a Hermitian matrix")
else:
config['symmetry'] = 'nonsymmetric'
if verb:
print(" Detected a non-Hermitian matrix")
# Symmetry dependent parameters
if config['symmetry'] == 'hermitian':
config['smooth'] = ('energy', {'krylov': 'cg', 'maxiter': 3,
'degree': 2, 'weighting': 'local'})
config['presmoother'] = ('block_gauss_seidel',
{'sweep': 'symmetric', 'iterations': 1})
config['postsmoother'] = ('block_gauss_seidel',
{'sweep': 'symmetric', 'iterations': 1})
else:
config['smooth'] = ('energy', {'krylov': 'gmres', 'maxiter': 3,
'degree': 2, 'weighting': 'local'})
config['presmoother'] = ('gauss_seidel_nr',
{'sweep': 'symmetric', 'iterations': 2})
config['postsmoother'] = ('gauss_seidel_nr',
{'sweep': 'symmetric', 'iterations': 2})
# Determine near null-space modes B
if B is None:
# B is the constant for each variable in a node
if isspmatrix_bsr(A) and A.blocksize[0] > 1:
bsize = A.blocksize[0]
config['B'] = np.kron(np.ones((int(A.shape[0] / bsize), 1),
dtype=A.dtype), np.eye(bsize))
else:
config['B'] = np.ones((A.shape[0], 1), dtype=A.dtype)
elif (isinstance(B, type(np.zeros((1,)))) or
isinstance(B, type(sp.mat(np.zeros((1,)))))):
if len(B.shape) == 1:
B = B.reshape(-1, 1)
if (B.shape[0] != A.shape[0]) or (B.shape[1] == 0):
raise TypeError('Invalid dimensions of B, B.shape[0] must equal \
A.shape[0]')
else:
config['B'] = np.array(B, dtype=A.dtype)
else:
raise TypeError('Invalid B')
if config['symmetry'] == 'hermitian':
config['BH'] = None
else:
config['BH'] = config['B'].copy()
# Set non-symmetry related parameters
config['strength'] = ('evolution', {'k': 2, 'proj_type': 'l2',
'epsilon': 3.0})
config['max_levels'] = 15
config['max_coarse'] = 500
config['coarse_solver'] = 'pinv'
config['aggregate'] = 'standard'
config['keep'] = False
return config |
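A hedged usage sketch (not part of the row above): the returned config dict is meant to seed a smoothed aggregation solver, and its keys appear to line up with smoothed_aggregation_solver keyword arguments, so a typical round trip could look like this.
# Sketch only: assumes config keys map onto smoothed_aggregation_solver kwargs.
import numpy as np
from pyamg import smoothed_aggregation_solver, solver_configuration
from pyamg.gallery import poisson
A = poisson((40, 40), format='csr')            # 2D Poisson test matrix
config = solver_configuration(A, verb=False)   # detect symmetry, pick defaults
ml = smoothed_aggregation_solver(A, **config)  # build the multilevel hierarchy
b = np.ones(A.shape[0])
x = ml.solve(b, tol=1e-8)                      # approximate solution of A x = b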
def edge_lengths(self):
"""
Compute the edge-lengths of each triangle in the triangulation.
"""
simplex = self.simplices.T
# simplex is vectors a, b, c defining the corners
a = self.points[simplex[0]]
b = self.points[simplex[1]]
c = self.points[simplex[2]]
# norm to calculate length
ab = np.linalg.norm(b - a, axis=1)
bc = np.linalg.norm(c - b, axis=1)
ac = np.linalg.norm(a - c, axis=1)
return ab, bc, ac | Compute the edge-lengths of each triangle in the triangulation. | Below is the instruction that describes the task:
### Input:
Compute the edge-lengths of each triangle in the triangulation.
### Response:
def edge_lengths(self):
"""
Compute the edge-lengths of each triangle in the triangulation.
"""
simplex = self.simplices.T
# simplex is vectors a, b, c defining the corners
a = self.points[simplex[0]]
b = self.points[simplex[1]]
c = self.points[simplex[2]]
# norm to calculate length
ab = np.linalg.norm(b - a, axis=1)
bc = np.linalg.norm(c - b, axis=1)
ac = np.linalg.norm(a - c, axis=1)
return ab, bc, ac |
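A short follow-on sketch (an illustration, not from the row above; 'tri' stands for any triangulation object exposing the edge_lengths method): per-triangle areas follow from the three edge-length arrays via Heron's formula.
import numpy as np
ab, bc, ac = tri.edge_lengths()
s = 0.5 * (ab + bc + ac)                              # semi-perimeter of each triangle
area = np.sqrt(s * (s - ab) * (s - bc) * (s - ac))    # Heron's formula, vectorised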
def _set_ospf_level1(self, v, load=False):
"""
Setter method for ospf_level1, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/ospf/ospf_level1 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_ospf_level1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ospf_level1() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="ospf-level1", rest_name="level-1", parent=self, choice=(u'ch-ospf-levels', u'ca-ospf-level1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1 routes only', u'alt-name': u'level-1', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ospf_level1 must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="ospf-level1", rest_name="level-1", parent=self, choice=(u'ch-ospf-levels', u'ca-ospf-level1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1 routes only', u'alt-name': u'level-1', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)""",
})
self.__ospf_level1 = t
if hasattr(self, '_set'):
self._set() | Setter method for ospf_level1, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/ospf/ospf_level1 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_ospf_level1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ospf_level1() directly. | Below is the instruction that describes the task:
### Input:
Setter method for ospf_level1, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/ospf/ospf_level1 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_ospf_level1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ospf_level1() directly.
### Response:
def _set_ospf_level1(self, v, load=False):
"""
Setter method for ospf_level1, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/ospf/ospf_level1 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_ospf_level1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ospf_level1() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="ospf-level1", rest_name="level-1", parent=self, choice=(u'ch-ospf-levels', u'ca-ospf-level1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1 routes only', u'alt-name': u'level-1', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ospf_level1 must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="ospf-level1", rest_name="level-1", parent=self, choice=(u'ch-ospf-levels', u'ca-ospf-level1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1 routes only', u'alt-name': u'level-1', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)""",
})
self.__ospf_level1 = t
if hasattr(self, '_set'):
self._set() |
def calculate_r_matrices(fine_states, reduced_matrix_elements, q=None,
numeric=True, convention=1):
ur"""Calculate the matrix elements of the electric dipole (in the helicity
basis).
We calculate all matrix elements for the D2 line in Rb 87.
>>> from sympy import symbols, pprint
>>> red = symbols("r", positive=True)
>>> reduced_matrix_elements = [[0, -red], [red, 0]]
>>> g = State("Rb", 87, 5, 0, 1/Integer(2))
>>> e = State("Rb", 87, 5, 1, 3/Integer(2))
>>> fine_levels = [g, e]
>>> r = calculate_r_matrices(fine_levels, reduced_matrix_elements,
... numeric=False)
>>> pprint(r[0][8:,:8])
[pretty-printed block with nonzero entries proportional to r, e.g. √3⋅r/6, -√15⋅r/12, √15⋅r/60; the original output was garbled during extraction and is summarised here]
>>> pprint(r[1][8:,:8])
[pretty-printed block; original output garbled during extraction]
>>> pprint(r[2][8:,:8])
[pretty-printed block; original output garbled during extraction]
"""
magnetic_states = make_list_of_states(fine_states, 'magnetic', verbose=0)
aux = calculate_boundaries(fine_states, magnetic_states)
index_list_fine, index_list_hyperfine = aux
Ne = len(magnetic_states)
r = [[[0 for j in range(Ne)] for i in range(Ne)] for p in range(3)]
II = fine_states[0].i
for p in [-1, 0, 1]:
for i in range(Ne):
ei = magnetic_states[i]
ii = fine_index(i, index_list_fine)
for j in range(Ne):
ej = magnetic_states[j]
jj = fine_index(j, index_list_fine)
reduced_matrix_elementij = reduced_matrix_elements[ii][jj]
if reduced_matrix_elementij != 0:
ji = ei.j; jj = ej.j
fi = ei.f; fj = ej.f
mi = ei.m; mj = ej.m
rpij = matrix_element(ji, fi, mi, jj, fj, mj,
II, reduced_matrix_elementij, p,
numeric=numeric,
convention=convention)
if q == 1:
r[p+1][i][j] = rpij*delta_lesser(i, j)
elif q == -1:
r[p+1][i][j] = rpij*delta_greater(i, j)
else:
r[p+1][i][j] = rpij
if not numeric:
r = [Matrix(ri) for ri in r]
return r | ur"""Calculate the matrix elements of the electric dipole (in the helicity
basis).
We calculate all matrix elements for the D2 line in Rb 87.
>>> from sympy import symbols, pprint
>>> red = symbols("r", positive=True)
>>> reduced_matrix_elements = [[0, -red], [red, 0]]
>>> g = State("Rb", 87, 5, 0, 1/Integer(2))
>>> e = State("Rb", 87, 5, 1, 3/Integer(2))
>>> fine_levels = [g, e]
>>> r = calculate_r_matrices(fine_levels, reduced_matrix_elements,
... numeric=False)
>>> pprint(r[0][8:,:8])
[pretty-printed block with nonzero entries proportional to r, e.g. √3⋅r/6, -√15⋅r/12, √15⋅r/60; the original output was garbled during extraction and is summarised here]
>>> pprint(r[1][8:,:8])
[pretty-printed block; original output garbled during extraction]
>>> pprint(r[2][8:,:8])
[pretty-printed block; original output garbled during extraction] | Below is the instruction that describes the task:
### Input:
ur"""Calculate the matrix elements of the electric dipole (in the helicity
basis).
We calculate all matrix elements for the D2 line in Rb 87.
>>> from sympy import symbols, pprint
>>> red = symbols("r", positive=True)
>>> reduced_matrix_elements = [[0, -red], [red, 0]]
>>> g = State("Rb", 87, 5, 0, 1/Integer(2))
>>> e = State("Rb", 87, 5, 1, 3/Integer(2))
>>> fine_levels = [g, e]
>>> r = calculate_r_matrices(fine_levels, reduced_matrix_elements,
... numeric=False)
>>> pprint(r[0][8:,:8])
[pretty-printed block with nonzero entries proportional to r, e.g. √3⋅r/6, -√15⋅r/12, √15⋅r/60; the original output was garbled during extraction and is summarised here]
>>> pprint(r[1][8:,:8])
[pretty-printed block; original output garbled during extraction]
>>> pprint(r[2][8:,:8])
[pretty-printed block; original output garbled during extraction]
### Response:
def calculate_r_matrices(fine_states, reduced_matrix_elements, q=None,
numeric=True, convention=1):
ur"""Calculate the matrix elements of the electric dipole (in the helicity
basis).
We calculate all matrix elements for the D2 line in Rb 87.
>>> from sympy import symbols, pprint
>>> red = symbols("r", positive=True)
>>> reduced_matrix_elements = [[0, -red], [red, 0]]
>>> g = State("Rb", 87, 5, 0, 1/Integer(2))
>>> e = State("Rb", 87, 5, 1, 3/Integer(2))
>>> fine_levels = [g, e]
>>> r = calculate_r_matrices(fine_levels, reduced_matrix_elements,
... numeric=False)
>>> pprint(r[0][8:,:8])
[pretty-printed block with nonzero entries proportional to r, e.g. √3⋅r/6, -√15⋅r/12, √15⋅r/60; the original output was garbled during extraction and is summarised here]
>>> pprint(r[1][8:,:8])
[pretty-printed block; original output garbled during extraction]
>>> pprint(r[2][8:,:8])
[pretty-printed block; original output garbled during extraction]
"""
magnetic_states = make_list_of_states(fine_states, 'magnetic', verbose=0)
aux = calculate_boundaries(fine_states, magnetic_states)
index_list_fine, index_list_hyperfine = aux
Ne = len(magnetic_states)
r = [[[0 for j in range(Ne)] for i in range(Ne)] for p in range(3)]
II = fine_states[0].i
for p in [-1, 0, 1]:
for i in range(Ne):
ei = magnetic_states[i]
ii = fine_index(i, index_list_fine)
for j in range(Ne):
ej = magnetic_states[j]
jj = fine_index(j, index_list_fine)
reduced_matrix_elementij = reduced_matrix_elements[ii][jj]
if reduced_matrix_elementij != 0:
ji = ei.j; jj = ej.j
fi = ei.f; fj = ej.f
mi = ei.m; mj = ej.m
rpij = matrix_element(ji, fi, mi, jj, fj, mj,
II, reduced_matrix_elementij, p,
numeric=numeric,
convention=convention)
if q == 1:
r[p+1][i][j] = rpij*delta_lesser(i, j)
elif q == -1:
r[p+1][i][j] = rpij*delta_greater(i, j)
else:
r[p+1][i][j] = rpij
if not numeric:
r = [Matrix(ri) for ri in r]
return r |
def _validated(self, data):
"""Convert data or die trying."""
try:
return self.convert(data)
except (TypeError, ValueError) as ex:
raise NotValid(*ex.args) | Convert data or die trying. | Below is the instruction that describes the task:
### Input:
Convert data or die trying.
### Response:
def _validated(self, data):
"""Convert data or die trying."""
try:
return self.convert(data)
except (TypeError, ValueError) as ex:
raise NotValid(*ex.args) |
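A minimal, self-contained sketch of the same convert-or-raise pattern (the IntField class and the local NotValid definition are invented here purely for illustration):
class NotValid(Exception):
    pass
class IntField:
    def convert(self, data):
        return int(data)                     # TypeError/ValueError is caught below
    def _validated(self, data):
        try:
            return self.convert(data)
        except (TypeError, ValueError) as ex:
            raise NotValid(*ex.args)
print(IntField()._validated("42"))           # 42; IntField()._validated("nope") raises NotValid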
def getExceptionClass(errorCode):
"""
Converts the specified error code into the corresponding class object.
Raises a KeyError if the errorCode is not found.
"""
classMap = {}
for name, class_ in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(class_) and issubclass(class_, BaseServerException):
classMap[class_.getErrorCode()] = class_
return classMap[errorCode] | Converts the specified error code into the corresponding class object.
Raises a KeyError if the errorCode is not found. | Below is the instruction that describes the task:
### Input:
Converts the specified error code into the corresponding class object.
Raises a KeyError if the errorCode is not found.
### Response:
def getExceptionClass(errorCode):
"""
Converts the specified error code into the corresponding class object.
Raises a KeyError if the errorCode is not found.
"""
classMap = {}
for name, class_ in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(class_) and issubclass(class_, BaseServerException):
classMap[class_.getErrorCode()] = class_
return classMap[errorCode] |
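A hedged, standalone sketch of the same introspection idea (the NotFoundException name and the errorCode attribute are assumptions made only for illustration):
import inspect
import sys
class BaseServerException(Exception):
    errorCode = -1
    @classmethod
    def getErrorCode(cls):
        return cls.errorCode
class NotFoundException(BaseServerException):
    errorCode = 404
# Mirror of getExceptionClass: scan the module and index subclasses by error code.
classMap = {}
for name, class_ in inspect.getmembers(sys.modules[__name__]):
    if inspect.isclass(class_) and issubclass(class_, BaseServerException):
        classMap[class_.getErrorCode()] = class_
assert classMap[404] is NotFoundException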
def store_value(self, name, value, parameters=None):
"""Stores the value of a certain variable
The value of a variable with name 'name' is stored together with the parameters that were used for the
calculation.
:param str name: The name of the variable
:param value: The value to be cached
:param dict parameters: The parameters on which the value depends
"""
if not isinstance(parameters, dict):
raise TypeError("parameters must be a dict")
hash = self._parameter_hash(parameters)
if name not in self._cache:
self._cache[name] = {}
self._cache[name][hash.hexdigest()] = value | Stores the value of a certain variable
The value of a variable with name 'name' is stored together with the parameters that were used for the
calculation.
:param str name: The name of the variable
:param value: The value to be cached
:param dict parameters: The parameters on which the value depends | Below is the instruction that describes the task:
### Input:
Stores the value of a certain variable
The value of a variable with name 'name' is stored together with the parameters that were used for the
calculation.
:param str name: The name of the variable
:param value: The value to be cached
:param dict parameters: The parameters on which the value depends
### Response:
def store_value(self, name, value, parameters=None):
"""Stores the value of a certain variable
The value of a variable with name 'name' is stored together with the parameters that were used for the
calculation.
:param str name: The name of the variable
:param value: The value to be cached
:param dict parameters: The parameters on which the value depends
"""
if not isinstance(parameters, dict):
raise TypeError("parameters must be a dict")
hash = self._parameter_hash(parameters)
if name not in self._cache:
self._cache[name] = {}
self._cache[name][hash.hexdigest()] = value |
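A hedged sketch of the matching lookup side (get_value is a hypothetical companion written here for illustration; only store_value appears in the row above): values are keyed by variable name plus the hex digest of the parameter hash.
def get_value(self, name, parameters=None):
    if not isinstance(parameters, dict):
        raise TypeError("parameters must be a dict")
    hash = self._parameter_hash(parameters)       # same keying scheme as store_value
    return self._cache[name][hash.hexdigest()]    # KeyError if nothing was stored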
def _extract_blocks(x, block_h, block_w):
"""Helper function for local 2d attention.
Args:
x: a [batch, height, width, depth] tensor
block_h: An integer. block height
block_w: An integer. block width
returns:
a [batch, num_heads, height/block_h, width/block_w, depth] tensor
"""
(_, height, width, depth) = common_layers.shape_list(x)
assert height % block_h == 0
assert width % block_w == 0
x = tf.reshape(x, [-1, height//block_h, block_h,
width//block_w, block_w, depth])
return tf.transpose(x, [0, 1, 3, 2, 4, 5]) | Helper function for local 2d attention.
Args:
x: a [batch, height, width, depth] tensor
block_h: An integer. block height
block_w: An integer. block width
returns:
a [batch, num_heads, height/block_h, width/block_w, depth] tensor | Below is the instruction that describes the task:
### Input:
Helper function for local 2d attention.
Args:
x: a [batch, height, width, depth] tensor
block_h: An integer. block height
block_w: An integer. block width
returns:
a [batch, num_heads, height/block_h, width/block_w, depth] tensor
### Response:
def _extract_blocks(x, block_h, block_w):
"""Helper function for local 2d attention.
Args:
x: a [batch, height, width, depth] tensor
block_h: An integer. block height
block_w: An integer. block width
returns:
a [batch, num_heads, height/block_h, width/block_w, depth] tensor
"""
(_, height, width, depth) = common_layers.shape_list(x)
assert height % block_h == 0
assert width % block_w == 0
x = tf.reshape(x, [-1, height//block_h, block_h,
width//block_w, block_w, depth])
return tf.transpose(x, [0, 1, 3, 2, 4, 5]) |
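The reshape-then-transpose trick is easier to see with concrete shapes. A hedged NumPy mirror of the same index manipulation (illustration only, not the TensorFlow code path):
import numpy as np
x = np.arange(1 * 4 * 6 * 3).reshape(1, 4, 6, 3)   # (batch, height, width, depth)
block_h, block_w = 2, 3
b, h, w, d = x.shape
y = x.reshape(b, h // block_h, block_h, w // block_w, block_w, d)
y = y.transpose(0, 1, 3, 2, 4, 5)   # -> (batch, h/block_h, w/block_w, block_h, block_w, depth)
print(y.shape)                      # (1, 2, 2, 2, 3, 3)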
def get_next_colour():
"""
Gets the next colour in the Geckoboard colour list.
"""
colour = settings.GECKOBOARD_COLOURS[get_next_colour.cur_colour]
get_next_colour.cur_colour += 1
if get_next_colour.cur_colour >= len(settings.GECKOBOARD_COLOURS):
get_next_colour.cur_colour = 0
return colour | Gets the next colour in the Geckoboard colour list. | Below is the instruction that describes the task:
### Input:
Gets the next colour in the Geckoboard colour list.
### Response:
def get_next_colour():
"""
Gets the next colour in the Geckoboard colour list.
"""
colour = settings.GECKOBOARD_COLOURS[get_next_colour.cur_colour]
get_next_colour.cur_colour += 1
if get_next_colour.cur_colour >= len(settings.GECKOBOARD_COLOURS):
get_next_colour.cur_colour = 0
return colour |
def ipv6(value,
allow_empty = False,
**kwargs):
"""Validate that ``value`` is a valid IP address version 6.
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises InvalidIPAddressError: if ``value`` is not a valid IP version 6 address or
empty when ``allow_empty`` is not set to ``True``
"""
if not value and allow_empty is False:
raise errors.EmptyValueError('value (%s) was empty' % value)
elif not value:
return None
if not isinstance(value, str):
raise errors.InvalidIPAddressError('value (%s) is not a valid ipv6' % value)
value = value.lower().strip()
is_valid = IPV6_REGEX.match(value)
if not is_valid:
raise errors.InvalidIPAddressError('value (%s) is not a valid ipv6' % value)
return value | Validate that ``value`` is a valid IP address version 6.
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises InvalidIPAddressError: if ``value`` is not a valid IP version 6 address or
empty when ``allow_empty`` is not set to ``True`` | Below is the instruction that describes the task:
### Input:
Validate that ``value`` is a valid IP address version 6.
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises InvalidIPAddressError: if ``value`` is not a valid IP version 6 address or
empty when ``allow_empty`` is not set to ``True``
### Response:
def ipv6(value,
allow_empty = False,
**kwargs):
"""Validate that ``value`` is a valid IP address version 6.
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises InvalidIPAddressError: if ``value`` is not a valid IP version 6 address or
empty when ``allow_empty`` is not set to ``True``
"""
if not value and allow_empty is False:
raise errors.EmptyValueError('value (%s) was empty' % value)
elif not value:
return None
if not isinstance(value, str):
raise errors.InvalidIPAddressError('value (%s) is not a valid ipv6' % value)
value = value.lower().strip()
is_valid = IPV6_REGEX.match(value)
if not is_valid:
raise errors.InvalidIPAddressError('value (%s) is not a valid ipv6' % value)
return value |
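A brief usage sketch (hedged; InvalidIPAddressError and EmptyValueError are the library errors referenced in the docstring above):
addr = ipv6('2001:0db8:85a3:0000:0000:8a2e:0370:7334')   # returns the lower-cased, stripped value
nothing = ipv6('', allow_empty=True)                     # returns None instead of raising
try:
    ipv6('not-an-address')
except Exception as exc:                                 # InvalidIPAddressError in the library
    print('rejected:', exc)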
def idem(cls, ops, kwargs):
"""Remove duplicate arguments and order them via the cls's order_key key
object/function.
E.g.::
>>> class Set(Operation):
... order_key = lambda val: val
... simplifications = [idem, ]
>>> Set.create(1,2,3,1,3)
Set(1, 2, 3)
"""
return sorted(set(ops), key=cls.order_key), kwargs | Remove duplicate arguments and order them via the cls's order_key key
object/function.
E.g.::
>>> class Set(Operation):
... order_key = lambda val: val
... simplifications = [idem, ]
>>> Set.create(1,2,3,1,3)
Set(1, 2, 3) | Below is the instruction that describes the task:
### Input:
Remove duplicate arguments and order them via the cls's order_key key
object/function.
E.g.::
>>> class Set(Operation):
... order_key = lambda val: val
... simplifications = [idem, ]
>>> Set.create(1,2,3,1,3)
Set(1, 2, 3)
### Response:
def idem(cls, ops, kwargs):
"""Remove duplicate arguments and order them via the cls's order_key key
object/function.
E.g.::
>>> class Set(Operation):
... order_key = lambda val: val
... simplifications = [idem, ]
>>> Set.create(1,2,3,1,3)
Set(1, 2, 3)
"""
return sorted(set(ops), key=cls.order_key), kwargs |
def node_query(lat_min, lng_min, lat_max, lng_max, tags=None):
"""
Search for OSM nodes within a bounding box that match given tags.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
tags : str or list of str, optional
Node tags that will be used to filter the search.
See http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
for information about OSM Overpass queries
and http://wiki.openstreetmap.org/wiki/Map_Features
for a list of tags.
Returns
-------
nodes : pandas.DataFrame
Will have 'lat' and 'lon' columns, plus other columns for the
tags associated with the node (these will vary based on the query).
Index will be the OSM node IDs.
"""
node_data = make_osm_query(build_node_query(
lat_min, lng_min, lat_max, lng_max, tags=tags))
if len(node_data['elements']) == 0:
raise RuntimeError('OSM query results contain no data.')
nodes = [process_node(n) for n in node_data['elements']]
return pd.DataFrame.from_records(nodes, index='id') | Search for OSM nodes within a bounding box that match given tags.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
tags : str or list of str, optional
Node tags that will be used to filter the search.
See http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
for information about OSM Overpass queries
and http://wiki.openstreetmap.org/wiki/Map_Features
for a list of tags.
Returns
-------
nodes : pandas.DataFrame
Will have 'lat' and 'lon' columns, plus other columns for the
tags associated with the node (these will vary based on the query).
Index will be the OSM node IDs. | Below is the instruction that describes the task:
### Input:
Search for OSM nodes within a bounding box that match given tags.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
tags : str or list of str, optional
Node tags that will be used to filter the search.
See http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
for information about OSM Overpass queries
and http://wiki.openstreetmap.org/wiki/Map_Features
for a list of tags.
Returns
-------
nodes : pandas.DataFrame
Will have 'lat' and 'lon' columns, plus other columns for the
tags associated with the node (these will vary based on the query).
Index will be the OSM node IDs.
### Response:
def node_query(lat_min, lng_min, lat_max, lng_max, tags=None):
"""
Search for OSM nodes within a bounding box that match given tags.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
tags : str or list of str, optional
Node tags that will be used to filter the search.
See http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
for information about OSM Overpass queries
and http://wiki.openstreetmap.org/wiki/Map_Features
for a list of tags.
Returns
-------
nodes : pandas.DataFrame
Will have 'lat' and 'lon' columns, plus other columns for the
tags associated with the node (these will vary based on the query).
Index will be the OSM node IDs.
"""
node_data = make_osm_query(build_node_query(
lat_min, lng_min, lat_max, lng_max, tags=tags))
if len(node_data['elements']) == 0:
raise RuntimeError('OSM query results contain no data.')
nodes = [process_node(n) for n in node_data['elements']]
return pd.DataFrame.from_records(nodes, index='id') |
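A hedged usage sketch (the tag syntax follows the Overpass convention linked in the docstring; the bounding box and results are illustrative only and depend on the live OSM service):
nodes = node_query(41.88, 12.47, 41.90, 12.50,
                   tags='"amenity"="drinking_water"')    # small box around central Rome
print(nodes[['lat', 'lon']].head())                      # DataFrame indexed by OSM node id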
def get_web_session_cookies(self):
"""Get web authentication cookies via WebAPI's ``AuthenticateUser``
.. note::
The cookies are valid only while :class:`.SteamClient` instance is logged on.
:return: dict with authentication cookies
:rtype: :class:`dict`, :class:`None`
"""
if not self.logged_on: return None
resp = self.send_job_and_wait(MsgProto(EMsg.ClientRequestWebAPIAuthenticateUserNonce), timeout=7)
if resp is None: return None
skey, ekey = generate_session_key()
data = {
'steamid': self.steam_id,
'sessionkey': ekey,
'encrypted_loginkey': symmetric_encrypt(resp.webapi_authenticate_user_nonce.encode('ascii'), skey),
}
try:
resp = webapi.post('ISteamUserAuth', 'AuthenticateUser', 1, params=data)
except Exception as exp:
self._LOG.debug("get_web_session_cookies error: %s" % str(exp))
return None
return {
'steamLogin': resp['authenticateuser']['token'],
'steamLoginSecure': resp['authenticateuser']['tokensecure'],
} | Get web authentication cookies via WebAPI's ``AuthenticateUser``
.. note::
The cookies are valid only while :class:`.SteamClient` instance is logged on.
:return: dict with authentication cookies
:rtype: :class:`dict`, :class:`None` | Below is the instruction that describes the task:
### Input:
Get web authentication cookies via WebAPI's ``AuthenticateUser``
.. note::
The cookies are valid only while :class:`.SteamClient` instance is logged on.
:return: dict with authentication cookies
:rtype: :class:`dict`, :class:`None`
### Response:
def get_web_session_cookies(self):
"""Get web authentication cookies via WebAPI's ``AuthenticateUser``
.. note::
The cookies are valid only while :class:`.SteamClient` instance is logged on.
:return: dict with authentication cookies
:rtype: :class:`dict`, :class:`None`
"""
if not self.logged_on: return None
resp = self.send_job_and_wait(MsgProto(EMsg.ClientRequestWebAPIAuthenticateUserNonce), timeout=7)
if resp is None: return None
skey, ekey = generate_session_key()
data = {
'steamid': self.steam_id,
'sessionkey': ekey,
'encrypted_loginkey': symmetric_encrypt(resp.webapi_authenticate_user_nonce.encode('ascii'), skey),
}
try:
resp = webapi.post('ISteamUserAuth', 'AuthenticateUser', 1, params=data)
except Exception as exp:
self._LOG.debug("get_web_session_cookies error: %s" % str(exp))
return None
return {
'steamLogin': resp['authenticateuser']['token'],
'steamLoginSecure': resp['authenticateuser']['tokensecure'],
} |
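A hedged sketch of consuming the returned cookies (requests and the steamcommunity.com domain are assumptions made for illustration; 'client' stands for a logged-on SteamClient instance):
import requests
cookies = client.get_web_session_cookies()
if cookies is not None:
    web = requests.Session()
    for name, value in cookies.items():
        web.cookies.set(name, value, domain='steamcommunity.com')
    resp = web.get('https://steamcommunity.com/my/')     # authenticated community page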
def find_one(self, filter=None, *args, **kwargs):
"""Get a single file from gridfs.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single :class:`~gridfs.grid_file.GridOut`,
or ``None`` if no matching file is found. For example::
file = fs.find_one({"filename": "lisa.txt"})
:Parameters:
- `filter` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"`` in the file collection.
- `*args` (optional): any additional positional arguments are
the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
"""
if filter is not None and not isinstance(filter, Mapping):
filter = {"_id": filter}
for f in self.find(filter, *args, **kwargs):
return f
return None | Get a single file from gridfs.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single :class:`~gridfs.grid_file.GridOut`,
or ``None`` if no matching file is found. For example::
file = fs.find_one({"filename": "lisa.txt"})
:Parameters:
- `filter` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"`` in the file collection.
- `*args` (optional): any additional positional arguments are
the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`. | Below is the instruction that describes the task:
### Input:
Get a single file from gridfs.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single :class:`~gridfs.grid_file.GridOut`,
or ``None`` if no matching file is found. For example::
file = fs.find_one({"filename": "lisa.txt"})
:Parameters:
- `filter` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"`` in the file collection.
- `*args` (optional): any additional positional arguments are
the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
### Response:
def find_one(self, filter=None, *args, **kwargs):
"""Get a single file from gridfs.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single :class:`~gridfs.grid_file.GridOut`,
or ``None`` if no matching file is found. For example::
file = fs.find_one({"filename": "lisa.txt"})
:Parameters:
- `filter` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"`` in the file collection.
- `*args` (optional): any additional positional arguments are
the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
"""
if filter is not None and not isinstance(filter, Mapping):
filter = {"_id": filter}
for f in self.find(filter, *args, **kwargs):
return f
return None |
def tile_is_valid(self):
""" Checks if tile has tile info and valid timestamp
:return: `True` if tile is valid and `False` otherwise
:rtype: bool
"""
return self.tile_info is not None \
and (self.datetime == self.date or self.datetime == self.parse_datetime(self.tile_info['timestamp'])) | Checks if tile has tile info and valid timestamp
:return: `True` if tile is valid and `False` otherwise
:rtype: bool | Below is the instruction that describes the task:
### Input:
Checks if tile has tile info and valid timestamp
:return: `True` if tile is valid and `False` otherwise
:rtype: bool
### Response:
def tile_is_valid(self):
""" Checks if tile has tile info and valid timestamp
:return: `True` if tile is valid and `False` otherwise
:rtype: bool
"""
return self.tile_info is not None \
and (self.datetime == self.date or self.datetime == self.parse_datetime(self.tile_info['timestamp'])) |
def bump_context(self, name):
"""Causes the context's tools to take priority over all others."""
data = self._context(name)
data["priority"] = self._next_priority
self._flush_tools() | Causes the context's tools to take priority over all others. | Below is the instruction that describes the task:
### Input:
Causes the context's tools to take priority over all others.
### Response:
def bump_context(self, name):
"""Causes the context's tools to take priority over all others."""
data = self._context(name)
data["priority"] = self._next_priority
self._flush_tools() |
def is_none(entity, prop, name):
"bool: True if the value of a property is None."
return is_not_empty(entity, prop, name) and getattr(entity, name) is None | bool: True if the value of a property is None. | Below is the instruction that describes the task:
### Input:
bool: True if the value of a property is None.
### Response:
def is_none(entity, prop, name):
"bool: True if the value of a property is None."
return is_not_empty(entity, prop, name) and getattr(entity, name) is None |
def add_geo_facet(self, *args, **kwargs):
"""Add a geo factory facet"""
self.facets.append(GeoDistanceFacet(*args, **kwargs)) | Add a geo factory facet | Below is the instruction that describes the task:
### Input:
Add a geo factory facet
### Response:
def add_geo_facet(self, *args, **kwargs):
"""Add a geo factory facet"""
self.facets.append(GeoDistanceFacet(*args, **kwargs)) |
def abort(self, msgObj):
"""
Disconnect all signals and turn macro processing in the event
handler back on.
"""
self.qteMain.qtesigKeyparsed.disconnect(self.qteKeyPress)
self.qteMain.qtesigAbort.disconnect(self.abort)
self.qteActive = False
self.qteMain.qteEnableMacroProcessing() | Disconnect all signals and turn macro processing in the event
handler back on. | Below is the instruction that describes the task:
### Input:
Disconnect all signals and turn macro processing in the event
handler back on.
### Response:
def abort(self, msgObj):
"""
Disconnect all signals and turn macro processing in the event
handler back on.
"""
self.qteMain.qtesigKeyparsed.disconnect(self.qteKeyPress)
self.qteMain.qtesigAbort.disconnect(self.abort)
self.qteActive = False
self.qteMain.qteEnableMacroProcessing() |
def complete(self, filepath):
'''
Marks the item as complete by moving it to the done directory and optionally gzipping it.
'''
if not os.path.exists(filepath):
raise FileNotFoundError("Can't Complete {}, it doesn't exist".format(filepath))
if self._devel: self.logger.debug("Completing - {} ".format(filepath))
if self.rotate_complete:
try:
complete_dir = str(self.rotate_complete())
except Exception as e:
self.logger.error("rotate_complete function failed with the following exception.")
self.logger.exception(e)
raise
newdir = os.path.join(self._done_dir, complete_dir)
newpath = os.path.join(newdir, os.path.split(filepath)[-1] )
if not os.path.isdir(newdir):
self.logger.debug("Making new directory: {}".format(newdir))
os.makedirs(newdir)
else:
newpath = os.path.join(self._done_dir, os.path.split(filepath)[-1] )
try:
if self._compress_complete:
if not filepath.endswith('.gz'):
# Compressing complete, but existing file not compressed
# Compress and move it and kick out
newpath += '.gz'
self._compress_and_move(filepath, newpath)
return newpath
# else the file is already compressed and can just be moved
#if not compressing completed file, just move it
shutil.move(filepath, newpath)
self.logger.info(" Completed - {}".format(filepath))
except Exception as e:
self.logger.error("Couldn't Complete {}".format(filepath))
self.logger.exception(e)
raise
return newpath | Marks the item as complete by moving it to the done directory and optionally gzipping it. | Below is the instruction that describes the task:
### Input:
Marks the item as complete by moving it to the done directory and optionally gzipping it.
### Response:
def complete(self, filepath):
'''
Marks the item as complete by moving it to the done directory and optionally gzipping it.
'''
if not os.path.exists(filepath):
raise FileNotFoundError("Can't Complete {}, it doesn't exist".format(filepath))
if self._devel: self.logger.debug("Completing - {} ".format(filepath))
if self.rotate_complete:
try:
complete_dir = str(self.rotate_complete())
except Exception as e:
self.logger.error("rotate_complete function failed with the following exception.")
self.logger.exception(e)
raise
newdir = os.path.join(self._done_dir, complete_dir)
newpath = os.path.join(newdir, os.path.split(filepath)[-1] )
if not os.path.isdir(newdir):
self.logger.debug("Making new directory: {}".format(newdir))
os.makedirs(newdir)
else:
newpath = os.path.join(self._done_dir, os.path.split(filepath)[-1] )
try:
if self._compress_complete:
if not filepath.endswith('.gz'):
# Compressing complete, but existing file not compressed
# Compress and move it and kick out
newpath += '.gz'
self._compress_and_move(filepath, newpath)
return newpath
# else the file is already compressed and can just be moved
#if not compressing completed file, just move it
shutil.move(filepath, newpath)
self.logger.info(" Completed - {}".format(filepath))
except Exception as e:
self.logger.error("Couldn't Complete {}".format(filepath))
self.logger.exception(e)
raise
return newpath |
def _bddnode(root, lo, hi):
"""Return a unique BDD node."""
if lo is hi:
node = lo
else:
key = (root, lo, hi)
try:
node = _NODES[key]
except KeyError:
node = _NODES[key] = BDDNode(*key)
return node | Return a unique BDD node. | Below is the instruction that describes the task:
### Input:
Return a unique BDD node.
### Response:
def _bddnode(root, lo, hi):
"""Return a unique BDD node."""
if lo is hi:
node = lo
else:
key = (root, lo, hi)
try:
node = _NODES[key]
except KeyError:
node = _NODES[key] = BDDNode(*key)
return node |
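The try/except over _NODES is a hash-consing (unique table) idiom: structurally equal nodes are built once and shared, so object identity can stand in for structural equality. A hedged illustration (lo_node and hi_node stand for any two previously built BDD nodes):
n1 = _bddnode(3, lo_node, hi_node)
n2 = _bddnode(3, lo_node, hi_node)
assert n1 is n2                                  # same object via the _NODES unique table
assert _bddnode(3, lo_node, lo_node) is lo_node  # lo is hi collapses to the shared child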
def do_reload(self, args):
"""Reload a module in to the framework"""
if args.module is not None:
if args.module not in self.frmwk.modules:
self.print_error('Invalid Module Selected.')
return
module = self.frmwk.modules[args.module]
elif self.frmwk.current_module:
module = self.frmwk.current_module
else:
self.print_error('Must \'use\' module first')
return
self.reload_module(module) | Reload a module into the framework | Below is the instruction that describes the task:
### Input:
Reload a module into the framework
### Response:
def do_reload(self, args):
"""Reload a module in to the framework"""
if args.module is not None:
if args.module not in self.frmwk.modules:
self.print_error('Invalid Module Selected.')
return
module = self.frmwk.modules[args.module]
elif self.frmwk.current_module:
module = self.frmwk.current_module
else:
self.print_error('Must \'use\' module first')
return
self.reload_module(module) |
def parallel_update_objectinfo_cplist(
cplist,
liststartindex=None,
maxobjects=None,
nworkers=NCPUS,
fast_mode=False,
findercmap='gray_r',
finderconvolve=None,
deredden_object=True,
custom_bandpasses=None,
gaia_submit_timeout=10.0,
gaia_submit_tries=3,
gaia_max_timeout=180.0,
gaia_mirror=None,
complete_query_later=True,
lclistpkl=None,
nbrradiusarcsec=60.0,
maxnumneighbors=5,
plotdpi=100,
findercachedir='~/.astrobase/stamp-cache',
verbose=True
):
'''
This updates objectinfo for a list of checkplots.
Useful in cases where a previous round of GAIA/finderchart/external catalog
acquisition failed. This will preserve the following keys in the checkplots
if they exist:
comments
varinfo
objectinfo.objecttags
Parameters
----------
cplist : list of str
A list of checkplot pickle file names to update.
liststartindex : int
The index of the input list to start working at.
maxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartindex` to effectively distribute working on a large list of
input checkplot pickles over several sessions or machines.
nworkers : int
The number of parallel workers that will work on the checkplot
update process.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond. See the docstring for
`checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this
works. If this is True, will run in "fast" mode with default timeouts (5
seconds in most cases). If this is a float, will run in "fast" mode with
the provided timeout value in seconds.
findercmap : str or matplotlib.cm.Colormap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
deredden_object : bool
If this is True, will use the 2MASS DUST service to get extinction
coefficients in various bands, and then try to deredden the magnitudes
and colors of the object already present in the checkplot's objectinfo
dict.
custom_bandpasses : dict
This is a dict used to provide custom bandpass definitions for any
magnitude measurements in the objectinfo dict that are not automatically
recognized by the `varclass.starfeatures.color_features` function. See
its docstring for details on the required format.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
complete_query_later : bool
If this is True, saves the state of GAIA queries that are not yet
complete when `gaia_max_timeout` is reached while waiting for the GAIA
service to respond to our request. A later call for GAIA info on the
same object will attempt to pick up the results from the existing query
if it's completed. If `fast_mode` is True, this is ignored.
lclistpkl : dict or str
If this is provided, must be a dict resulting from reading a catalog
produced by the `lcproc.catalogs.make_lclist` function or a str path
pointing to the pickle file produced by that function. This catalog is
used to find neighbors of the current object in the current light curve
collection. Looking at neighbors of the object within the radius
specified by `nbrradiusarcsec` is useful for light curves produced by
instruments that have a large pixel scale, so are susceptible to
blending of variability and potential confusion of neighbor variability
with that of the actual object being looked at. If this is None, no
neighbor lookups will be performed.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
plotdpi : int
The resolution in DPI of the plots to generate in this function
(e.g. the finder chart, etc.)
findercachedir : str
The path to the astrobase cache directory for finder chart downloads
from the NASA SkyView service.
verbose : bool
If True, will indicate progress and warn about potential problems.
Returns
-------
list of str
Paths to the updated checkplot pickle files.
'''
# work around the Darwin segfault after fork if no network activity in
# main thread bug: https://bugs.python.org/issue30385#msg293958
if sys.platform == 'darwin':
import requests
requests.get('http://captive.apple.com/hotspot-detect.html')
# handle the start and end indices
if (liststartindex is not None) and (maxobjects is None):
cplist = cplist[liststartindex:]
elif (liststartindex is None) and (maxobjects is not None):
cplist = cplist[:maxobjects]
elif (liststartindex is not None) and (maxobjects is not None):
cplist = (
cplist[liststartindex:liststartindex+maxobjects]
)
tasks = [(x, {'fast_mode':fast_mode,
'findercmap':findercmap,
'finderconvolve':finderconvolve,
'deredden_object':deredden_object,
'custom_bandpasses':custom_bandpasses,
'gaia_submit_timeout':gaia_submit_timeout,
'gaia_submit_tries':gaia_submit_tries,
'gaia_max_timeout':gaia_max_timeout,
'gaia_mirror':gaia_mirror,
'complete_query_later':complete_query_later,
'lclistpkl':lclistpkl,
'nbrradiusarcsec':nbrradiusarcsec,
'maxnumneighbors':maxnumneighbors,
'plotdpi':plotdpi,
'findercachedir':findercachedir,
'verbose':verbose}) for x in cplist]
resultfutures = []
results = []
with ProcessPoolExecutor(max_workers=nworkers) as executor:
resultfutures = executor.map(cp_objectinfo_worker, tasks)
results = [x for x in resultfutures]
executor.shutdown()
return results | This updates objectinfo for a list of checkplots.
Useful in cases where a previous round of GAIA/finderchart/external catalog
acquisition failed. This will preserve the following keys in the checkplots
if they exist:
comments
varinfo
objectinfo.objecttags
Parameters
----------
cplist : list of str
A list of checkplot pickle file names to update.
liststartindex : int
The index of the input list to start working at.
maxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartindex` to effectively distribute working on a large list of
input checkplot pickles over several sessions or machines.
nworkers : int
The number of parallel workers that will work on the checkplot
update process.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond. See the docstring for
`checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this
works. If this is True, will run in "fast" mode with default timeouts (5
seconds in most cases). If this is a float, will run in "fast" mode with
the provided timeout value in seconds.
findercmap : str or matplotlib.cm.Colormap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
deredden_object : bool
If this is True, will use the 2MASS DUST service to get extinction
coefficients in various bands, and then try to deredden the magnitudes
and colors of the object already present in the checkplot's objectinfo
dict.
custom_bandpasses : dict
This is a dict used to provide custom bandpass definitions for any
magnitude measurements in the objectinfo dict that are not automatically
recognized by the `varclass.starfeatures.color_features` function. See
its docstring for details on the required format.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
complete_query_later : bool
If this is True, saves the state of GAIA queries that are not yet
complete when `gaia_max_timeout` is reached while waiting for the GAIA
service to respond to our request. A later call for GAIA info on the
same object will attempt to pick up the results from the existing query
if it's completed. If `fast_mode` is True, this is ignored.
lclistpkl : dict or str
If this is provided, must be a dict resulting from reading a catalog
produced by the `lcproc.catalogs.make_lclist` function or a str path
pointing to the pickle file produced by that function. This catalog is
used to find neighbors of the current object in the current light curve
collection. Looking at neighbors of the object within the radius
specified by `nbrradiusarcsec` is useful for light curves produced by
instruments that have a large pixel scale, so are susceptible to
blending of variability and potential confusion of neighbor variability
with that of the actual object being looked at. If this is None, no
neighbor lookups will be performed.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
plotdpi : int
The resolution in DPI of the plots to generate in this function
(e.g. the finder chart, etc.)
findercachedir : str
The path to the astrobase cache directory for finder chart downloads
from the NASA SkyView service.
verbose : bool
If True, will indicate progress and warn about potential problems.
Returns
-------
list of str
Paths to the updated checkplot pickle files. | Below is the instruction that describes the task:
### Input:
This updates objectinfo for a list of checkplots.
Useful in cases where a previous round of GAIA/finderchart/external catalog
acquisition failed. This will preserve the following keys in the checkplots
if they exist:
comments
varinfo
objectinfo.objecttags
Parameters
----------
cplist : list of str
A list of checkplot pickle file names to update.
liststartindex : int
The index of the input list to start working at.
maxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartindex` to effectively distribute working on a large list of
input checkplot pickles over several sessions or machines.
nworkers : int
The number of parallel workers that will work on the checkplot
update process.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond. See the docstring for
`checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this
works. If this is True, will run in "fast" mode with default timeouts (5
seconds in most cases). If this is a float, will run in "fast" mode with
the provided timeout value in seconds.
findercmap : str or matplotlib.cm.Colormap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
deredden_object : bool
If this is True, will use the 2MASS DUST service to get extinction
coefficients in various bands, and then try to deredden the magnitudes
and colors of the object already present in the checkplot's objectinfo
dict.
custom_bandpasses : dict
This is a dict used to provide custom bandpass definitions for any
magnitude measurements in the objectinfo dict that are not automatically
recognized by the `varclass.starfeatures.color_features` function. See
its docstring for details on the required format.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
complete_query_later : bool
If this is True, saves the state of GAIA queries that are not yet
complete when `gaia_max_timeout` is reached while waiting for the GAIA
service to respond to our request. A later call for GAIA info on the
same object will attempt to pick up the results from the existing query
if it's completed. If `fast_mode` is True, this is ignored.
lclistpkl : dict or str
If this is provided, must be a dict resulting from reading a catalog
produced by the `lcproc.catalogs.make_lclist` function or a str path
pointing to the pickle file produced by that function. This catalog is
used to find neighbors of the current object in the current light curve
collection. Looking at neighbors of the object within the radius
specified by `nbrradiusarcsec` is useful for light curves produced by
instruments that have a large pixel scale, so are susceptible to
blending of variability and potential confusion of neighbor variability
with that of the actual object being looked at. If this is None, no
neighbor lookups will be performed.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
plotdpi : int
The resolution in DPI of the plots to generate in this function
(e.g. the finder chart, etc.)
findercachedir : str
The path to the astrobase cache directory for finder chart downloads
from the NASA SkyView service.
verbose : bool
If True, will indicate progress and warn about potential problems.
Returns
-------
list of str
Paths to the updated checkplot pickle files.
### Response:
def parallel_update_objectinfo_cplist(
cplist,
liststartindex=None,
maxobjects=None,
nworkers=NCPUS,
fast_mode=False,
findercmap='gray_r',
finderconvolve=None,
deredden_object=True,
custom_bandpasses=None,
gaia_submit_timeout=10.0,
gaia_submit_tries=3,
gaia_max_timeout=180.0,
gaia_mirror=None,
complete_query_later=True,
lclistpkl=None,
nbrradiusarcsec=60.0,
maxnumneighbors=5,
plotdpi=100,
findercachedir='~/.astrobase/stamp-cache',
verbose=True
):
'''
This updates objectinfo for a list of checkplots.
Useful in cases where a previous round of GAIA/finderchart/external catalog
acquisition failed. This will preserve the following keys in the checkplots
if they exist:
comments
varinfo
objectinfo.objecttags
Parameters
----------
cplist : list of str
A list of checkplot pickle file names to update.
liststartindex : int
The index of the input list to start working at.
maxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartindex` to effectively distribute working on a large list of
input checkplot pickles over several sessions or machines.
nworkers : int
The number of parallel workers that will work on the checkplot
update process.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond. See the docstring for
`checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this
works. If this is True, will run in "fast" mode with default timeouts (5
seconds in most cases). If this is a float, will run in "fast" mode with
the provided timeout value in seconds.
findercmap : str or matplotlib.cm.Colormap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
deredden_object : bool
If this is True, will use the 2MASS DUST service to get extinction
coefficients in various bands, and then try to deredden the magnitudes
and colors of the object already present in the checkplot's objectinfo
dict.
custom_bandpasses : dict
This is a dict used to provide custom bandpass definitions for any
magnitude measurements in the objectinfo dict that are not automatically
recognized by the `varclass.starfeatures.color_features` function. See
its docstring for details on the required format.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
complete_query_later : bool
If this is True, saves the state of GAIA queries that are not yet
complete when `gaia_max_timeout` is reached while waiting for the GAIA
service to respond to our request. A later call for GAIA info on the
same object will attempt to pick up the results from the existing query
if it's completed. If `fast_mode` is True, this is ignored.
lclistpkl : dict or str
If this is provided, must be a dict resulting from reading a catalog
produced by the `lcproc.catalogs.make_lclist` function or a str path
pointing to the pickle file produced by that function. This catalog is
used to find neighbors of the current object in the current light curve
collection. Looking at neighbors of the object within the radius
specified by `nbrradiusarcsec` is useful for light curves produced by
instruments that have a large pixel scale, so are susceptible to
blending of variability and potential confusion of neighbor variability
with that of the actual object being looked at. If this is None, no
neighbor lookups will be performed.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
plotdpi : int
The resolution in DPI of the plots to generate in this function
(e.g. the finder chart, etc.)
findercachedir : str
The path to the astrobase cache directory for finder chart downloads
from the NASA SkyView service.
verbose : bool
If True, will indicate progress and warn about potential problems.
Returns
-------
list of str
Paths to the updated checkplot pickle files.
'''
# work around the Darwin segfault after fork if no network activity in
# main thread bug: https://bugs.python.org/issue30385#msg293958
if sys.platform == 'darwin':
import requests
requests.get('http://captive.apple.com/hotspot-detect.html')
# handle the start and end indices
if (liststartindex is not None) and (maxobjects is None):
cplist = cplist[liststartindex:]
elif (liststartindex is None) and (maxobjects is not None):
cplist = cplist[:maxobjects]
elif (liststartindex is not None) and (maxobjects is not None):
cplist = (
cplist[liststartindex:liststartindex+maxobjects]
)
tasks = [(x, {'fast_mode':fast_mode,
'findercmap':findercmap,
'finderconvolve':finderconvolve,
'deredden_object':deredden_object,
'custom_bandpasses':custom_bandpasses,
'gaia_submit_timeout':gaia_submit_timeout,
'gaia_submit_tries':gaia_submit_tries,
'gaia_max_timeout':gaia_max_timeout,
'gaia_mirror':gaia_mirror,
'complete_query_later':complete_query_later,
'lclistpkl':lclistpkl,
'nbrradiusarcsec':nbrradiusarcsec,
'maxnumneighbors':maxnumneighbors,
'plotdpi':plotdpi,
'findercachedir':findercachedir,
'verbose':verbose}) for x in cplist]
resultfutures = []
results = []
with ProcessPoolExecutor(max_workers=nworkers) as executor:
resultfutures = executor.map(cp_objectinfo_worker, tasks)
results = [x for x in resultfutures]
executor.shutdown()
return results |
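The `liststartindex`/`maxobjects` handling above is plain list slicing; a small sketch of how a run over a long checkplot list might be split across two sessions (the file names here are made up).

cplist = ["checkplot-%04i.pkl" % i for i in range(10)]  # hypothetical input list

def select_chunk(cplist, liststartindex=None, maxobjects=None):
    # Mirrors the slicing branches in parallel_update_objectinfo_cplist.
    if (liststartindex is not None) and (maxobjects is None):
        return cplist[liststartindex:]
    elif (liststartindex is None) and (maxobjects is not None):
        return cplist[:maxobjects]
    elif (liststartindex is not None) and (maxobjects is not None):
        return cplist[liststartindex:liststartindex + maxobjects]
    return cplist

print(select_chunk(cplist, liststartindex=0, maxobjects=5))  # session or machine 1
print(select_chunk(cplist, liststartindex=5, maxobjects=5))  # session or machine 2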
def _build_row(cells, padding, begin, sep, end):
"Return a string which represents a row of data cells."
pad = " " * padding
padded_cells = [pad + cell + pad for cell in cells]
# SolveBio: we're only displaying Key-Value tuples (dimension of 2).
# enforce that we don't wrap lines by setting a max
# limit on row width which is equal to TTY_COLS (see printing)
rendered_cells = (begin + sep.join(padded_cells) + end).rstrip()
if len(rendered_cells) > TTY_COLS:
if not cells[-1].endswith(" ") and not cells[-1].endswith("-"):
terminating_str = " ... "
else:
terminating_str = ""
rendered_cells = "{0}{1}{2}".format(
rendered_cells[:TTY_COLS - len(terminating_str) - 1],
terminating_str, end)
return rendered_cells | Return a string which represents a row of data cells. | Below is the instruction that describes the task:
### Input:
Return a string which represents a row of data cells.
### Response:
def _build_row(cells, padding, begin, sep, end):
"Return a string which represents a row of data cells."
pad = " " * padding
padded_cells = [pad + cell + pad for cell in cells]
# SolveBio: we're only displaying Key-Value tuples (dimension of 2).
# enforce that we don't wrap lines by setting a max
# limit on row width which is equal to TTY_COLS (see printing)
rendered_cells = (begin + sep.join(padded_cells) + end).rstrip()
if len(rendered_cells) > TTY_COLS:
if not cells[-1].endswith(" ") and not cells[-1].endswith("-"):
terminating_str = " ... "
else:
terminating_str = ""
rendered_cells = "{0}{1}{2}".format(
rendered_cells[:TTY_COLS - len(terminating_str) - 1],
terminating_str, end)
return rendered_cells |
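A rough standalone sketch of the same pad/join/truncate behaviour, with the module's `TTY_COLS` constant replaced by a small assumed width so the truncation branch is easy to trigger.

TTY_COLS = 30  # assumed width for this sketch only

def build_row(cells, padding=1, begin="|", sep="|", end="|"):
    pad = " " * padding
    padded_cells = [pad + cell + pad for cell in cells]
    row = (begin + sep.join(padded_cells) + end).rstrip()
    if len(row) > TTY_COLS:
        if not cells[-1].endswith(" ") and not cells[-1].endswith("-"):
            terminating_str = " ... "
        else:
            terminating_str = ""
        row = "{0}{1}{2}".format(row[:TTY_COLS - len(terminating_str) - 1],
                                 terminating_str, end)
    return row

print(build_row(["key", "short"]))                        # fits, printed as-is
print(build_row(["key", "a value far too long to fit"]))  # truncated with ' ... '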
def _construct_lambda_layer(self, intrinsics_resolver):
"""Constructs and returns the Lambda layer version.
:returns: the Lambda LayerVersion resource
:rtype: LambdaLayerVersion
"""
# Resolve intrinsics if applicable:
self.LayerName = self._resolve_string_parameter(intrinsics_resolver, self.LayerName, 'LayerName')
self.LicenseInfo = self._resolve_string_parameter(intrinsics_resolver, self.LicenseInfo, 'LicenseInfo')
self.Description = self._resolve_string_parameter(intrinsics_resolver, self.Description, 'Description')
self.RetentionPolicy = self._resolve_string_parameter(intrinsics_resolver, self.RetentionPolicy,
'RetentionPolicy')
retention_policy_value = self._get_retention_policy_value()
attributes = self.get_passthrough_resource_attributes()
if attributes is None:
attributes = {}
attributes['DeletionPolicy'] = retention_policy_value
old_logical_id = self.logical_id
new_logical_id = logical_id_generator.LogicalIdGenerator(old_logical_id, self.to_dict()).gen()
self.logical_id = new_logical_id
lambda_layer = LambdaLayerVersion(self.logical_id, depends_on=self.depends_on, attributes=attributes)
# Changing the LayerName property: when a layer is published, it is given an Arn
# example: arn:aws:lambda:us-west-2:123456789012:layer:MyLayer:1
# where MyLayer is the LayerName property if it exists; otherwise, it is the
# LogicalId of this resource. Since a LayerVersion is an immutable resource, when
# CloudFormation updates this resource, it will ALWAYS create a new version then
# delete the old version if the logical ids match. What this does is change the
# logical id of every layer (so a `DeletionPolicy: Retain` can work) and set the
# LayerName property of the layer so that the Arn will still always be the same
# with the exception of an incrementing version number.
if not self.LayerName:
self.LayerName = old_logical_id
lambda_layer.LayerName = self.LayerName
lambda_layer.Description = self.Description
lambda_layer.Content = construct_s3_location_object(self.ContentUri, self.logical_id, 'ContentUri')
lambda_layer.CompatibleRuntimes = self.CompatibleRuntimes
lambda_layer.LicenseInfo = self.LicenseInfo
return lambda_layer | Constructs and returns the Lambda layer version.
:returns: the Lambda LayerVersion resource
:rtype: LambdaLayerVersion | Below is the instruction that describes the task:
### Input:
Constructs and returns the Lambda layer version.
:returns: the Lambda LayerVersion resource
:rtype: LambdaLayerVersion
### Response:
def _construct_lambda_layer(self, intrinsics_resolver):
"""Constructs and returns the Lambda layer version.
:returns: the Lambda LayerVersion resource
:rtype: LambdaLayerVersion
"""
# Resolve intrinsics if applicable:
self.LayerName = self._resolve_string_parameter(intrinsics_resolver, self.LayerName, 'LayerName')
self.LicenseInfo = self._resolve_string_parameter(intrinsics_resolver, self.LicenseInfo, 'LicenseInfo')
self.Description = self._resolve_string_parameter(intrinsics_resolver, self.Description, 'Description')
self.RetentionPolicy = self._resolve_string_parameter(intrinsics_resolver, self.RetentionPolicy,
'RetentionPolicy')
retention_policy_value = self._get_retention_policy_value()
attributes = self.get_passthrough_resource_attributes()
if attributes is None:
attributes = {}
attributes['DeletionPolicy'] = retention_policy_value
old_logical_id = self.logical_id
new_logical_id = logical_id_generator.LogicalIdGenerator(old_logical_id, self.to_dict()).gen()
self.logical_id = new_logical_id
lambda_layer = LambdaLayerVersion(self.logical_id, depends_on=self.depends_on, attributes=attributes)
# Changing the LayerName property: when a layer is published, it is given an Arn
# example: arn:aws:lambda:us-west-2:123456789012:layer:MyLayer:1
# where MyLayer is the LayerName property if it exists; otherwise, it is the
# LogicalId of this resource. Since a LayerVersion is an immutable resource, when
# CloudFormation updates this resource, it will ALWAYS create a new version then
# delete the old version if the logical ids match. What this does is change the
# logical id of every layer (so a `DeletionPolicy: Retain` can work) and set the
# LayerName property of the layer so that the Arn will still always be the same
# with the exception of an incrementing version number.
if not self.LayerName:
self.LayerName = old_logical_id
lambda_layer.LayerName = self.LayerName
lambda_layer.Description = self.Description
lambda_layer.Content = construct_s3_location_object(self.ContentUri, self.logical_id, 'ContentUri')
lambda_layer.CompatibleRuntimes = self.CompatibleRuntimes
lambda_layer.LicenseInfo = self.LicenseInfo
return lambda_layer |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'updated') and self.updated is not None:
_dict['updated'] = datetime_to_string(self.updated)
return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'updated') and self.updated is not None:
_dict['updated'] = datetime_to_string(self.updated)
return _dict |
def decide(self):
"""
Decides the next command to be launched based on the current state.
:return: Tuple containing the next command name and its parameters.
"""
next_command_name = random.choice(self.COMMAND_MAP[self.state['last_command']])
param = ''
if next_command_name == 'retrieve':
param = random.choice(self.state['file_list'])
elif next_command_name == 'cwd':
param = random.choice(self.state['dir_list'])
return next_command_name, param | Decides the next command to be launched based on the current state.
:return: Tuple containing the next command name and its parameters. | Below is the instruction that describes the task:
### Input:
Decides the next command to be launched based on the current state.
:return: Tuple containing the next command name and its parameters.
### Response:
def decide(self):
"""
Decides the next command to be launched based on the current state.
:return: Tuple containing the next command name and its parameters.
"""
next_command_name = random.choice(self.COMMAND_MAP[self.state['last_command']])
param = ''
if next_command_name == 'retrieve':
param = random.choice(self.state['file_list'])
elif next_command_name == 'cwd':
param = random.choice(self.state['dir_list'])
return next_command_name, param |
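A self-contained sketch of the same random-walk idea with a hypothetical `COMMAND_MAP` and state dict; the real command graph and state kept by the surrounding class may differ.

import random

COMMAND_MAP = {  # hypothetical command transition graph
    'login': ['list', 'cwd'],
    'list': ['retrieve', 'cwd'],
    'cwd': ['list', 'retrieve'],
    'retrieve': ['list', 'cwd'],
}
state = {
    'last_command': 'login',
    'file_list': ['readme.txt', 'data.bin'],
    'dir_list': ['/pub', '/incoming'],
}

next_command_name = random.choice(COMMAND_MAP[state['last_command']])
param = ''
if next_command_name == 'retrieve':
    param = random.choice(state['file_list'])
elif next_command_name == 'cwd':
    param = random.choice(state['dir_list'])
print(next_command_name, param)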
def setFormat(self, start, count, format):
""" Reimplemented to highlight selectively.
"""
start += self._current_offset
super(FrontendHighlighter, self).setFormat(start, count, format) | Reimplemented to highlight selectively. | Below is the instruction that describes the task:
### Input:
Reimplemented to highlight selectively.
### Response:
def setFormat(self, start, count, format):
""" Reimplemented to highlight selectively.
"""
start += self._current_offset
super(FrontendHighlighter, self).setFormat(start, count, format) |
def _ssh_forward_accept(ssh_session, timeout_ms):
"""Waiting for an incoming connection from a reverse forwarded port. Note
that this results in a kernel block until a connection is received.
"""
ssh_channel = c_ssh_forward_accept(c_void_p(ssh_session),
c_int(timeout_ms))
if ssh_channel is None:
raise SshTimeoutException()
return ssh_channel | Waiting for an incoming connection from a reverse forwarded port. Note
that this results in a kernel block until a connection is received. | Below is the instruction that describes the task:
### Input:
Waiting for an incoming connection from a reverse forwarded port. Note
that this results in a kernel block until a connection is received.
### Response:
def _ssh_forward_accept(ssh_session, timeout_ms):
"""Waiting for an incoming connection from a reverse forwarded port. Note
that this results in a kernel block until a connection is received.
"""
ssh_channel = c_ssh_forward_accept(c_void_p(ssh_session),
c_int(timeout_ms))
if ssh_channel is None:
raise SshTimeoutException()
return ssh_channel |
def xpathNextSelf(self, ctxt):
"""Traversal function for the "self" direction The self axis
contains just the context node itself """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextSelf(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextSelf() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | Traversal function for the "self" direction The self axis
contains just the context node itself | Below is the instruction that describes the task:
### Input:
Traversal function for the "self" direction The self axis
contains just the context node itself
### Response:
def xpathNextSelf(self, ctxt):
"""Traversal function for the "self" direction The self axis
contains just the context node itself """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlXPathNextSelf(ctxt__o, self._o)
if ret is None:raise xpathError('xmlXPathNextSelf() failed')
__tmp = xmlNode(_obj=ret)
return __tmp |
def clean_notify(self):
"""
Clean the notify_on_enrollment field.
"""
return self.cleaned_data.get(self.Fields.NOTIFY, self.NotificationTypes.DEFAULT) | Clean the notify_on_enrollment field. | Below is the instruction that describes the task:
### Input:
Clean the notify_on_enrollment field.
### Response:
def clean_notify(self):
"""
Clean the notify_on_enrollment field.
"""
return self.cleaned_data.get(self.Fields.NOTIFY, self.NotificationTypes.DEFAULT) |
def process_inputs(self):
"""
Processes input data
:return:
"""
ret = []
files = self.args.files
if files is None:
return ret
for fname in files:
if fname == '-':
if self.args.base64stdin:
for line in sys.stdin:
data = base64.b64decode(line)
ret.append(self.process_file(data, fname))
continue
else:
fh = sys.stdin
elif fname.endswith('.tar') or fname.endswith('.tar.gz'):
sub = self.process_tar(fname)
ret.append(sub)
continue
elif not os.path.isfile(fname):
sub = self.process_dir(fname)
ret.append(sub)
continue
else:
fh = open(fname, 'rb')
with fh:
data = fh.read()
sub = self.process_file(data, fname)
ret.append(sub)
return ret | Processes input data
:return: | Below is the instruction that describes the task:
### Input:
Processes input data
:return:
### Response:
def process_inputs(self):
"""
Processes input data
:return:
"""
ret = []
files = self.args.files
if files is None:
return ret
for fname in files:
if fname == '-':
if self.args.base64stdin:
for line in sys.stdin:
data = base64.b64decode(line)
ret.append(self.process_file(data, fname))
continue
else:
fh = sys.stdin
elif fname.endswith('.tar') or fname.endswith('.tar.gz'):
sub = self.process_tar(fname)
ret.append(sub)
continue
elif not os.path.isfile(fname):
sub = self.process_dir(fname)
ret.append(sub)
continue
else:
fh = open(fname, 'rb')
with fh:
data = fh.read()
sub = self.process_file(data, fname)
ret.append(sub)
return ret |
def _delete(collection_name, spec, opts, flags):
"""Get an OP_DELETE message."""
encoded = _dict_to_bson(spec, False, opts) # Uses extensions.
return b"".join([
_ZERO_32,
_make_c_string(collection_name),
_pack_int(flags),
encoded]), len(encoded) | Get an OP_DELETE message. | Below is the instruction that describes the task:
### Input:
Get an OP_DELETE message.
### Response:
def _delete(collection_name, spec, opts, flags):
"""Get an OP_DELETE message."""
encoded = _dict_to_bson(spec, False, opts) # Uses extensions.
return b"".join([
_ZERO_32,
_make_c_string(collection_name),
_pack_int(flags),
encoded]), len(encoded) |
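For context, a sketch of what the helpers above are assumed to produce for the MongoDB wire format: `_ZERO_32` four zero bytes, `_make_c_string` a UTF-8 string with a trailing NUL, and `_pack_int` a little-endian int32. These stand-ins are assumptions for illustration, not the driver's actual implementations.

import struct

_ZERO_32 = b"\x00\x00\x00\x00"

def _make_c_string(value):
    return value.encode("utf-8") + b"\x00"  # NUL-terminated UTF-8 string

def _pack_int(value):
    return struct.pack("<i", value)  # little-endian signed int32

header = b"".join([_ZERO_32, _make_c_string("db.coll"), _pack_int(0)])
print(len(header))  # 4 + 8 + 4 = 16 bytes, followed by the encoded selector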
def dataCollections(self,
countryName=None,
addDerivativeVariables=None,
outFields=None,
suppressNullValues=False):
"""
The GeoEnrichment service uses the concept of a data collection to
define the data attributes returned by the enrichment service. Each
data collection has a unique name that acts as an ID that is passed
in the dataCollections parameter of the GeoEnrichment service.
Some data collections (such as default) can be used in all
supported countries. Other data collections may only be available
in one or a collection of countries. Data collections may only be
available in a subset of countries because of differences in the
demographic data that is available for each country. A list of data
collections for all available countries can be generated with the
data collection discover method.
For full help please go here:
http://resources.arcgis.com/en/help/arcgis-rest-api/#/Data_collections/02r30000021t000000/
Inputs:
countryName - lets the user supply an optional name of a
country in order to get information about the data collections
in that given country.
addDerivativeVariables - Optional parameter to specify a list of
field names that include variables for the derivative
statistics.
outFields - Optional parameter to specify a list of output
fields in the response.
suppressNullValues - Optional parameter to return only values
that are not NULL in the output response. Adding the optional
suppressNullValues parameter to any data collections discovery
method will reduce the size of the output that is returned
"""
if addDerivativeVariables is None:
addDerivativeVariables = ["*"]
if outFields is None:
outFields = ["*"]
if countryName is None:
url = self._base_url + self._url_data_collection
else:
url = self._base_url + self._url_data_collection + "/%s" % countryName
params = {
"f" : "token"
}
_addDerivVals = ["percent","index","average","all","*"]
if addDerivativeVariables in _addDerivVals:
params['addDerivativeVariables'] = addDerivativeVariables
if not outFields is None:
params['outFields'] = outFields
if not suppressNullValues is None and \
isinstance(suppressNullValues, bool):
if suppressNullValues:
params['suppressNullValues'] = "true"
else:
params['suppressNullValues'] = "false"
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | The GeoEnrichment service uses the concept of a data collection to
define the data attributes returned by the enrichment service. Each
data collection has a unique name that acts as an ID that is passed
in the dataCollections parameter of the GeoEnrichment service.
Some data collections (such as default) can be used in all
supported countries. Other data collections may only be available
in one or a collection of countries. Data collections may only be
available in a subset of countries because of differences in the
demographic data that is available for each country. A list of data
collections for all available countries can be generated with the
data collection discover method.
For full help please go here:
http://resources.arcgis.com/en/help/arcgis-rest-api/#/Data_collections/02r30000021t000000/
Inputs:
countryName - lets the user supply an optional name of a
country in order to get information about the data collections
in that given country.
addDerivativeVariables - Optional parameter to specify a list of
field names that include variables for the derivative
statistics.
outFields - Optional parameter to specify a list of output
fields in the response.
suppressNullValues - Optional parameter to return only values
that are not NULL in the output response. Adding the optional
suppressNullValues parameter to any data collections discovery
method will reduce the size of the output that is returned | Below is the instruction that describes the task:
### Input:
The GeoEnrichment service uses the concept of a data collection to
define the data attributes returned by the enrichment service. Each
data collection has a unique name that acts as an ID that is passed
in the dataCollections parameter of the GeoEnrichment service.
Some data collections (such as default) can be used in all
supported countries. Other data collections may only be available
in one or a collection of countries. Data collections may only be
available in a subset of countries because of differences in the
demographic data that is available for each country. A list of data
collections for all available countries can be generated with the
data collection discover method.
For full help please go here:
http://resources.arcgis.com/en/help/arcgis-rest-api/#/Data_collections/02r30000021t000000/
Inputs:
countryName - lets the user supply an optional name of a
country in order to get information about the data collections
in that given country.
addDerivativeVariables - Optional parameter to specify a list of
field names that include variables for the derivative
statistics.
outFields - Optional parameter to specify a list of output
fields in the response.
suppressNullValues - Optional parameter to return only values
that are not NULL in the output response. Adding the optional
suppressNullValues parameter to any data collections discovery
method will reduce the size of the output that is returned
### Response:
def dataCollections(self,
countryName=None,
addDerivativeVariables=None,
outFields=None,
suppressNullValues=False):
"""
The GeoEnrichment service uses the concept of a data collection to
define the data attributes returned by the enrichment service. Each
data collection has a unique name that acts as an ID that is passed
in the dataCollections parameter of the GeoEnrichment service.
Some data collections (such as default) can be used in all
supported countries. Other data collections may only be available
in one or a collection of countries. Data collections may only be
available in a subset of countries because of differences in the
demographic data that is available for each country. A list of data
collections for all available countries can be generated with the
data collection discover method.
For full help please go here:
http://resources.arcgis.com/en/help/arcgis-rest-api/#/Data_collections/02r30000021t000000/
Inputs:
countryName - lets the user supply an optional name of a
country in order to get information about the data collections
in that given country.
addDerivativeVariables - Optional parameter to specify a list of
field names that include variables for the derivative
statistics.
outFields - Optional parameter to specify a list of output
fields in the response.
suppressNullValues - Optional parameter to return only values
that are not NULL in the output response. Adding the optional
suppressNullValues parameter to any data collections discovery
method will reduce the size of the output that is returned
"""
if addDerivativeVariables is None:
addDerivativeVariables = ["*"]
if outFields is None:
outFields = ["*"]
if countryName is None:
url = self._base_url + self._url_data_collection
else:
url = self._base_url + self._url_data_collection + "/%s" % countryName
params = {
"f" : "token"
}
_addDerivVals = ["percent","index","average","all","*"]
if addDerivativeVariables in _addDerivVals:
params['addDerivativeVariables'] = addDerivativeVariables
if not outFields is None:
params['outFields'] = outFields
if not suppressNullValues is None and \
isinstance(suppressNullValues, bool):
if suppressNullValues:
params['suppressNullValues'] = "true"
else:
params['suppressNullValues'] = "false"
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) |
def _extract_instance_info(instances):
'''
Given an instance query, return a dict of all instance data
'''
ret = {}
for instance in instances:
# items could be type dict or list (for stopped EC2 instances)
if isinstance(instance['instancesSet']['item'], list):
for item in instance['instancesSet']['item']:
name = _extract_name_tag(item)
ret[name] = item
ret[name]['name'] = name
ret[name].update(
dict(
id=item['instanceId'],
image=item['imageId'],
size=item['instanceType'],
state=item['instanceState']['name'],
private_ips=item.get('privateIpAddress', []),
public_ips=item.get('ipAddress', [])
)
)
else:
item = instance['instancesSet']['item']
name = _extract_name_tag(item)
ret[name] = item
ret[name]['name'] = name
ret[name].update(
dict(
id=item['instanceId'],
image=item['imageId'],
size=item['instanceType'],
state=item['instanceState']['name'],
private_ips=item.get('privateIpAddress', []),
public_ips=item.get('ipAddress', [])
)
)
return ret | Given an instance query, return a dict of all instance data | Below is the instruction that describes the task:
### Input:
Given an instance query, return a dict of all instance data
### Response:
def _extract_instance_info(instances):
'''
Given an instance query, return a dict of all instance data
'''
ret = {}
for instance in instances:
# items could be type dict or list (for stopped EC2 instances)
if isinstance(instance['instancesSet']['item'], list):
for item in instance['instancesSet']['item']:
name = _extract_name_tag(item)
ret[name] = item
ret[name]['name'] = name
ret[name].update(
dict(
id=item['instanceId'],
image=item['imageId'],
size=item['instanceType'],
state=item['instanceState']['name'],
private_ips=item.get('privateIpAddress', []),
public_ips=item.get('ipAddress', [])
)
)
else:
item = instance['instancesSet']['item']
name = _extract_name_tag(item)
ret[name] = item
ret[name]['name'] = name
ret[name].update(
dict(
id=item['instanceId'],
image=item['imageId'],
size=item['instanceType'],
state=item['instanceState']['name'],
private_ips=item.get('privateIpAddress', []),
public_ips=item.get('ipAddress', [])
)
)
return ret |
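A made-up illustration of the query shape this parser expects: `instancesSet.item` is a single dict for one instance and a list when the reservation holds several. The key of the returned dict comes from the instance's Name tag via `_extract_name_tag`, which is not shown here.

instances = [{  # hypothetical single-instance reservation, values invented
    'instancesSet': {
        'item': {
            'instanceId': 'i-0123456789abcdef0',
            'imageId': 'ami-12345678',
            'instanceType': 't2.micro',
            'instanceState': {'name': 'running'},
            'privateIpAddress': '10.0.0.12',
            'ipAddress': '203.0.113.5',
        }
    }
}]
# _extract_instance_info(instances) would return something shaped like:
# {'web-1': {'name': 'web-1', 'id': 'i-0123456789abcdef0', 'size': 't2.micro',
#            'state': 'running', 'private_ips': '10.0.0.12',
#            'public_ips': '203.0.113.5', ...}}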
def create_content_type(json):
"""Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance.
"""
result = ContentType(json['sys'])
for field in json['fields']:
field_id = field['id']
del field['id']
result.fields[field_id] = field
result.name = json['name']
result.display_field = json.get('displayField')
return result | Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance. | Below is the instruction that describes the task:
### Input:
Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance.
### Response:
def create_content_type(json):
"""Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance.
"""
result = ContentType(json['sys'])
for field in json['fields']:
field_id = field['id']
del field['id']
result.fields[field_id] = field
result.name = json['name']
result.display_field = json.get('displayField')
return result |
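A hedged usage sketch: the dict below is a guess at the minimal fields the function reads (`sys`, `fields`, `name`, `displayField`) rather than a complete payload, and it assumes the SDK's `ContentType` constructor accepts the `sys` dict.

raw = {
    'sys': {'id': 'cat', 'type': 'ContentType'},
    'name': 'Cat',
    'displayField': 'name',
    'fields': [
        {'id': 'name', 'name': 'Name', 'type': 'Text'},
        {'id': 'lives', 'name': 'Lives', 'type': 'Integer'},
    ],
}
ct = create_content_type(raw)
# ct.name == 'Cat', ct.display_field == 'name'
# ct.fields == {'name': {'name': 'Name', 'type': 'Text'},
#               'lives': {'name': 'Lives', 'type': 'Integer'}}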
def __write_aliases_file(lines):
'''
Write a new copy of the aliases file. Lines is a list of lines
as returned by __parse_aliases.
'''
afn = __get_aliases_filename()
adir = os.path.dirname(afn)
out = tempfile.NamedTemporaryFile(dir=adir, delete=False)
if not __opts__.get('integration.test', False):
if os.path.isfile(afn):
afn_st = os.stat(afn)
os.chmod(out.name, stat.S_IMODE(afn_st.st_mode))
os.chown(out.name, afn_st.st_uid, afn_st.st_gid)
else:
os.chmod(out.name, 0o644)
os.chown(out.name, 0, 0)
for (line_alias, line_target, line_comment) in lines:
if isinstance(line_target, list):
line_target = ', '.join(line_target)
if not line_comment:
line_comment = ''
if line_alias and line_target:
write_line = '{0}: {1}{2}\n'.format(
line_alias, line_target, line_comment
)
else:
write_line = '{0}\n'.format(line_comment)
if six.PY3:
write_line = write_line.encode(__salt_system_encoding__)
out.write(write_line)
out.close()
os.rename(out.name, afn)
# Search $PATH for the newaliases command
newaliases = salt.utils.path.which('newaliases')
if newaliases is not None:
__salt__['cmd.run'](newaliases)
return True | Write a new copy of the aliases file. Lines is a list of lines
as returned by __parse_aliases. | Below is the instruction that describes the task:
### Input:
Write a new copy of the aliases file. Lines is a list of lines
as returned by __parse_aliases.
### Response:
def __write_aliases_file(lines):
'''
Write a new copy of the aliases file. Lines is a list of lines
as returned by __parse_aliases.
'''
afn = __get_aliases_filename()
adir = os.path.dirname(afn)
out = tempfile.NamedTemporaryFile(dir=adir, delete=False)
if not __opts__.get('integration.test', False):
if os.path.isfile(afn):
afn_st = os.stat(afn)
os.chmod(out.name, stat.S_IMODE(afn_st.st_mode))
os.chown(out.name, afn_st.st_uid, afn_st.st_gid)
else:
os.chmod(out.name, 0o644)
os.chown(out.name, 0, 0)
for (line_alias, line_target, line_comment) in lines:
if isinstance(line_target, list):
line_target = ', '.join(line_target)
if not line_comment:
line_comment = ''
if line_alias and line_target:
write_line = '{0}: {1}{2}\n'.format(
line_alias, line_target, line_comment
)
else:
write_line = '{0}\n'.format(line_comment)
if six.PY3:
write_line = write_line.encode(__salt_system_encoding__)
out.write(write_line)
out.close()
os.rename(out.name, afn)
# Search $PATH for the newaliases command
newaliases = salt.utils.path.which('newaliases')
if newaliases is not None:
__salt__['cmd.run'](newaliases)
return True |
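The temp-file-plus-rename sequence above is the standard atomic-replace pattern; a generic sketch of just that part, with a placeholder path and content.

import os
import tempfile

def atomic_write(path, text):
    # Write to a temp file in the same directory, then rename over the target,
    # so readers never observe a half-written file.
    directory = os.path.dirname(path) or "."
    tmp = tempfile.NamedTemporaryFile(mode="w", dir=directory, delete=False)
    try:
        tmp.write(text)
        tmp.close()
        os.rename(tmp.name, path)
    except Exception:
        os.unlink(tmp.name)
        raise

atomic_write("/tmp/aliases.example", "root: admin@example.com\n")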
def fit(self, X):
"""The K-Means itself
"""
self._X = super().cluster(X)
candidates = []
for _ in range(self.n_runs):
self._init_random_centroids()
while True:
prev_clusters = self.clusters
self._assign_clusters()
self._move_centroids()
if np.all(prev_clusters == self.clusters):
break
self._calc_distortion()
candidates.append((self.distortion, self.centroids, self.clusters))
candidates.sort(key=lambda x: x[0])
self.distortion = candidates[0][0]
self.centroids = candidates[0][1]
self.clusters = candidates[0][2]
return self | The K-Means itself | Below is the instruction that describes the task:
### Input:
The K-Means itself
### Response:
def fit(self, X):
"""The K-Means itself
"""
self._X = super().cluster(X)
candidates = []
for _ in range(self.n_runs):
self._init_random_centroids()
while True:
prev_clusters = self.clusters
self._assign_clusters()
self._move_centroids()
if np.all(prev_clusters == self.clusters):
break
self._calc_distortion()
candidates.append((self.distortion, self.centroids, self.clusters))
candidates.sort(key=lambda x: x[0])
self.distortion = candidates[0][0]
self.centroids = candidates[0][1]
self.clusters = candidates[0][2]
return self |
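The restart logic reduces to: repeat the whole Lloyd iteration `n_runs` times and keep the run with the lowest distortion. A tiny generic sketch of that keep-the-best pattern follows; `single_run` is a stand-in for one converged run, not part of the class above.

import random

def single_run(data):
    # Stand-in for one converged K-Means run: returns (distortion, state).
    return random.random(), {"centroids": "..."}

def best_of(n_runs, data):
    candidates = [single_run(data) for _ in range(n_runs)]
    candidates.sort(key=lambda c: c[0])  # smallest distortion first
    return candidates[0]

distortion, state = best_of(5, data=None)
print(distortion)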
def run(self, timeout = 0):
"""
Run a proactor loop and return new socket events. Timeout is a timedelta
object, 0 if active coros or None.
epoll timeout param is an integer number of milliseconds (seconds/1000).
"""
ptimeout = int(timeout.microseconds/1000+timeout.seconds*1000
if timeout else (self.m_resolution if timeout is None else 0))
if self.tokens:
epoll_fd = self.epoll_fd
events = epoll_wait(epoll_fd, 1024, ptimeout)
len_events = len(events)-1
for nr, (ev, fd) in enumerate(events):
act = self.shadow.pop(fd)
if ev & EPOLLHUP:
epoll_ctl(self.epoll_fd, EPOLL_CTL_DEL, fd, 0)
self.handle_error_event(act, 'Hang up.', ConnectionClosed)
elif ev & EPOLLERR:
epoll_ctl(self.epoll_fd, EPOLL_CTL_DEL, fd, 0)
self.handle_error_event(act, 'Unknown error.')
else:
if nr == len_events:
ret = self.yield_event(act)
if not ret:
epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, ev | EPOLLONESHOT)
self.shadow[fd] = act
return ret
else:
if not self.handle_event(act):
epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, ev | EPOLLONESHOT)
self.shadow[fd] = act
else:
sleep(timeout) | Run a proactor loop and return new socket events. Timeout is a timedelta
object, 0 if active coros or None.
epoll timeout param is an integer number of milliseconds (seconds/1000). | Below is the instruction that describes the task:
### Input:
Run a proactor loop and return new socket events. Timeout is a timedelta
object, 0 if active coros or None.
epoll timeout param is an integer number of milliseconds (seconds/1000).
### Response:
def run(self, timeout = 0):
"""
Run a proactor loop and return new socket events. Timeout is a timedelta
object, 0 if active coros or None.
epoll timeout param is an integer number of milliseconds (seconds/1000).
"""
ptimeout = int(timeout.microseconds/1000+timeout.seconds*1000
if timeout else (self.m_resolution if timeout is None else 0))
if self.tokens:
epoll_fd = self.epoll_fd
events = epoll_wait(epoll_fd, 1024, ptimeout)
len_events = len(events)-1
for nr, (ev, fd) in enumerate(events):
act = self.shadow.pop(fd)
if ev & EPOLLHUP:
epoll_ctl(self.epoll_fd, EPOLL_CTL_DEL, fd, 0)
self.handle_error_event(act, 'Hang up.', ConnectionClosed)
elif ev & EPOLLERR:
epoll_ctl(self.epoll_fd, EPOLL_CTL_DEL, fd, 0)
self.handle_error_event(act, 'Unknown error.')
else:
if nr == len_events:
ret = self.yield_event(act)
if not ret:
epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, ev | EPOLLONESHOT)
self.shadow[fd] = act
return ret
else:
if not self.handle_event(act):
epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, ev | EPOLLONESHOT)
self.shadow[fd] = act
else:
sleep(timeout) |
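A minimal, Linux-only standard-library illustration of the EPOLLONESHOT re-arming the loop above performs: once a one-shot event fires, the descriptor stays registered but disarmed until `modify` re-enables it.

import select
import socket

r, w = socket.socketpair()
ep = select.epoll()
ep.register(r.fileno(), select.EPOLLIN | select.EPOLLONESHOT)

w.send(b"ping")
print(ep.poll(1))    # [(fd, EPOLLIN)] -- the one-shot event fires

w.send(b"pong")
print(ep.poll(0.1))  # [] -- still registered, but disarmed
ep.modify(r.fileno(), select.EPOLLIN | select.EPOLLONESHOT)
print(ep.poll(1))    # fires again once re-armed

ep.close()
r.close()
w.close()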
def organization_membership_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/organization_memberships#create-membership"
api_path = "/api/v2/organization_memberships.json"
return self.call(api_path, method="POST", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/organization_memberships#create-membership | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/organization_memberships#create-membership
### Response:
def organization_membership_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/organization_memberships#create-membership"
api_path = "/api/v2/organization_memberships.json"
return self.call(api_path, method="POST", data=data, **kwargs) |
def pandas2igraph(self, edges, directed=True):
"""Convert a pandas edge dataframe to an IGraph graph.
Uses current bindings. Defaults to treating edges as directed.
**Example**
::
import graphistry
g = graphistry.bind()
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = g.bind(source='src', destination='dst')
ig = g.pandas2igraph(es)
ig.vs['community'] = ig.community_infomap().membership
g.bind(point_color='community').plot(ig)
"""
import igraph
self._check_mandatory_bindings(False)
self._check_bound_attribs(edges, ['source', 'destination'], 'Edge')
self._node = self._node or Plotter._defaultNodeId
eattribs = edges.columns.values.tolist()
eattribs.remove(self._source)
eattribs.remove(self._destination)
cols = [self._source, self._destination] + eattribs
etuples = [tuple(x) for x in edges[cols].values]
return igraph.Graph.TupleList(etuples, directed=directed, edge_attrs=eattribs,
vertex_name_attr=self._node) | Convert a pandas edge dataframe to an IGraph graph.
Uses current bindings. Defaults to treating edges as directed.
**Example**
::
import graphistry
g = graphistry.bind()
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = g.bind(source='src', destination='dst')
ig = g.pandas2igraph(es)
ig.vs['community'] = ig.community_infomap().membership
g.bind(point_color='community').plot(ig) | Below is the instruction that describes the task:
### Input:
Convert a pandas edge dataframe to an IGraph graph.
Uses current bindings. Defaults to treating edges as directed.
**Example**
::
import graphistry
g = graphistry.bind()
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = g.bind(source='src', destination='dst')
ig = g.pandas2igraph(es)
ig.vs['community'] = ig.community_infomap().membership
g.bind(point_color='community').plot(ig)
### Response:
def pandas2igraph(self, edges, directed=True):
"""Convert a pandas edge dataframe to an IGraph graph.
Uses current bindings. Defaults to treating edges as directed.
**Example**
::
import graphistry
g = graphistry.bind()
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = g.bind(source='src', destination='dst')
ig = g.pandas2igraph(es)
ig.vs['community'] = ig.community_infomap().membership
g.bind(point_color='community').plot(ig)
"""
import igraph
self._check_mandatory_bindings(False)
self._check_bound_attribs(edges, ['source', 'destination'], 'Edge')
self._node = self._node or Plotter._defaultNodeId
eattribs = edges.columns.values.tolist()
eattribs.remove(self._source)
eattribs.remove(self._destination)
cols = [self._source, self._destination] + eattribs
etuples = [tuple(x) for x in edges[cols].values]
return igraph.Graph.TupleList(etuples, directed=directed, edge_attrs=eattribs,
vertex_name_attr=self._node) |
def img2img_transformer_tiny():
"""Tiny params."""
hparams = img2img_transformer2d_base()
hparams.num_hidden_layers = 2
hparams.hidden_size = 128
hparams.batch_size = 4
hparams.max_length = 128
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.filter_size = 128
hparams.num_heads = 1
hparams.pos = "timing"
return hparams | Tiny params. | Below is the instruction that describes the task:
### Input:
Tiny params.
### Response:
def img2img_transformer_tiny():
"""Tiny params."""
hparams = img2img_transformer2d_base()
hparams.num_hidden_layers = 2
hparams.hidden_size = 128
hparams.batch_size = 4
hparams.max_length = 128
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.filter_size = 128
hparams.num_heads = 1
hparams.pos = "timing"
return hparams |
def create_environment(home_dir, site_packages=False, clear=False,
unzip_setuptools=False,
prompt=None, search_dirs=None, download=False,
no_setuptools=False, no_pip=False, no_wheel=False,
symlink=True):
"""
Creates a new environment in ``home_dir``.
If ``site_packages`` is true, then the global ``site-packages/``
directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
py_executable = os.path.abspath(install_python(
home_dir, lib_dir, inc_dir, bin_dir,
site_packages=site_packages, clear=clear, symlink=symlink))
install_distutils(home_dir)
to_install = []
if not no_setuptools:
to_install.append('setuptools')
if not no_pip:
to_install.append('pip')
if not no_wheel:
to_install.append('wheel')
if to_install:
install_wheel(
to_install,
py_executable,
search_dirs,
download=download,
)
install_activate(home_dir, bin_dir, prompt)
install_python_config(home_dir, bin_dir, prompt) | Creates a new environment in ``home_dir``.
If ``site_packages`` is true, then the global ``site-packages/``
directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared. | Below is the instruction that describes the task:
### Input:
Creates a new environment in ``home_dir``.
If ``site_packages`` is true, then the global ``site-packages/``
directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared.
### Response:
def create_environment(home_dir, site_packages=False, clear=False,
unzip_setuptools=False,
prompt=None, search_dirs=None, download=False,
no_setuptools=False, no_pip=False, no_wheel=False,
symlink=True):
"""
Creates a new environment in ``home_dir``.
If ``site_packages`` is true, then the global ``site-packages/``
directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
py_executable = os.path.abspath(install_python(
home_dir, lib_dir, inc_dir, bin_dir,
site_packages=site_packages, clear=clear, symlink=symlink))
install_distutils(home_dir)
to_install = []
if not no_setuptools:
to_install.append('setuptools')
if not no_pip:
to_install.append('pip')
if not no_wheel:
to_install.append('wheel')
if to_install:
install_wheel(
to_install,
py_executable,
search_dirs,
download=download,
)
install_activate(home_dir, bin_dir, prompt)
install_python_config(home_dir, bin_dir, prompt) |
def add_or_update_record(
self, final_path, ase, chunk_size, next_integrity_chunk,
completed, md5):
# type: (DownloadResumeManager, pathlib.Path,
# blobxfer.models.azure.StorageEntity, int, int, bool,
# str) -> None
"""Add or update a resume record
:param DownloadResumeManager self: this
:param pathlib.Path final_path: final path
:param blobxfer.models.azure.StorageEntity ase: Storage Entity
:param int chunk_size: chunk size in bytes
:param int next_integrity_chunk: next integrity chunk
:param bool completed: if completed
:param str md5: md5 hex digest
"""
key = blobxfer.operations.resume._BaseResumeManager.\
generate_record_key(ase)
with self.datalock():
dl = self.get_record(ase, key=key, lock=False)
if dl is None:
dl = blobxfer.models.resume.Download(
final_path=str(final_path),
length=ase._size,
chunk_size=chunk_size,
next_integrity_chunk=next_integrity_chunk,
completed=completed,
md5=md5,
)
else:
if (dl.completed or
next_integrity_chunk < dl.next_integrity_chunk):
return
if completed:
dl.completed = completed
else:
dl.next_integrity_chunk = next_integrity_chunk
dl.md5hexdigest = md5
self._data[key] = dl
self._data.sync() | Add or update a resume record
:param DownloadResumeManager self: this
:param pathlib.Path final_path: final path
:param blobxfer.models.azure.StorageEntity ase: Storage Entity
:param int chunk_size: chunk size in bytes
:param int next_integrity_chunk: next integrity chunk
:param bool completed: if completed
:param str md5: md5 hex digest | Below is the instruction that describes the task:
### Input:
Add or update a resume record
:param DownloadResumeManager self: this
:param pathlib.Path final_path: final path
:param blobxfer.models.azure.StorageEntity ase: Storage Entity
:param int chunk_size: chunk size in bytes
:param int next_integrity_chunk: next integrity chunk
:param bool completed: if completed
:param str md5: md5 hex digest
### Response:
def add_or_update_record(
self, final_path, ase, chunk_size, next_integrity_chunk,
completed, md5):
# type: (DownloadResumeManager, pathlib.Path,
# blobxfer.models.azure.StorageEntity, int, int, bool,
# str) -> None
"""Add or update a resume record
:param DownloadResumeManager self: this
:param pathlib.Path final_path: final path
:param blobxfer.models.azure.StorageEntity ase: Storage Entity
:param int chunk_size: chunk size in bytes
:param int next_integrity_chunk: next integrity chunk
:param bool completed: if completed
:param str md5: md5 hex digest
"""
key = blobxfer.operations.resume._BaseResumeManager.\
generate_record_key(ase)
with self.datalock():
dl = self.get_record(ase, key=key, lock=False)
if dl is None:
dl = blobxfer.models.resume.Download(
final_path=str(final_path),
length=ase._size,
chunk_size=chunk_size,
next_integrity_chunk=next_integrity_chunk,
completed=completed,
md5=md5,
)
else:
if (dl.completed or
next_integrity_chunk < dl.next_integrity_chunk):
return
if completed:
dl.completed = completed
else:
dl.next_integrity_chunk = next_integrity_chunk
dl.md5hexdigest = md5
self._data[key] = dl
self._data.sync() |
def show(self, title=None, method='', indices=None, force_show=False,
fig=None, **kwargs):
"""Display the function graphically.
Parameters
----------
title : string, optional
Set the title of the figure
method : string, optional
1d methods:
``'plot'`` : graph plot
``'scatter'`` : scattered 2d points (2nd axis <-> value)
2d methods:
``'imshow'`` : image plot with coloring according to
value, including a colorbar.
``'scatter'`` : cloud of scattered 3d points
(3rd axis <-> value)
indices : index expression, optional
Display a slice of the array instead of the full array. The
index expression is most easily created with the `numpy.s_`
constructor, i.e. supply ``np.s_[:, 1, :]`` to display the
first slice along the second axis.
For data with 3 or more dimensions, the 2d slice in the first
two axes at the "middle" along the remaining axes is shown
(semantically ``[:, :, shape[2:] // 2]``).
This option is mutually exclusive to ``coords``.
force_show : bool, optional
Whether the plot should be forced to be shown now or deferred until
later. Note that some backends always display the plot, regardless
of this value.
fig : `matplotlib.figure.Figure`, optional
The figure to show in. Expected to be of same "style", as
the figure given by this function. The most common use case
is that ``fig`` is the return value of an earlier call to
this function.
kwargs : {'figsize', 'saveto', 'clim', ...}, optional
Extra keyword arguments passed on to the display method.
See the Matplotlib functions for documentation of extra
options.
Returns
-------
fig : `matplotlib.figure.Figure`
The resulting figure. It is also shown to the user.
See Also
--------
odl.util.graphics.show_discrete_data : Underlying implementation
"""
from odl.discr import uniform_grid
from odl.util.graphics import show_discrete_data
# Default to showing x-y slice "in the middle"
if indices is None and self.ndim >= 3:
indices = tuple(
[slice(None)] * 2 + [n // 2 for n in self.space.shape[2:]]
)
if isinstance(indices, (Integral, slice)):
indices = (indices,)
elif indices is None or indices == Ellipsis:
indices = (slice(None),) * self.ndim
else:
indices = tuple(indices)
# Replace None by slice(None)
indices = tuple(slice(None) if idx is None else idx for idx in indices)
if Ellipsis in indices:
# Replace Ellipsis with the correct number of [:] expressions
pos = indices.index(Ellipsis)
indices = (indices[:pos] +
(np.s_[:], ) * (self.ndim - len(indices) + 1) +
indices[pos + 1:])
if len(indices) < self.ndim:
raise ValueError('too few axes ({} < {})'.format(len(indices),
self.ndim))
if len(indices) > self.ndim:
raise ValueError('too many axes ({} > {})'.format(len(indices),
self.ndim))
# Squeeze grid and values according to the index expression
full_grid = uniform_grid([0] * self.ndim, np.array(self.shape) - 1,
self.shape)
grid = full_grid[indices].squeeze()
values = self.asarray()[indices].squeeze()
return show_discrete_data(values, grid, title=title, method=method,
force_show=force_show, fig=fig, **kwargs) | Display the function graphically.
Parameters
----------
title : string, optional
Set the title of the figure
method : string, optional
1d methods:
``'plot'`` : graph plot
``'scatter'`` : scattered 2d points (2nd axis <-> value)
2d methods:
``'imshow'`` : image plot with coloring according to
value, including a colorbar.
``'scatter'`` : cloud of scattered 3d points
(3rd axis <-> value)
indices : index expression, optional
Display a slice of the array instead of the full array. The
index expression is most easily created with the `numpy.s_`
constructor, i.e. supply ``np.s_[:, 1, :]`` to display the
first slice along the second axis.
For data with 3 or more dimensions, the 2d slice in the first
two axes at the "middle" along the remaining axes is shown
(semantically ``[:, :, shape[2:] // 2]``).
This option is mutually exclusive to ``coords``.
force_show : bool, optional
Whether the plot should be forced to be shown now or deferred until
later. Note that some backends always display the plot, regardless
of this value.
fig : `matplotlib.figure.Figure`, optional
The figure to show in. Expected to be of same "style", as
the figure given by this function. The most common use case
is that ``fig`` is the return value of an earlier call to
this function.
kwargs : {'figsize', 'saveto', 'clim', ...}, optional
Extra keyword arguments passed on to the display method.
See the Matplotlib functions for documentation of extra
options.
Returns
-------
fig : `matplotlib.figure.Figure`
The resulting figure. It is also shown to the user.
See Also
--------
odl.util.graphics.show_discrete_data : Underlying implementation | Below is the instruction that describes the task:
### Input:
Display the function graphically.
Parameters
----------
title : string, optional
Set the title of the figure
method : string, optional
1d methods:
``'plot'`` : graph plot
``'scatter'`` : scattered 2d points (2nd axis <-> value)
2d methods:
``'imshow'`` : image plot with coloring according to
value, including a colorbar.
``'scatter'`` : cloud of scattered 3d points
(3rd axis <-> value)
indices : index expression, optional
Display a slice of the array instead of the full array. The
index expression is most easily created with the `numpy.s_`
constructor, i.e. supply ``np.s_[:, 1, :]`` to display the
first slice along the second axis.
For data with 3 or more dimensions, the 2d slice in the first
two axes at the "middle" along the remaining axes is shown
(semantically ``[:, :, shape[2:] // 2]``).
This option is mutually exclusive to ``coords``.
force_show : bool, optional
Whether the plot should be forced to be shown now or deferred until
later. Note that some backends always display the plot, regardless
of this value.
fig : `matplotlib.figure.Figure`, optional
The figure to show in. Expected to be of same "style", as
the figure given by this function. The most common use case
is that ``fig`` is the return value of an earlier call to
this function.
kwargs : {'figsize', 'saveto', 'clim', ...}, optional
Extra keyword arguments passed on to the display method.
See the Matplotlib functions for documentation of extra
options.
Returns
-------
fig : `matplotlib.figure.Figure`
The resulting figure. It is also shown to the user.
See Also
--------
odl.util.graphics.show_discrete_data : Underlying implementation
### Response:
def show(self, title=None, method='', indices=None, force_show=False,
fig=None, **kwargs):
"""Display the function graphically.
Parameters
----------
title : string, optional
Set the title of the figure
method : string, optional
1d methods:
``'plot'`` : graph plot
``'scatter'`` : scattered 2d points (2nd axis <-> value)
2d methods:
``'imshow'`` : image plot with coloring according to
value, including a colorbar.
``'scatter'`` : cloud of scattered 3d points
(3rd axis <-> value)
indices : index expression, optional
Display a slice of the array instead of the full array. The
index expression is most easily created with the `numpy.s_`
constructor, i.e. supply ``np.s_[:, 1, :]`` to display the
first slice along the second axis.
For data with 3 or more dimensions, the 2d slice in the first
two axes at the "middle" along the remaining axes is shown
(semantically ``[:, :, shape[2:] // 2]``).
This option is mutually exclusive to ``coords``.
force_show : bool, optional
Whether the plot should be forced to be shown now or deferred until
later. Note that some backends always display the plot, regardless
of this value.
fig : `matplotlib.figure.Figure`, optional
The figure to show in. Expected to be of same "style", as
the figure given by this function. The most common use case
is that ``fig`` is the return value of an earlier call to
this function.
kwargs : {'figsize', 'saveto', 'clim', ...}, optional
Extra keyword arguments passed on to the display method.
See the Matplotlib functions for documentation of extra
options.
Returns
-------
fig : `matplotlib.figure.Figure`
The resulting figure. It is also shown to the user.
See Also
--------
odl.util.graphics.show_discrete_data : Underlying implementation
"""
from odl.discr import uniform_grid
from odl.util.graphics import show_discrete_data
# Default to showing x-y slice "in the middle"
if indices is None and self.ndim >= 3:
indices = tuple(
[slice(None)] * 2 + [n // 2 for n in self.space.shape[2:]]
)
if isinstance(indices, (Integral, slice)):
indices = (indices,)
elif indices is None or indices == Ellipsis:
indices = (slice(None),) * self.ndim
else:
indices = tuple(indices)
# Replace None by slice(None)
indices = tuple(slice(None) if idx is None else idx for idx in indices)
if Ellipsis in indices:
# Replace Ellipsis with the correct number of [:] expressions
pos = indices.index(Ellipsis)
indices = (indices[:pos] +
(np.s_[:], ) * (self.ndim - len(indices) + 1) +
indices[pos + 1:])
if len(indices) < self.ndim:
raise ValueError('too few axes ({} < {})'.format(len(indices),
self.ndim))
if len(indices) > self.ndim:
raise ValueError('too many axes ({} > {})'.format(len(indices),
self.ndim))
# Squeeze grid and values according to the index expression
full_grid = uniform_grid([0] * self.ndim, np.array(self.shape) - 1,
self.shape)
grid = full_grid[indices].squeeze()
values = self.asarray()[indices].squeeze()
return show_discrete_data(values, grid, title=title, method=method,
force_show=force_show, fig=fig, **kwargs) |
def save_source(driver, name):
"""
Save the rendered HTML of the browser.
The location of the source can be configured
by the environment variable `SAVED_SOURCE_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name to use in the output file name.
Note that ".html" is appended automatically
Returns:
None
"""
source = driver.page_source
file_name = os.path.join(os.environ.get('SAVED_SOURCE_DIR'),
'{name}.html'.format(name=name))
try:
with open(file_name, 'wb') as output_file:
output_file.write(source.encode('utf-8'))
except Exception: # pylint: disable=broad-except
msg = u"Could not save the browser page source to {}.".format(file_name)
LOGGER.warning(msg) | Save the rendered HTML of the browser.
The location of the source can be configured
by the environment variable `SAVED_SOURCE_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name to use in the output file name.
Note that ".html" is appended automatically
Returns:
None | Below is the instruction that describes the task:
### Input:
Save the rendered HTML of the browser.
The location of the source can be configured
by the environment variable `SAVED_SOURCE_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name to use in the output file name.
Note that ".html" is appended automatically
Returns:
None
### Response:
def save_source(driver, name):
"""
Save the rendered HTML of the browser.
The location of the source can be configured
by the environment variable `SAVED_SOURCE_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name to use in the output file name.
Note that ".html" is appended automatically
Returns:
None
"""
source = driver.page_source
file_name = os.path.join(os.environ.get('SAVED_SOURCE_DIR'),
'{name}.html'.format(name=name))
try:
with open(file_name, 'wb') as output_file:
output_file.write(source.encode('utf-8'))
except Exception: # pylint: disable=broad-except
msg = u"Could not save the browser page source to {}.".format(file_name)
LOGGER.warning(msg) |
def operate_menu():
"Select between these operations on the database"
selection = True
while selection:
print globals()['operate_menu'].__doc__
selection = select([
'chill.database functions',
'execute sql file',
'render_node',
'New collection',
'Manage collection',
'Add document for node',
'help',
])
if selection == 'chill.database functions':
mode_database_functions()
elif selection == 'execute sql file':
print "View the sql file and show a fill in the blanks interface with raw_input"
sqlfile = choose_query_file()
if not sqlfile:
# return to the menu choices if not file picked
selection = True
else:
sql_named_placeholders_re = re.compile(r":(\w+)")
sql = fetch_query_string(sqlfile)
placeholders = set(sql_named_placeholders_re.findall(sql))
print sql
data = {}
for placeholder in placeholders:
value = raw_input(placeholder + ': ')
data[placeholder] = value
result = []
try:
result = db.execute(text(sql), data)
except DatabaseError as err:
current_app.logger.error("DatabaseError: %s", err)
if result and result.returns_rows:
result = result.fetchall()
print result
if not result:
print 'No results.'
else:
kw = result[0]
if 'node_id' in kw:
print 'render node %s' % kw['node_id']
value = render_node(kw['node_id'], **kw)
print safe_dump(value, default_flow_style=False)
else:
#print safe_dump(rowify(result, [(x, None) for x in result[0].keys()]), default_flow_style=False)
print safe_dump([dict(zip(x.keys(), x.values())) for x in result], default_flow_style=False)
elif selection == 'render_node':
print globals()['render_node'].__doc__
node_id = existing_node_input()
value = render_value_for_node(node_id)
print safe_dump(value, default_flow_style=False)
elif selection == 'New collection':
mode_new_collection()
elif selection == 'Manage collection':
mode_collection()
elif selection == 'Add document for node':
folder = current_app.config.get('DOCUMENT_FOLDER')
if not folder:
print "No DOCUMENT_FOLDER configured for the application."
else:
choices = map(os.path.basename,
glob(os.path.join(folder, '*'))
)
choices.sort()
if len(choices) == 0:
print "No files found in DOCUMENT_FOLDER."
else:
filename = select(choices)
if filename:
defaultname = os.path.splitext(filename)[0]
nodename = raw_input("Enter name for node [{0}]: ".format(defaultname)) or defaultname
node = insert_node(name=nodename, value=filename)
print "Added document '%s' to node '%s' with id: %s" % (filename, nodename, node)
elif selection == 'help':
print "------"
print __doc__
print "------"
else:
print 'Done' | Select between these operations on the database | Below is the instruction that describes the task:
### Input:
Select between these operations on the database
### Response:
def operate_menu():
"Select between these operations on the database"
selection = True
while selection:
print globals()['operate_menu'].__doc__
selection = select([
'chill.database functions',
'execute sql file',
'render_node',
'New collection',
'Manage collection',
'Add document for node',
'help',
])
if selection == 'chill.database functions':
mode_database_functions()
elif selection == 'execute sql file':
print "View the sql file and show a fill in the blanks interface with raw_input"
sqlfile = choose_query_file()
if not sqlfile:
# return to the menu choices if not file picked
selection = True
else:
sql_named_placeholders_re = re.compile(r":(\w+)")
sql = fetch_query_string(sqlfile)
placeholders = set(sql_named_placeholders_re.findall(sql))
print sql
data = {}
for placeholder in placeholders:
value = raw_input(placeholder + ': ')
data[placeholder] = value
result = []
try:
result = db.execute(text(sql), data)
except DatabaseError as err:
current_app.logger.error("DatabaseError: %s", err)
if result and result.returns_rows:
result = result.fetchall()
print result
if not result:
print 'No results.'
else:
kw = result[0]
if 'node_id' in kw:
print 'render node %s' % kw['node_id']
value = render_node(kw['node_id'], **kw)
print safe_dump(value, default_flow_style=False)
else:
#print safe_dump(rowify(result, [(x, None) for x in result[0].keys()]), default_flow_style=False)
print safe_dump([dict(zip(x.keys(), x.values())) for x in result], default_flow_style=False)
elif selection == 'render_node':
print globals()['render_node'].__doc__
node_id = existing_node_input()
value = render_value_for_node(node_id)
print safe_dump(value, default_flow_style=False)
elif selection == 'New collection':
mode_new_collection()
elif selection == 'Manage collection':
mode_collection()
elif selection == 'Add document for node':
folder = current_app.config.get('DOCUMENT_FOLDER')
if not folder:
print "No DOCUMENT_FOLDER configured for the application."
else:
choices = map(os.path.basename,
glob(os.path.join(folder, '*'))
)
choices.sort()
if len(choices) == 0:
print "No files found in DOCUMENT_FOLDER."
else:
filename = select(choices)
if filename:
defaultname = os.path.splitext(filename)[0]
nodename = raw_input("Enter name for node [{0}]: ".format(defaultname)) or defaultname
node = insert_node(name=nodename, value=filename)
print "Added document '%s' to node '%s' with id: %s" % (filename, nodename, node)
elif selection == 'help':
print "------"
print __doc__
print "------"
else:
print 'Done' |
def error(self, message, print_help=False):
"""Provide a more helpful message if there are too few arguments."""
if 'too few arguments' in message.lower():
target = sys.argv.pop(0)
sys.argv.insert(
0, os.path.basename(target) or os.path.relpath(target))
message = ("%s. Try getting help with `%s --help`"
% (message, " ".join(sys.argv)))
if print_help:
self.print_help()
else:
self.print_usage()
sys.stderr.write('\nerror: %s\n' % message)
sys.exit(2) | Provide a more helpful message if there are too few arguments. | Below is the instruction that describes the task:
### Input:
Provide a more helpful message if there are too few arguments.
### Response:
def error(self, message, print_help=False):
"""Provide a more helpful message if there are too few arguments."""
if 'too few arguments' in message.lower():
target = sys.argv.pop(0)
sys.argv.insert(
0, os.path.basename(target) or os.path.relpath(target))
message = ("%s. Try getting help with `%s --help`"
% (message, " ".join(sys.argv)))
if print_help:
self.print_help()
else:
self.print_usage()
sys.stderr.write('\nerror: %s\n' % message)
sys.exit(2) |
def default_freq(**indexer):
"""Return the default frequency."""
freq = 'AS-JAN'
if indexer:
if 'DJF' in indexer.values():
freq = 'AS-DEC'
if 'month' in indexer and sorted(indexer.values()) != indexer.values():
raise (NotImplementedError)
return freq | Return the default frequency. | Below is the instruction that describes the task:
### Input:
Return the default frequency.
### Response:
def default_freq(**indexer):
"""Return the default frequency."""
freq = 'AS-JAN'
if indexer:
if 'DJF' in indexer.values():
freq = 'AS-DEC'
if 'month' in indexer and sorted(indexer.values()) != indexer.values():
raise (NotImplementedError)
return freq |
def _getattr(self, attri, fname=None, numtype='cycNum'):
''' Private method for getting an attribute, called from get.'''
if str(fname.__class__)=="<type 'list'>":
isList=True
else:
isList=False
data=[]
if fname==None:
fname=self.files
numtype='file'
isList=True
if isList:
for i in range(len(fname)):
if attri in self.cattrs:
data.append(self.getCycleData(attri,fname[i],numtype))
elif attri in self.dcols:
data.append(self.getColData(attri,fname[i],numtype))
elif attri in self.get('ISOTP',fname,numtype):
data.append(self.getElement(attri,fname[i],numtype))
else:
print('Attribute '+attri+ ' does not exist')
print('Returning none')
return None
else:
if attri in self.cattrs:
return self.getCycleData(attri,fname,numtype)
elif attri in self.dcols:
return self.getColData(attri,fname,numtype)
elif attri in self.get('ISOTP',fname,numtype):
return self.getElement(attri,fname,numtype)
else:
print('Attribute '+attri+ ' does not exist')
print('Returning none')
return None
return data | Private method for getting an attribute, called from get. | Below is the instruction that describes the task:
### Input:
Private method for getting an attribute, called from get.
### Response:
def _getattr(self, attri, fname=None, numtype='cycNum'):
''' Private method for getting an attribute, called from get.'''
if str(fname.__class__)=="<type 'list'>":
isList=True
else:
isList=False
data=[]
if fname==None:
fname=self.files
numtype='file'
isList=True
if isList:
for i in range(len(fname)):
if attri in self.cattrs:
data.append(self.getCycleData(attri,fname[i],numtype))
elif attri in self.dcols:
data.append(self.getColData(attri,fname[i],numtype))
elif attri in self.get('ISOTP',fname,numtype):
data.append(self.getElement(attri,fname[i],numtype))
else:
print('Attribute '+attri+ ' does not exist')
print('Returning none')
return None
else:
if attri in self.cattrs:
return self.getCycleData(attri,fname,numtype)
elif attri in self.dcols:
return self.getColData(attri,fname,numtype)
elif attri in self.get('ISOTP',fname,numtype):
return self.getElement(attri,fname,numtype)
else:
print('Attribute '+attri+ ' does not exist')
print('Returning none')
return None
return data |
def check_output_error_and_retcode(*popenargs, **kwargs):
"""
This function is used to obtain the stdout of a command. It is only used
internally; we recommend using the make_external_call command if you want
to call external executables.
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, error = process.communicate()
retcode = process.poll()
return output, error, retcode | This function is used to obtain the stdout of a command. It is only used
internally; we recommend using the make_external_call command if you want
to call external executables. | Below is the instruction that describes the task:
### Input:
This function is used to obtain the stdout of a command. It is only used
internally; we recommend using the make_external_call command if you want
to call external executables.
### Response:
def check_output_error_and_retcode(*popenargs, **kwargs):
"""
This function is used to obtain the stdout of a command. It is only used
internally, recommend using the make_external_call command if you want
to call external executables.
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, error = process.communicate()
retcode = process.poll()
return output, error, retcode |
def route_acl(self, *acl, **options):
"""Decorator to attach an ACL to a route.
E.g::
@app.route('/url/to/view')
@authz.route_acl('''
ALLOW WHEEL ALL
DENY ANY ALL
''')
def my_admin_function():
pass
"""
def _route_acl(func):
func.__acl__ = acl
@functools.wraps(func)
def wrapped(*args, **kwargs):
permission = 'http.' + request.method.lower()
local_opts = options.copy()
local_opts.setdefault('default', current_app.config['ACL_ROUTE_DEFAULT_STATE'])
self.assert_can(permission, func, **local_opts)
return func(*args, **kwargs)
return wrapped
return _route_acl | Decorator to attach an ACL to a route.
E.g::
@app.route('/url/to/view')
@authz.route_acl('''
ALLOW WHEEL ALL
DENY ANY ALL
''')
def my_admin_function():
pass | Below is the instruction that describes the task:
### Input:
Decorator to attach an ACL to a route.
E.g::
@app.route('/url/to/view')
@authz.route_acl('''
ALLOW WHEEL ALL
DENY ANY ALL
''')
def my_admin_function():
pass
### Response:
def route_acl(self, *acl, **options):
"""Decorator to attach an ACL to a route.
E.g::
@app.route('/url/to/view')
@authz.route_acl('''
ALLOW WHEEL ALL
DENY ANY ALL
''')
def my_admin_function():
pass
"""
def _route_acl(func):
func.__acl__ = acl
@functools.wraps(func)
def wrapped(*args, **kwargs):
permission = 'http.' + request.method.lower()
local_opts = options.copy()
local_opts.setdefault('default', current_app.config['ACL_ROUTE_DEFAULT_STATE'])
self.assert_can(permission, func, **local_opts)
return func(*args, **kwargs)
return wrapped
return _route_acl |
def check_packages(db_name):
"""
Check if the driver for the user defined host is available. If it is not available, download it using PIP
:param db_name:
:return:
"""
print('Checking for required Database Driver')
reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])
installed_packages = [r.decode().split('==')[0] for r in reqs.split()]
# print(installed_packages)
if db_name.lower() == 'mysql':
if 'PyMySQL' not in installed_packages:
print('Installing required Database Driver')
os.system('pip install pymysql')
if db_name.lower() == 'postgresql':
if 'psycopg2-binary' not in installed_packages:
print('Installing required Database Driver')
os.system('pip install psycopg2-binary')
return True | Check if the driver for the user defined host is available. If it is not available, download it using PIP
:param db_name:
:return: | Below is the instruction that describes the task:
### Input:
Check if the driver for the user defined host is available. If it is not available, download it using PIP
:param db_name:
:return:
### Response:
def check_packages(db_name):
"""
Check if the driver for the user defined host is available. If it is not available, download it using PIP
:param db_name:
:return:
"""
print('Checking for required Database Driver')
reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])
installed_packages = [r.decode().split('==')[0] for r in reqs.split()]
# print(installed_packages)
if db_name.lower() == 'mysql':
if 'PyMySQL' not in installed_packages:
print('Installing required Database Driver')
os.system('pip install pymysql')
if db_name.lower() == 'postgresql':
if 'psycopg2-binary' not in installed_packages:
print('Installing required Database Driver')
os.system('pip install psycopg2-binary')
return True |
def get_session(self, sid, namespace=None):
"""Return the user session for a client.
:param sid: The session id of the client.
:param namespace: The Socket.IO namespace. If this argument is omitted
the default namespace is used.
The return value is a dictionary. Modifications made to this
dictionary are not guaranteed to be preserved unless
``save_session()`` is called, or when the ``session`` context manager
is used.
"""
namespace = namespace or '/'
eio_session = self.eio.get_session(sid)
return eio_session.setdefault(namespace, {}) | Return the user session for a client.
:param sid: The session id of the client.
:param namespace: The Socket.IO namespace. If this argument is omitted
the default namespace is used.
The return value is a dictionary. Modifications made to this
dictionary are not guaranteed to be preserved unless
``save_session()`` is called, or when the ``session`` context manager
is used. | Below is the instruction that describes the task:
### Input:
Return the user session for a client.
:param sid: The session id of the client.
:param namespace: The Socket.IO namespace. If this argument is omitted
the default namespace is used.
The return value is a dictionary. Modifications made to this
dictionary are not guaranteed to be preserved unless
``save_session()`` is called, or when the ``session`` context manager
is used.
### Response:
def get_session(self, sid, namespace=None):
"""Return the user session for a client.
:param sid: The session id of the client.
:param namespace: The Socket.IO namespace. If this argument is omitted
the default namespace is used.
The return value is a dictionary. Modifications made to this
dictionary are not guaranteed to be preserved unless
``save_session()`` is called, or when the ``session`` context manager
is used.
"""
namespace = namespace or '/'
eio_session = self.eio.get_session(sid)
return eio_session.setdefault(namespace, {}) |
def local_manager_rule(self):
"""Return rule for local manager.
"""
adm_gid = self.local_manager_gid
if not adm_gid:
return None
config = self.root['settings']['ugm_localmanager'].attrs
return config[adm_gid] | Return rule for local manager. | Below is the instruction that describes the task:
### Input:
Return rule for local manager.
### Response:
def local_manager_rule(self):
"""Return rule for local manager.
"""
adm_gid = self.local_manager_gid
if not adm_gid:
return None
config = self.root['settings']['ugm_localmanager'].attrs
return config[adm_gid] |
def health(args):
""" Health FireCloud Server """
r = fapi.health()
fapi._check_response_code(r, 200)
return r.content | Health FireCloud Server | Below is the instruction that describes the task:
### Input:
Health FireCloud Server
### Response:
def health(args):
""" Health FireCloud Server """
r = fapi.health()
fapi._check_response_code(r, 200)
return r.content |
def schedule2calendar(schedule, name='Timetable', using_todo=True):
"""
Convert a class schedule into an icalendar calendar
:param schedule: the class schedule
:param name: the calendar name
:param using_todo: use ``icalendar.Todo`` instead of ``icalendar.Event`` as the event class
:return: icalendar.Calendar()
"""
# https://zh.wikipedia.org/wiki/ICalendar
# http://icalendar.readthedocs.io/en/latest
# https://tools.ietf.org/html/rfc5545
cal = icalendar.Calendar()
cal.add('X-WR-TIMEZONE', 'Asia/Shanghai')
cal.add('X-WR-CALNAME', name)
cls = icalendar.Todo if using_todo else icalendar.Event
for week, start, end, data in schedule:
# "δΊδ»Ά"η»δ»Άζ΄ε
·ιη¨ζ§, Google ζ₯εδΈζ―ζ"εΎ
ε"η»δ»Ά
item = cls(
SUMMARY='Week {:02d}-{}'.format(week, data),
DTSTART=icalendar.vDatetime(start),
DTEND=icalendar.vDatetime(end),
DESCRIPTION='Starts at {}, ends at {}'.format(start.strftime('%H:%M'), end.strftime('%H:%M'))
)
now = datetime.now()
# The "Event" component does not have this status; it is useful for todo-list style applications
# https://tools.ietf.org/html/rfc5545#section-3.2.12
if using_todo:
if start < now < end:
item.add('STATUS', 'IN-PROCESS')
elif now > end:
item.add('STATUS', 'COMPLETED')
cal.add_component(item)
return cal | Convert a class schedule into an icalendar calendar
:param schedule: the class schedule
:param name: the calendar name
:param using_todo: use ``icalendar.Todo`` instead of ``icalendar.Event`` as the event class
:return: icalendar.Calendar() | Below is the instruction that describes the task:
### Input:
Convert a class schedule into an icalendar calendar
:param schedule: the class schedule
:param name: the calendar name
:param using_todo: use ``icalendar.Todo`` instead of ``icalendar.Event`` as the event class
:return: icalendar.Calendar()
### Response:
def schedule2calendar(schedule, name='Timetable', using_todo=True):
"""
Convert a class schedule into an icalendar calendar
:param schedule: the class schedule
:param name: the calendar name
:param using_todo: use ``icalendar.Todo`` instead of ``icalendar.Event`` as the event class
:return: icalendar.Calendar()
"""
# https://zh.wikipedia.org/wiki/ICalendar
# http://icalendar.readthedocs.io/en/latest
# https://tools.ietf.org/html/rfc5545
cal = icalendar.Calendar()
cal.add('X-WR-TIMEZONE', 'Asia/Shanghai')
cal.add('X-WR-CALNAME', name)
cls = icalendar.Todo if using_todo else icalendar.Event
for week, start, end, data in schedule:
# "δΊδ»Ά"η»δ»Άζ΄ε
·ιη¨ζ§, Google ζ₯εδΈζ―ζ"εΎ
ε"η»δ»Ά
item = cls(
SUMMARY='Week {:02d}-{}'.format(week, data),
DTSTART=icalendar.vDatetime(start),
DTEND=icalendar.vDatetime(end),
DESCRIPTION='Starts at {}, ends at {}'.format(start.strftime('%H:%M'), end.strftime('%H:%M'))
)
now = datetime.now()
# The "Event" component does not have this status; it is useful for todo-list style applications
# https://tools.ietf.org/html/rfc5545#section-3.2.12
if using_todo:
if start < now < end:
item.add('STATUS', 'IN-PROCESS')
elif now > end:
item.add('STATUS', 'COMPLETED')
cal.add_component(item)
return cal |
def clear_caches(): # suppress(unused-function)
"""Clear all caches."""
for _, reader in _spellchecker_cache.values():
reader.close()
_spellchecker_cache.clear()
_valid_words_cache.clear()
_user_dictionary_cache.clear() | Clear all caches. | Below is the instruction that describes the task:
### Input:
Clear all caches.
### Response:
def clear_caches(): # suppress(unused-function)
"""Clear all caches."""
for _, reader in _spellchecker_cache.values():
reader.close()
_spellchecker_cache.clear()
_valid_words_cache.clear()
_user_dictionary_cache.clear() |
def text(draw, xy, txt, fill=None, font=None):
"""
Draw a legacy font starting at :py:attr:`x`, :py:attr:`y` using the
prescribed fill and font.
:param draw: A valid canvas to draw the text onto.
:type draw: PIL.ImageDraw
:param txt: The text string to display (must be ASCII only).
:type txt: str
:param xy: An ``(x, y)`` tuple denoting the top-left corner to draw the
text.
:type xy: tuple
:param fill: The fill color to use (standard Pillow color name or RGB
tuple).
:param font: The font (from :py:mod:`luma.core.legacy.font`) to use.
"""
font = font or DEFAULT_FONT
x, y = xy
for ch in txt:
for byte in font[ord(ch)]:
for j in range(8):
if byte & 0x01 > 0:
draw.point((x, y + j), fill=fill)
byte >>= 1
x += 1 | Draw a legacy font starting at :py:attr:`x`, :py:attr:`y` using the
prescribed fill and font.
:param draw: A valid canvas to draw the text onto.
:type draw: PIL.ImageDraw
:param txt: The text string to display (must be ASCII only).
:type txt: str
:param xy: An ``(x, y)`` tuple denoting the top-left corner to draw the
text.
:type xy: tuple
:param fill: The fill color to use (standard Pillow color name or RGB
tuple).
:param font: The font (from :py:mod:`luma.core.legacy.font`) to use. | Below is the instruction that describes the task:
### Input:
Draw a legacy font starting at :py:attr:`x`, :py:attr:`y` using the
prescribed fill and font.
:param draw: A valid canvas to draw the text onto.
:type draw: PIL.ImageDraw
:param txt: The text string to display (must be ASCII only).
:type txt: str
:param xy: An ``(x, y)`` tuple denoting the top-left corner to draw the
text.
:type xy: tuple
:param fill: The fill color to use (standard Pillow color name or RGB
tuple).
:param font: The font (from :py:mod:`luma.core.legacy.font`) to use.
### Response:
def text(draw, xy, txt, fill=None, font=None):
"""
Draw a legacy font starting at :py:attr:`x`, :py:attr:`y` using the
prescribed fill and font.
:param draw: A valid canvas to draw the text onto.
:type draw: PIL.ImageDraw
:param txt: The text string to display (must be ASCII only).
:type txt: str
:param xy: An ``(x, y)`` tuple denoting the top-left corner to draw the
text.
:type xy: tuple
:param fill: The fill color to use (standard Pillow color name or RGB
tuple).
:param font: The font (from :py:mod:`luma.core.legacy.font`) to use.
"""
font = font or DEFAULT_FONT
x, y = xy
for ch in txt:
for byte in font[ord(ch)]:
for j in range(8):
if byte & 0x01 > 0:
draw.point((x, y + j), fill=fill)
byte >>= 1
x += 1 |
def as_yml(self):
"""
Return yml compatible version of self
"""
return YmlFileEvent(name=str(self.name),
subfolder=str(self.subfolder)) | Return yml compatible version of self | Below is the instruction that describes the task:
### Input:
Return yml compatible version of self
### Response:
def as_yml(self):
"""
Return yml compatible version of self
"""
return YmlFileEvent(name=str(self.name),
subfolder=str(self.subfolder)) |
def add_child(self, child=None, name=None, dist=None, support=None):
"""
Adds a new child to this node. If child node is not supplied
as an argument, a new node instance will be created.
Parameters
----------
child:
the node instance to be added as a child.
name:
the name that will be given to the child.
dist:
the distance from the node to the child.
support:
the support value of child partition.
Returns:
--------
The child node instance
"""
if child is None:
child = self.__class__()
if name is not None:
child.name = name
if dist is not None:
child.dist = dist
if support is not None:
child.support = support
self.children.append(child)
child.up = self
return child | Adds a new child to this node. If child node is not supplied
as an argument, a new node instance will be created.
Parameters
----------
child:
the node instance to be added as a child.
name:
the name that will be given to the child.
dist:
the distance from the node to the child.
support:
the support value of child partition.
Returns:
--------
The child node instance | Below is the instruction that describes the task:
### Input:
Adds a new child to this node. If child node is not supplied
as an argument, a new node instance will be created.
Parameters
----------
child:
the node instance to be added as a child.
name:
the name that will be given to the child.
dist:
the distance from the node to the child.
support:
the support value of child partition.
Returns:
--------
The child node instance
### Response:
def add_child(self, child=None, name=None, dist=None, support=None):
"""
Adds a new child to this node. If child node is not supplied
as an argument, a new node instance will be created.
Parameters
----------
child:
the node instance to be added as a child.
name:
the name that will be given to the child.
dist:
the distance from the node to the child.
support:
the support value of child partition.
Returns:
--------
The child node instance
"""
if child is None:
child = self.__class__()
if name is not None:
child.name = name
if dist is not None:
child.dist = dist
if support is not None:
child.support = support
self.children.append(child)
child.up = self
return child |
def version(*args, **attrs):
"""Show the version and exit."""
if hasattr(sys, "_getframe"):
package = attrs.pop("package", sys._getframe(1).f_globals.get("__package__"))
if package:
attrs.setdefault("version", get_version(package))
return click.version_option(*args, **attrs) | Show the version and exit. | Below is the instruction that describes the task:
### Input:
Show the version and exit.
### Response:
def version(*args, **attrs):
"""Show the version and exit."""
if hasattr(sys, "_getframe"):
package = attrs.pop("package", sys._getframe(1).f_globals.get("__package__"))
if package:
attrs.setdefault("version", get_version(package))
return click.version_option(*args, **attrs) |
def prior_rvs(self, size=1, prior=None):
"""Returns random variates drawn from the prior.
If the ``sampling_params`` are different from the ``variable_params``,
the variates are transformed to the `sampling_params` parameter space
before being returned.
Parameters
----------
size : int, optional
Number of random values to return for each parameter. Default is 1.
prior : JointDistribution, optional
Use the given prior to draw values rather than the saved prior.
Returns
-------
FieldArray
A field array of the random values.
"""
# draw values from the prior
if prior is None:
prior = self.prior_distribution
p0 = prior.rvs(size=size)
# transform if necessary
if self.sampling_transforms is not None:
ptrans = self.sampling_transforms.apply(p0)
# pull out the sampling args
p0 = FieldArray.from_arrays([ptrans[arg]
for arg in self.sampling_params],
names=self.sampling_params)
return p0 | Returns random variates drawn from the prior.
If the ``sampling_params`` are different from the ``variable_params``,
the variates are transformed to the `sampling_params` parameter space
before being returned.
Parameters
----------
size : int, optional
Number of random values to return for each parameter. Default is 1.
prior : JointDistribution, optional
Use the given prior to draw values rather than the saved prior.
Returns
-------
FieldArray
A field array of the random values. | Below is the instruction that describes the task:
### Input:
Returns random variates drawn from the prior.
If the ``sampling_params`` are different from the ``variable_params``,
the variates are transformed to the `sampling_params` parameter space
before being returned.
Parameters
----------
size : int, optional
Number of random values to return for each parameter. Default is 1.
prior : JointDistribution, optional
Use the given prior to draw values rather than the saved prior.
Returns
-------
FieldArray
A field array of the random values.
### Response:
def prior_rvs(self, size=1, prior=None):
"""Returns random variates drawn from the prior.
If the ``sampling_params`` are different from the ``variable_params``,
the variates are transformed to the `sampling_params` parameter space
before being returned.
Parameters
----------
size : int, optional
Number of random values to return for each parameter. Default is 1.
prior : JointDistribution, optional
Use the given prior to draw values rather than the saved prior.
Returns
-------
FieldArray
A field array of the random values.
"""
# draw values from the prior
if prior is None:
prior = self.prior_distribution
p0 = prior.rvs(size=size)
# transform if necessary
if self.sampling_transforms is not None:
ptrans = self.sampling_transforms.apply(p0)
# pull out the sampling args
p0 = FieldArray.from_arrays([ptrans[arg]
for arg in self.sampling_params],
names=self.sampling_params)
return p0 |
def resolve_model(self, model):
'''
Resolve a model given a name or dict with `class` entry.
:raises ValueError: model specification is wrong or does not exist
'''
if not model:
raise ValueError('Unsupported model specifications')
if isinstance(model, basestring):
classname = model
elif isinstance(model, dict) and 'class' in model:
classname = model['class']
else:
raise ValueError('Unsupported model specifications')
try:
return get_document(classname)
except self.NotRegistered:
message = 'Model "{0}" does not exist'.format(classname)
raise ValueError(message) | Resolve a model given a name or dict with `class` entry.
:raises ValueError: model specification is wrong or does not exist | Below is the instruction that describes the task:
### Input:
Resolve a model given a name or dict with `class` entry.
:raises ValueError: model specification is wrong or does not exist
### Response:
def resolve_model(self, model):
'''
Resolve a model given a name or dict with `class` entry.
:raises ValueError: model specification is wrong or does not exist
'''
if not model:
raise ValueError('Unsupported model specifications')
if isinstance(model, basestring):
classname = model
elif isinstance(model, dict) and 'class' in model:
classname = model['class']
else:
raise ValueError('Unsupported model specifications')
try:
return get_document(classname)
except self.NotRegistered:
message = 'Model "{0}" does not exist'.format(classname)
raise ValueError(message) |
def reorder(self, single_column=False):
"""Force a reorder of the displayed items"""
if single_column:
columns = self.sortOrder[:1]
else:
columns = self.sortOrder
for ascending,column in columns[::-1]:
# Python 2.2+ guarantees stable sort, so sort by each column in reverse
# order will order by the assigned columns
self.sorted.sort( key=column.get, reverse=(not ascending)) | Force a reorder of the displayed items | Below is the instruction that describes the task:
### Input:
Force a reorder of the displayed items
### Response:
def reorder(self, single_column=False):
"""Force a reorder of the displayed items"""
if single_column:
columns = self.sortOrder[:1]
else:
columns = self.sortOrder
for ascending,column in columns[::-1]:
# Python 2.2+ guarantees stable sort, so sort by each column in reverse
# order will order by the assigned columns
self.sorted.sort( key=column.get, reverse=(not ascending)) |
def bna_config_cmd_output_status_string(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
bna_config_cmd = ET.Element("bna_config_cmd")
config = bna_config_cmd
output = ET.SubElement(bna_config_cmd, "output")
status_string = ET.SubElement(output, "status-string")
status_string.text = kwargs.pop('status_string')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def bna_config_cmd_output_status_string(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
bna_config_cmd = ET.Element("bna_config_cmd")
config = bna_config_cmd
output = ET.SubElement(bna_config_cmd, "output")
status_string = ET.SubElement(output, "status-string")
status_string.text = kwargs.pop('status_string')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def accuracy_curve(self, delta_tau=0.01):
""" Computes the relationship between probability threshold
and classification accuracy. """
# compute thresholds based on the sorted probabilities
orig_thresh = self.threshold
sorted_labels, sorted_probs = self.sorted_values
scores = []
taus = []
tau = 0
for k in range(len(sorted_labels)):
# compute new accuracy
self.threshold = tau
scores.append(self.accuracy)
taus.append(tau)
# update threshold
tau = sorted_probs[k]
# add last datapoint
tau = 1.0
self.threshold = tau
scores.append(self.accuracy)
taus.append(tau)
self.threshold = orig_thresh
return scores, taus | Computes the relationship between probability threshold
and classification accuracy. | Below is the instruction that describes the task:
### Input:
Computes the relationship between probability threshold
and classification accuracy.
### Response:
def accuracy_curve(self, delta_tau=0.01):
""" Computes the relationship between probability threshold
and classification accuracy. """
# compute thresholds based on the sorted probabilities
orig_thresh = self.threshold
sorted_labels, sorted_probs = self.sorted_values
scores = []
taus = []
tau = 0
for k in range(len(sorted_labels)):
# compute new accuracy
self.threshold = tau
scores.append(self.accuracy)
taus.append(tau)
# update threshold
tau = sorted_probs[k]
# add last datapoint
tau = 1.0
self.threshold = tau
scores.append(self.accuracy)
taus.append(tau)
self.threshold = orig_thresh
return scores, taus |
def _find_interfaces_ip(mac):
'''
Helper to search the interfaces IPs using the MAC address.
'''
try:
mac = napalm_helpers.convert(napalm_helpers.mac, mac)
except AddrFormatError:
return ('', '', [])
all_interfaces = _get_mine('net.interfaces')
all_ipaddrs = _get_mine('net.ipaddrs')
for device, device_interfaces in six.iteritems(all_interfaces):
if not device_interfaces.get('result', False):
continue
for interface, interface_details in six.iteritems(device_interfaces.get('out', {})):
try:
interface_mac = napalm_helpers.convert(napalm_helpers.mac, interface_details.get('mac_address'))
except AddrFormatError:
continue
if mac != interface_mac:
continue
interface_ipaddrs = all_ipaddrs.get(device, {}).get('out', {}).get(interface, {})
ip_addresses = interface_ipaddrs.get('ipv4', {})
ip_addresses.update(interface_ipaddrs.get('ipv6', {}))
interface_ips = ['{0}/{1}'.format(ip_addr,
addr_details.get('prefix_length', '32'))
for ip_addr, addr_details in six.iteritems(ip_addresses)]
return device, interface, interface_ips
return ('', '', []) | Helper to search the interfaces IPs using the MAC address. | Below is the instruction that describes the task:
### Input:
Helper to search the interfaces IPs using the MAC address.
### Response:
def _find_interfaces_ip(mac):
'''
Helper to search the interfaces IPs using the MAC address.
'''
try:
mac = napalm_helpers.convert(napalm_helpers.mac, mac)
except AddrFormatError:
return ('', '', [])
all_interfaces = _get_mine('net.interfaces')
all_ipaddrs = _get_mine('net.ipaddrs')
for device, device_interfaces in six.iteritems(all_interfaces):
if not device_interfaces.get('result', False):
continue
for interface, interface_details in six.iteritems(device_interfaces.get('out', {})):
try:
interface_mac = napalm_helpers.convert(napalm_helpers.mac, interface_details.get('mac_address'))
except AddrFormatError:
continue
if mac != interface_mac:
continue
interface_ipaddrs = all_ipaddrs.get(device, {}).get('out', {}).get(interface, {})
ip_addresses = interface_ipaddrs.get('ipv4', {})
ip_addresses.update(interface_ipaddrs.get('ipv6', {}))
interface_ips = ['{0}/{1}'.format(ip_addr,
addr_details.get('prefix_length', '32'))
for ip_addr, addr_details in six.iteritems(ip_addresses)]
return device, interface, interface_ips
return ('', '', []) |
def user_absent(name):
'''
Ensure a user is not present
name
username to remove if it exists
Examples:
.. code-block:: yaml
delete:
onyx.user_absent:
- name: daniel
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
old_user = __salt__['onyx.cmd']('get_user', username=name)
if not old_user:
ret['result'] = True
ret['comment'] = 'User does not exist'
return ret
if __opts__['test'] is True and old_user:
ret['result'] = None
ret['comment'] = 'User will be removed'
ret['changes']['old'] = old_user
ret['changes']['new'] = ''
return ret
__salt__['onyx.cmd']('remove_user', username=name)
if __salt__['onyx.cmd']('get_user', username=name):
ret['comment'] = 'Failed to remove user'
else:
ret['result'] = True
ret['comment'] = 'User removed'
ret['changes']['old'] = old_user
ret['changes']['new'] = ''
return ret | Ensure a user is not present
name
username to remove if it exists
Examples:
.. code-block:: yaml
delete:
onyx.user_absent:
- name: daniel | Below is the instruction that describes the task:
### Input:
Ensure a user is not present
name
username to remove if it exists
Examples:
.. code-block:: yaml
delete:
onyx.user_absent:
- name: daniel
### Response:
def user_absent(name):
'''
Ensure a user is not present
name
username to remove if it exists
Examples:
.. code-block:: yaml
delete:
onyx.user_absent:
- name: daniel
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
old_user = __salt__['onyx.cmd']('get_user', username=name)
if not old_user:
ret['result'] = True
ret['comment'] = 'User does not exist'
return ret
if __opts__['test'] is True and old_user:
ret['result'] = None
ret['comment'] = 'User will be removed'
ret['changes']['old'] = old_user
ret['changes']['new'] = ''
return ret
__salt__['onyx.cmd']('remove_user', username=name)
if __salt__['onyx.cmd']('get_user', username=name):
ret['comment'] = 'Failed to remove user'
else:
ret['result'] = True
ret['comment'] = 'User removed'
ret['changes']['old'] = old_user
ret['changes']['new'] = ''
return ret |
def package_info(self):
"""
:return: list of package info on installed packages
"""
import subprocess
# create a commandline like pip show Pillow show
package_names = self.installed_packages()
if not package_names:
# No installed packages yet, so nothing to do here...
return []
cmdline = [sys.executable, "-mpip"]
for name in package_names:
cmdline.extend(["show", name])
output = subprocess.check_output(cmdline)
# Python 3 fix
if not isinstance(output, str):
# Some package info is encoded in Latin-1 or something other than
# UTF8. Replace non-UTF characters with '?' instead of crashing.
output = str(output, encoding='UTF-8', errors='replace')
# parse output that looks like this example
"""
---
Name: Pillow
Version: 2.8.1
Location: /mnt/data/home/stu/.virtualenvs/shoebot-setup/lib/python2.7/site-packages/Pillow-2.8.1-py2.7-linux-x86_64.egg
Requires:
---
Name: vext.gi
Version: 0.5.6.25
Location: /mnt/data/home/stu/.virtualenvs/shoebot-setup/lib/python2.7/site-packages/vext.gi-0.5.6.25-py2.7.egg
Requires: vext
"""
results = []
for info in output[3:].split("---"):
d = {}
for line in info[1:].splitlines():
arg, _, value = line.partition(': ')
arg = arg.lower()
if arg == 'requires':
value = value.split(', ')
d[arg] = value
results.append(d)
return results | :return: list of package info on installed packages | Below is the instruction that describes the task:
### Input:
:return: list of package info on installed packages
### Response:
def package_info(self):
"""
:return: list of package info on installed packages
"""
import subprocess
# create a commandline like pip show Pillow show
package_names = self.installed_packages()
if not package_names:
# No installed packages yet, so nothing to do here...
return []
cmdline = [sys.executable, "-mpip"]
for name in package_names:
cmdline.extend(["show", name])
output = subprocess.check_output(cmdline)
# Python 3 fix
if not isinstance(output, str):
# Some package info is encoded in Latin-1 or something other than
# UTF8. Replace non-UTF characters with '?' instead of crashing.
output = str(output, encoding='UTF-8', errors='replace')
# parse output that looks like this example
"""
---
Name: Pillow
Version: 2.8.1
Location: /mnt/data/home/stu/.virtualenvs/shoebot-setup/lib/python2.7/site-packages/Pillow-2.8.1-py2.7-linux-x86_64.egg
Requires:
---
Name: vext.gi
Version: 0.5.6.25
Location: /mnt/data/home/stu/.virtualenvs/shoebot-setup/lib/python2.7/site-packages/vext.gi-0.5.6.25-py2.7.egg
Requires: vext
"""
results = []
for info in output[3:].split("---"):
d = {}
for line in info[1:].splitlines():
arg, _, value = line.partition(': ')
arg = arg.lower()
if arg == 'requires':
value = value.split(', ')
d[arg] = value
results.append(d)
return results |
def setup_toolbar(self):
"""Setup toolbar"""
load_button = create_toolbutton(self, text=_('Import data'),
icon=ima.icon('fileimport'),
triggered=lambda: self.import_data())
self.save_button = create_toolbutton(self, text=_("Save data"),
icon=ima.icon('filesave'),
triggered=lambda: self.save_data(self.filename))
self.save_button.setEnabled(False)
save_as_button = create_toolbutton(self,
text=_("Save data as..."),
icon=ima.icon('filesaveas'),
triggered=self.save_data)
reset_namespace_button = create_toolbutton(
self, text=_("Remove all variables"),
icon=ima.icon('editdelete'), triggered=self.reset_namespace)
return [load_button, self.save_button, save_as_button,
reset_namespace_button] | Setup toolbar | Below is the the instruction that describes the task:
### Input:
Setup toolbar
### Response:
def setup_toolbar(self):
"""Setup toolbar"""
load_button = create_toolbutton(self, text=_('Import data'),
icon=ima.icon('fileimport'),
triggered=lambda: self.import_data())
self.save_button = create_toolbutton(self, text=_("Save data"),
icon=ima.icon('filesave'),
triggered=lambda: self.save_data(self.filename))
self.save_button.setEnabled(False)
save_as_button = create_toolbutton(self,
text=_("Save data as..."),
icon=ima.icon('filesaveas'),
triggered=self.save_data)
reset_namespace_button = create_toolbutton(
self, text=_("Remove all variables"),
icon=ima.icon('editdelete'), triggered=self.reset_namespace)
return [load_button, self.save_button, save_as_button,
reset_namespace_button] |
def qteStartRecordingHook(self, msgObj):
"""
Commence macro recording.
Macros are recorded by connecting to the 'keypressed' signal
it emits.
If the recording has already commenced, or if this method was
called during a macro replay, then return immediately.
"""
if self.qteRecording:
self.qteMain.qteStatus('Macro recording already enabled')
return
# Update status flag.
self.qteRecording = True
# Reset the variables.
self.qteMain.qteStatus('Macro recording started')
self.recorded_keysequence = QtmacsKeysequence()
# Connect the 'keypressed' and 'abort' signals.
self.qteMain.qtesigKeyparsed.connect(self.qteKeyPress)
self.qteMain.qtesigAbort.connect(self.qteStopRecordingHook) | Commence macro recording.
Macros are recorded by connecting to the 'keypressed' signal
it emits.
If the recording has already commenced, or if this method was
called during a macro replay, then return immediately. | Below is the instruction that describes the task:
### Input:
Commence macro recording.
Macros are recorded by connecting to the 'keypressed' signal
it emits.
If the recording has already commenced, or if this method was
called during a macro replay, then return immediately.
### Response:
def qteStartRecordingHook(self, msgObj):
"""
Commence macro recording.
Macros are recorded by connecting to the 'keypressed' signal
it emits.
If the recording has already commenced, or if this method was
called during a macro replay, then return immediately.
"""
if self.qteRecording:
self.qteMain.qteStatus('Macro recording already enabled')
return
# Update status flag.
self.qteRecording = True
# Reset the variables.
self.qteMain.qteStatus('Macro recording started')
self.recorded_keysequence = QtmacsKeysequence()
# Connect the 'keypressed' and 'abort' signals.
self.qteMain.qtesigKeyparsed.connect(self.qteKeyPress)
self.qteMain.qtesigAbort.connect(self.qteStopRecordingHook) |
def _walk(self, root_path='', root_id=''):
''' a generator method which walks the file structure of the dropbox collection '''
title = '%s._walk' % self.__class__.__name__
if root_id:
pass
elif root_path:
root_id, root_parent = self._get_id(root_path)
for file_id, name, mimetype in self._list_directory(root_id):
file_path = os.path.join(root_path, name)
if mimetype == 'application/vnd.google-apps.folder':
for path, id in self._walk(root_path=file_path, root_id=file_id):
yield path, id
else:
yield file_path, file_id | a generator method which walks the file structure of the dropbox collection | Below is the instruction that describes the task:
### Input:
a generator method which walks the file structure of the dropbox collection
### Response:
def _walk(self, root_path='', root_id=''):
''' a generator method which walks the file structure of the dropbox collection '''
title = '%s._walk' % self.__class__.__name__
if root_id:
pass
elif root_path:
root_id, root_parent = self._get_id(root_path)
for file_id, name, mimetype in self._list_directory(root_id):
file_path = os.path.join(root_path, name)
if mimetype == 'application/vnd.google-apps.folder':
for path, id in self._walk(root_path=file_path, root_id=file_id):
yield path, id
else:
yield file_path, file_id |
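The pattern worth noting in that method is the recursive generator: it yields its own results and re-yields whatever the recursive call produces. A tiny standalone sketch of the same pattern over a nested dict (all names are invented for illustration):
def walk(tree, prefix=""):
    # Yield (path, value) pairs depth-first; recurse into dict "folders".
    for name, node in tree.items():
        path = prefix + "/" + name
        if isinstance(node, dict):
            for item in walk(node, path):
                yield item
        else:
            yield path, node
tree = {"docs": {"a.txt": 1, "sub": {"b.txt": 2}}, "c.txt": 3}
print(list(walk(tree)))
# [('/docs/a.txt', 1), ('/docs/sub/b.txt', 2), ('/c.txt', 3)]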
def _parse_thead_tbody_tfoot(self, table_html):
"""
Given a table, return parsed header, body, and foot.
Parameters
----------
table_html : node-like
Returns
-------
tuple of (header, body, footer), each a list of list-of-text rows.
Notes
-----
Header and body are lists-of-lists. Top level list is a list of
rows. Each row is a list of str text.
Logic: Use <thead>, <tbody>, <tfoot> elements to identify
header, body, and footer, otherwise:
- Put all rows into body
- Move rows from top of body to header only if
all elements inside row are <th>
- Move rows from bottom of body to footer only if
all elements inside row are <th>
"""
header_rows = self._parse_thead_tr(table_html)
body_rows = self._parse_tbody_tr(table_html)
footer_rows = self._parse_tfoot_tr(table_html)
def row_is_all_th(row):
return all(self._equals_tag(t, 'th') for t in
self._parse_td(row))
if not header_rows:
# The table has no <thead>. Move the top all-<th> rows from
# body_rows to header_rows. (This is a common case because many
# tables in the wild have no <thead> or <tfoot>.)
while body_rows and row_is_all_th(body_rows[0]):
header_rows.append(body_rows.pop(0))
header = self._expand_colspan_rowspan(header_rows)
body = self._expand_colspan_rowspan(body_rows)
footer = self._expand_colspan_rowspan(footer_rows)
return header, body, footer | Given a table, return parsed header, body, and foot.
Parameters
----------
table_html : node-like
Returns
-------
tuple of (header, body, footer), each a list of list-of-text rows.
Notes
-----
Header and body are lists-of-lists. Top level list is a list of
rows. Each row is a list of str text.
Logic: Use <thead>, <tbody>, <tfoot> elements to identify
header, body, and footer, otherwise:
- Put all rows into body
- Move rows from top of body to header only if
all elements inside row are <th>
- Move rows from bottom of body to footer only if
all elements inside row are <th> | Below is the instruction that describes the task:
### Input:
Given a table, return parsed header, body, and foot.
Parameters
----------
table_html : node-like
Returns
-------
tuple of (header, body, footer), each a list of list-of-text rows.
Notes
-----
Header and body are lists-of-lists. Top level list is a list of
rows. Each row is a list of str text.
Logic: Use <thead>, <tbody>, <tfoot> elements to identify
header, body, and footer, otherwise:
- Put all rows into body
- Move rows from top of body to header only if
all elements inside row are <th>
- Move rows from bottom of body to footer only if
all elements inside row are <th>
### Response:
def _parse_thead_tbody_tfoot(self, table_html):
"""
Given a table, return parsed header, body, and foot.
Parameters
----------
table_html : node-like
Returns
-------
tuple of (header, body, footer), each a list of list-of-text rows.
Notes
-----
Header and body are lists-of-lists. Top level list is a list of
rows. Each row is a list of str text.
Logic: Use <thead>, <tbody>, <tfoot> elements to identify
header, body, and footer, otherwise:
- Put all rows into body
- Move rows from top of body to header only if
all elements inside row are <th>
- Move rows from bottom of body to footer only if
all elements inside row are <th>
"""
header_rows = self._parse_thead_tr(table_html)
body_rows = self._parse_tbody_tr(table_html)
footer_rows = self._parse_tfoot_tr(table_html)
def row_is_all_th(row):
return all(self._equals_tag(t, 'th') for t in
self._parse_td(row))
if not header_rows:
# The table has no <thead>. Move the top all-<th> rows from
# body_rows to header_rows. (This is a common case because many
# tables in the wild have no <thead> or <tfoot>.)
while body_rows and row_is_all_th(body_rows[0]):
header_rows.append(body_rows.pop(0))
header = self._expand_colspan_rowspan(header_rows)
body = self._expand_colspan_rowspan(body_rows)
footer = self._expand_colspan_rowspan(footer_rows)
return header, body, footer |
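The header-promotion rule in the middle of that function is easy to demonstrate with plain lists, without lxml or BeautifulSoup. In this sketch a cell is simply a (tag, text) tuple and the sample rows are invented:
# Rows are lists of (tag, text) cells; a row is promoted while every cell is a <th>.
body_rows = [
    [("th", "Name"), ("th", "Age")],
    [("td", "Ada"), ("td", "36")],
    [("td", "Alan"), ("td", "41")],
]
header_rows = []
def row_is_all_th(row):
    return all(tag == "th" for tag, _ in row)
while body_rows and row_is_all_th(body_rows[0]):
    header_rows.append(body_rows.pop(0))
print(header_rows)  # [[('th', 'Name'), ('th', 'Age')]]
print(body_rows)    # the two <td> rows stay in the body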
def create_action_from_dict(name, spec, base_class=ActionsAction, metaclass=type, pop_keys=False):
"""Creates an action class based on a dict loaded using load_grouped_actions()
"""
actions = load_grouped_actions(spec, pop_keys=pop_keys)
attrs = {"actions": actions, "name": name}
if "as" in spec:
attrs["as_"] = spec["as"]
if pop_keys:
del spec["as"]
for k in ("requires", "methods", "defaults", "default_option"):
if k in spec:
attrs[k] = spec[k]
if pop_keys:
del spec[k]
return metaclass(name, (base_class,), attrs) | Creates an action class based on a dict loaded using load_grouped_actions() | Below is the instruction that describes the task:
### Input:
Creates an action class based on a dict loaded using load_grouped_actions()
### Response:
def create_action_from_dict(name, spec, base_class=ActionsAction, metaclass=type, pop_keys=False):
"""Creates an action class based on a dict loaded using load_grouped_actions()
"""
actions = load_grouped_actions(spec, pop_keys=pop_keys)
attrs = {"actions": actions, "name": name}
if "as" in spec:
attrs["as_"] = spec["as"]
if pop_keys:
del spec["as"]
for k in ("requires", "methods", "defaults", "default_option"):
if k in spec:
attrs[k] = spec[k]
if pop_keys:
del spec[k]
return metaclass(name, (base_class,), attrs) |
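The `metaclass(name, (base_class,), attrs)` call is ordinary dynamic class creation; with the default metaclass it is just the three-argument `type()`. A small sketch follows, where BaseAction, the spec and the attribute names are placeholders rather than the real ActionsAction API:
class BaseAction:
    def run(self):
        # call every registered action and collect the results
        return [action() for action in self.actions]
spec = {"as": "result", "methods": ["GET"]}
attrs = {
    "name": "ping_action",
    "actions": [lambda: "ping"],
    "as_": spec["as"],
    "methods": spec["methods"],
}
PingAction = type("PingAction", (BaseAction,), attrs)   # same call shape as above
print(PingAction.__name__, PingAction().run())          # PingAction ['ping']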
def _action_set_subsumption(self, action_set):
"""Perform action set subsumption."""
# Select a condition with maximum bit count among those having
# sufficient experience and sufficiently low error.
selected_rule = None
selected_bit_count = None
for rule in action_set:
if not (rule.experience > self.subsumption_threshold and
rule.error < self.error_threshold):
continue
bit_count = rule.condition.count()
if (selected_rule is None or
bit_count > selected_bit_count or
(bit_count == selected_bit_count and
random.randrange(2))):
selected_rule = rule
selected_bit_count = bit_count
# If no rule was found satisfying the requirements, return
# early.
if selected_rule is None:
return
# Subsume each rule which the selected rule generalizes. When a
# rule is subsumed, all instances of the subsumed rule are replaced
# with instances of the more general one in the population.
to_remove = []
for rule in action_set:
if (selected_rule is not rule and
selected_rule.condition(rule.condition)):
selected_rule.numerosity += rule.numerosity
action_set.model.discard(rule, rule.numerosity)
to_remove.append(rule)
for rule in to_remove:
action_set.remove(rule) | Perform action set subsumption. | Below is the instruction that describes the task:
### Input:
Perform action set subsumption.
### Response:
def _action_set_subsumption(self, action_set):
"""Perform action set subsumption."""
# Select a condition with maximum bit count among those having
# sufficient experience and sufficiently low error.
selected_rule = None
selected_bit_count = None
for rule in action_set:
if not (rule.experience > self.subsumption_threshold and
rule.error < self.error_threshold):
continue
bit_count = rule.condition.count()
if (selected_rule is None or
bit_count > selected_bit_count or
(bit_count == selected_bit_count and
random.randrange(2))):
selected_rule = rule
selected_bit_count = bit_count
# If no rule was found satisfying the requirements, return
# early.
if selected_rule is None:
return
# Subsume each rule which the selected rule generalizes. When a
# rule is subsumed, all instances of the subsumed rule are replaced
# with instances of the more general one in the population.
to_remove = []
for rule in action_set:
if (selected_rule is not rule and
selected_rule.condition(rule.condition)):
selected_rule.numerosity += rule.numerosity
action_set.model.discard(rule, rule.numerosity)
to_remove.append(rule)
for rule in to_remove:
action_set.remove(rule) |
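To see the subsumption idea in isolation, here is a toy version in which a condition is a string over {'0', '1', '#'}, '#' is a wildcard, the most general rule is chosen as the subsumer, and every rule it covers folds its numerosity into it. The experience and error thresholds of the real method are deliberately omitted, so this is only an illustration of the idea, not the library's behaviour:
def generality(cond):
    return cond.count("#")                 # more wildcards == more general
def subsumes(general, specific):
    return all(g == "#" or g == s for g, s in zip(general, specific))
rules = {"1#0": 4, "1##": 2, "110": 1}     # condition -> numerosity
best = max(rules, key=generality)          # "1##" is the most general
absorbed = [c for c in rules if c != best and subsumes(best, c)]
for c in absorbed:
    rules[best] += rules.pop(c)            # fold numerosity into the subsumer
print(best, rules)                          # 1## {'1##': 7}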
def _version_less_than_or_equal_to(self, v1, v2):
""" Returns true if v1 <= v2. """
# pylint: disable=no-name-in-module, import-error
from distutils.version import LooseVersion
return LooseVersion(v1) <= LooseVersion(v2) | Returns true if v1 <= v2. | Below is the instruction that describes the task:
### Input:
Returns true if v1 <= v2.
### Response:
def _version_less_than_or_equal_to(self, v1, v2):
""" Returns true if v1 <= v2. """
# pylint: disable=no-name-in-module, import-error
from distutils.version import LooseVersion
return LooseVersion(v1) <= LooseVersion(v2) |
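For reference, LooseVersion compares release numbers component-wise rather than lexicographically, which is the whole point of the helper. Note that distutils was removed from the standard library in Python 3.12; on current interpreters the third-party packaging.version module is the usual substitute.
from distutils.version import LooseVersion    # gone in Python 3.12+
print(LooseVersion("1.9.2") <= LooseVersion("1.10.0"))   # True: 9 < 10 numerically
print("1.9.2" <= "1.10.0")                               # False: plain string comparison
# Rough modern equivalent (assumes the third-party 'packaging' package):
# from packaging.version import Version
# Version("1.9.2") <= Version("1.10.0")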
def is_str(arg):
'''
is_str(x) yields True if x is a string object or a 0-dim numpy array of a string and yields
False otherwise.
'''
return (isinstance(arg, six.string_types) or
is_npscalar(arg, 'string') or
is_npvalue(arg, 'string')) | is_str(x) yields True if x is a string object or a 0-dim numpy array of a string and yields
False otherwise. | Below is the instruction that describes the task:
### Input:
is_str(x) yields True if x is a string object or a 0-dim numpy array of a string and yields
False otherwise.
### Response:
def is_str(arg):
'''
is_str(x) yields True if x is a string object or a 0-dim numpy array of a string and yields
False otherwise.
'''
return (isinstance(arg, six.string_types) or
is_npscalar(arg, 'string') or
is_npvalue(arg, 'string')) |
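The awkward case this helper covers is a 0-dimensional numpy array that wraps a single string. A rough standalone equivalent, without the library's is_npscalar/is_npvalue helpers (the function name below is made up):
import numpy as np
def looks_like_str(x):
    # A plain string, or a 0-dim numpy array whose dtype is a unicode/bytes string.
    return isinstance(x, str) or (
        isinstance(x, np.ndarray) and x.ndim == 0 and x.dtype.kind in "US")
print(looks_like_str("abc"))             # True
print(looks_like_str(np.array("abc")))   # True  (0-dim unicode array)
print(looks_like_str(np.array(3.0)))     # False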
def resolve_symbols(self, database, link_resolver, page=None):
"""Will call resolve_symbols on all the stale subpages of the tree.
Args:
page: hotdoc.core.tree.Page, the page to resolve symbols in,
will recurse on potential subpages.
"""
page = page or self.root
if page.ast is None and not page.generated:
with io.open(page.source_file, 'r', encoding='utf-8') as _:
page.ast = cmark.hotdoc_to_ast(_.read(), self)
page.resolve_symbols(self, database, link_resolver)
self.__update_dep_map(page, page.symbols)
for pagename in page.subpages:
cpage = self.__all_pages[pagename]
self.resolve_symbols(database, link_resolver, page=cpage) | Will call resolve_symbols on all the stale subpages of the tree.
Args:
page: hotdoc.core.tree.Page, the page to resolve symbols in,
will recurse on potential subpages. | Below is the instruction that describes the task:
### Input:
Will call resolve_symbols on all the stale subpages of the tree.
Args:
page: hotdoc.core.tree.Page, the page to resolve symbols in,
will recurse on potential subpages.
### Response:
def resolve_symbols(self, database, link_resolver, page=None):
"""Will call resolve_symbols on all the stale subpages of the tree.
Args:
page: hotdoc.core.tree.Page, the page to resolve symbols in,
will recurse on potential subpages.
"""
page = page or self.root
if page.ast is None and not page.generated:
with io.open(page.source_file, 'r', encoding='utf-8') as _:
page.ast = cmark.hotdoc_to_ast(_.read(), self)
page.resolve_symbols(self, database, link_resolver)
self.__update_dep_map(page, page.symbols)
for pagename in page.subpages:
cpage = self.__all_pages[pagename]
self.resolve_symbols(database, link_resolver, page=cpage) |
def get_state_repr(self, path):
"""
Returns the current state, or sub-state, depending on the path.
"""
if path == "ips":
return {
"failed_ips" : self.failed_ips,
"questionable_ips" : self.questionable_ips,
"working_set" : self.working_set,
}
if path == "route_info":
return {
"route_spec" : self.route_spec,
"routes" : self.routes,
"ignore_routes" : self.ignore_routes
}
if path == "plugins":
return self.get_plugins_info()
if path == "vpc":
return self.vpc_state
if path == "":
return {
"SERVER" : {
"version" : self.versions,
"start_time" : self.starttime.isoformat(),
"current_time" : datetime.datetime.now().isoformat()
},
"params" : self.render_main_params(),
"plugins" : {"_href" : "/plugins"},
"ips" : {"_href" : "/ips"},
"route_info" : {"_href" : "/route_info"},
"vpc" : {"_href" : "/vpc"}
} | Returns the current state, or sub-state, depending on the path. | Below is the instruction that describes the task:
### Input:
Returns the current state, or sub-state, depending on the path.
### Response:
def get_state_repr(self, path):
"""
Returns the current state, or sub-state, depending on the path.
"""
if path == "ips":
return {
"failed_ips" : self.failed_ips,
"questionable_ips" : self.questionable_ips,
"working_set" : self.working_set,
}
if path == "route_info":
return {
"route_spec" : self.route_spec,
"routes" : self.routes,
"ignore_routes" : self.ignore_routes
}
if path == "plugins":
return self.get_plugins_info()
if path == "vpc":
return self.vpc_state
if path == "":
return {
"SERVER" : {
"version" : self.versions,
"start_time" : self.starttime.isoformat(),
"current_time" : datetime.datetime.now().isoformat()
},
"params" : self.render_main_params(),
"plugins" : {"_href" : "/plugins"},
"ips" : {"_href" : "/ips"},
"route_info" : {"_href" : "/route_info"},
"vpc" : {"_href" : "/vpc"}
} |
def merge_into_nodeset(target, source):
"""Place all the nodes from the source node-set into the target
node-set, preserving document order. Both node-sets must be in
document order to begin with.
"""
if len(target) == 0:
target.extend(source)
return
source = [n for n in source if n not in target]
if len(source) == 0:
return
# If the last node in the target set comes before the first node in the
# source set, then we can just concatenate the sets. Otherwise, we
# will need to sort. (We could also check to see if the last node in
# the source set comes before the first node in the target set, but this
# situation is very unlikely in practice.)
if document_order(target[-1]) < document_order(source[0]):
target.extend(source)
else:
target.extend(source)
target.sort(key=document_order) | Place all the nodes from the source node-set into the target
node-set, preserving document order. Both node-sets must be in
document order to begin with. | Below is the instruction that describes the task:
### Input:
Place all the nodes from the source node-set into the target
node-set, preserving document order. Both node-sets must be in
document order to begin with.
### Response:
def merge_into_nodeset(target, source):
"""Place all the nodes from the source node-set into the target
node-set, preserving document order. Both node-sets must be in
document order to begin with.
"""
if len(target) == 0:
target.extend(source)
return
source = [n for n in source if n not in target]
if len(source) == 0:
return
# If the last node in the target set comes before the first node in the
# source set, then we can just concatenate the sets. Otherwise, we
# will need to sort. (We could also check to see if the last node in
# the source set comes before the first node in the target set, but this
# situation is very unlikely in practice.)
if document_order(target[-1]) < document_order(source[0]):
target.extend(source)
else:
target.extend(source)
target.sort(key=document_order) |
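The same append-or-sort optimisation works for any key function. Here is a generic sketch with integers standing in for nodes and the identity as the document-order key; the helper name is invented:
def merge_ordered(target, source, key):
    # Both lists are assumed sorted by `key`; duplicates are dropped first.
    source = [n for n in source if n not in target]
    if not source:
        return
    if not target or key(target[-1]) < key(source[0]):
        target.extend(source)           # fast path: plain concatenation
    else:
        target.extend(source)
        target.sort(key=key)            # general case: one sort
a = [1, 4, 9]
b = [2, 4, 7]
merge_ordered(a, b, key=lambda n: n)
print(a)   # [1, 2, 4, 7, 9]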
def create(dataset, features=None, distance=None, radius=1.,
min_core_neighbors=10, verbose=True):
"""
Create a DBSCAN clustering model. The DBSCAN method partitions the input
dataset into three types of points, based on the estimated probability
density at each point.
- **Core** points have a large number of points within a given neighborhood.
Specifically, `min_core_neighbors` must be within distance `radius` of a
point for it to be considered a core point.
- **Boundary** points are within distance `radius` of a core point, but
don't have sufficient neighbors of their own to be considered core.
- **Noise** points comprise the remainder of the data. These points have too
few neighbors to be considered core points, and are further than distance
`radius` from all core points.
Clusters are formed by connecting core points that are neighbors of each
other, then assigning boundary points to their nearest core neighbor's
cluster.
Parameters
----------
dataset : SFrame
Training data, with each row corresponding to an observation. Must
include all features specified in the `features` parameter, but may have
additional columns as well.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns of the input `dataset` should
be used to train the model. All features must be numeric, i.e. integer
or float types.
distance : str or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of two types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified, a composite distance is constructed
automatically based on feature types.
radius : int or float, optional
Size of each point's neighborhood, with respect to the specified
distance function.
min_core_neighbors : int, optional
Number of neighbors that must be within distance `radius` of a point in
order for that point to be considered a "core point" of a cluster.
verbose : bool, optional
If True, print progress updates and model details during model creation.
Returns
-------
out : DBSCANModel
A model containing a cluster label for each row in the input `dataset`.
Also contains the indices of the core points, cluster boundary points,
and noise points.
See Also
--------
DBSCANModel, turicreate.toolkits.distances
Notes
-----
- Our implementation of DBSCAN first computes the similarity graph on the
input dataset, which can be a computationally intensive process. In the
current implementation, some distances are substantially faster than
others; in particular "euclidean", "squared_euclidean", "cosine", and
"transformed_dot_product" are quite fast, while composite distances can be
slow.
- Any distance function in the GL Create library may be used with DBSCAN but
the results may be poor for distances that violate the standard metric
properties, i.e. symmetry, non-negativity, triangle inequality, and
identity of indiscernibles. In particular, the DBSCAN algorithm is based
on the concept of connecting high-density points that are *close* to each
other into a single cluster, but the notion of *close* may be very
counterintuitive if the chosen distance function is not a valid metric.
The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will
likely yield the best results.
References
----------
- Ester, M., et al. (1996) `A Density-Based Algorithm for Discovering
Clusters in Large Spatial Databases with Noise
<https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the
Second International Conference on Knowledge Discovery and Data Mining.
pp. 226-231.
- `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_
- `Visualizing DBSCAN Clustering
<http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3)
>>> model.cluster_id.print_rows(15)
+--------+------------+----------+
| row_id | cluster_id | type |
+--------+------------+----------+
| 8 | 0 | core |
| 7 | 2 | core |
| 0 | 1 | core |
| 2 | 2 | core |
| 3 | 1 | core |
| 11 | 2 | core |
| 4 | 2 | core |
| 1 | 0 | boundary |
| 6 | 0 | boundary |
| 5 | 0 | boundary |
| 9 | 0 | boundary |
| 12 | 2 | boundary |
| 10 | 1 | boundary |
| 13 | 1 | boundary |
+--------+------------+----------+
[14 rows x 3 columns]
"""
## Start the training time clock and instantiate an empty model
logger = _logging.getLogger(__name__)
start_time = _time.time()
## Validate the input dataset
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Validate neighborhood parameters
if not isinstance(min_core_neighbors, int) or min_core_neighbors < 0:
raise ValueError("Input 'min_core_neighbors' must be a non-negative " +
"integer.")
if not isinstance(radius, (int, float)) or radius < 0:
raise ValueError("Input 'radius' must be a non-negative integer " +
"or float.")
## Compute all-point nearest neighbors within `radius` and count
# neighborhood sizes
knn_model = _tc.nearest_neighbors.create(dataset, features=features,
distance=distance,
method='brute_force',
verbose=verbose)
knn = knn_model.similarity_graph(k=None, radius=radius,
include_self_edges=False,
output_type='SFrame',
verbose=verbose)
neighbor_counts = knn.groupby('query_label', _agg.COUNT)
### NOTE: points with NO neighbors are already dropped here!
## Identify core points and boundary candidate points. Not all of the
# boundary candidates will be boundary points - some are in small isolated
# clusters.
if verbose:
logger.info("Identifying noise points and core points.")
boundary_mask = neighbor_counts['Count'] < min_core_neighbors
core_mask = 1 - boundary_mask
# this includes too small clusters
boundary_idx = neighbor_counts[boundary_mask]['query_label']
core_idx = neighbor_counts[core_mask]['query_label']
## Build a similarity graph on the core points
## NOTE: careful with singleton core points - the second filter removes them
# from the edge set so they have to be added separately as vertices.
if verbose:
logger.info("Constructing the core point similarity graph.")
core_vertices = knn.filter_by(core_idx, 'query_label')
core_edges = core_vertices.filter_by(core_idx, 'reference_label')
core_graph = _tc.SGraph()
core_graph = core_graph.add_vertices(core_vertices[['query_label']],
vid_field='query_label')
core_graph = core_graph.add_edges(core_edges, src_field='query_label',
dst_field='reference_label')
## Compute core point connected components and relabel to be consecutive
# integers
cc = _tc.connected_components.create(core_graph, verbose=verbose)
cc_labels = cc.component_size.add_row_number('__label')
core_assignments = cc.component_id.join(cc_labels, on='component_id',
how='left')[['__id', '__label']]
core_assignments['type'] = 'core'
## Join potential boundary points to core cluster labels (points that aren't
# really on a boundary are implicitly dropped)
if verbose:
logger.info("Processing boundary points.")
boundary_edges = knn.filter_by(boundary_idx, 'query_label')
# separate real boundary points from points in small isolated clusters
boundary_core_edges = boundary_edges.filter_by(core_idx, 'reference_label')
# join a boundary point to its single closest core point.
boundary_assignments = boundary_core_edges.groupby('query_label',
{'reference_label': _agg.ARGMIN('rank', 'reference_label')})
boundary_assignments = boundary_assignments.join(core_assignments,
on={'reference_label': '__id'})
boundary_assignments = boundary_assignments.rename({'query_label': '__id'}, inplace=True)
boundary_assignments = boundary_assignments.remove_column('reference_label', inplace=True)
boundary_assignments['type'] = 'boundary'
## Identify boundary candidates that turned out to be in small clusters but
# not on real cluster boundaries
small_cluster_idx = set(boundary_idx).difference(
boundary_assignments['__id'])
## Identify individual noise points by the fact that they have no neighbors.
noise_idx = set(range(dataset.num_rows())).difference(
neighbor_counts['query_label'])
noise_idx = noise_idx.union(small_cluster_idx)
noise_assignments = _tc.SFrame({'row_id': _tc.SArray(list(noise_idx), int)})
noise_assignments['cluster_id'] = None
noise_assignments['cluster_id'] = noise_assignments['cluster_id'].astype(int)
noise_assignments['type'] = 'noise'
## Append core, boundary, and noise results to each other.
master_assignments = _tc.SFrame()
num_clusters = 0
if core_assignments.num_rows() > 0:
core_assignments = core_assignments.rename({'__id': 'row_id',
'__label': 'cluster_id'}, inplace=True)
master_assignments = master_assignments.append(core_assignments)
num_clusters = len(core_assignments['cluster_id'].unique())
if boundary_assignments.num_rows() > 0:
boundary_assignments = boundary_assignments.rename({'__id': 'row_id',
'__label': 'cluster_id'}, inplace=True)
master_assignments = master_assignments.append(boundary_assignments)
if noise_assignments.num_rows() > 0:
master_assignments = master_assignments.append(noise_assignments)
## Post-processing and formatting
state = {'verbose': verbose,
'radius': radius,
'min_core_neighbors': min_core_neighbors,
'distance': knn_model.distance,
'num_distance_components': knn_model.num_distance_components,
'num_examples': dataset.num_rows(),
'features': knn_model.features,
'num_features': knn_model.num_features,
'unpacked_features': knn_model.unpacked_features,
'num_unpacked_features': knn_model.num_unpacked_features,
'cluster_id': master_assignments,
'num_clusters': num_clusters,
'training_time': _time.time() - start_time}
return DBSCANModel(state) | Create a DBSCAN clustering model. The DBSCAN method partitions the input
dataset into three types of points, based on the estimated probability
density at each point.
- **Core** points have a large number of points within a given neighborhood.
Specifically, `min_core_neighbors` must be within distance `radius` of a
point for it to be considered a core point.
- **Boundary** points are within distance `radius` of a core point, but
don't have sufficient neighbors of their own to be considered core.
- **Noise** points comprise the remainder of the data. These points have too
few neighbors to be considered core points, and are further than distance
`radius` from all core points.
Clusters are formed by connecting core points that are neighbors of each
other, then assigning boundary points to their nearest core neighbor's
cluster.
Parameters
----------
dataset : SFrame
Training data, with each row corresponding to an observation. Must
include all features specified in the `features` parameter, but may have
additional columns as well.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns of the input `dataset` should
be used to train the model. All features must be numeric, i.e. integer
or float types.
distance : str or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of two types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified, a composite distance is constructed
automatically based on feature types.
radius : int or float, optional
Size of each point's neighborhood, with respect to the specified
distance function.
min_core_neighbors : int, optional
Number of neighbors that must be within distance `radius` of a point in
order for that point to be considered a "core point" of a cluster.
verbose : bool, optional
If True, print progress updates and model details during model creation.
Returns
-------
out : DBSCANModel
A model containing a cluster label for each row in the input `dataset`.
Also contains the indices of the core points, cluster boundary points,
and noise points.
See Also
--------
DBSCANModel, turicreate.toolkits.distances
Notes
-----
- Our implementation of DBSCAN first computes the similarity graph on the
input dataset, which can be a computationally intensive process. In the
current implementation, some distances are substantially faster than
others; in particular "euclidean", "squared_euclidean", "cosine", and
"transformed_dot_product" are quite fast, while composite distances can be
slow.
- Any distance function in the GL Create library may be used with DBSCAN but
the results may be poor for distances that violate the standard metric
properties, i.e. symmetry, non-negativity, triangle inequality, and
identity of indiscernibles. In particular, the DBSCAN algorithm is based
on the concept of connecting high-density points that are *close* to each
other into a single cluster, but the notion of *close* may be very
counterintuitive if the chosen distance function is not a valid metric.
The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will
likely yield the best results.
References
----------
- Ester, M., et al. (1996) `A Density-Based Algorithm for Discovering
Clusters in Large Spatial Databases with Noise
<https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the
Second International Conference on Knowledge Discovery and Data Mining.
pp. 226-231.
- `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_
- `Visualizing DBSCAN Clustering
<http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3)
>>> model.cluster_id.print_rows(15)
+--------+------------+----------+
| row_id | cluster_id | type |
+--------+------------+----------+
| 8 | 0 | core |
| 7 | 2 | core |
| 0 | 1 | core |
| 2 | 2 | core |
| 3 | 1 | core |
| 11 | 2 | core |
| 4 | 2 | core |
| 1 | 0 | boundary |
| 6 | 0 | boundary |
| 5 | 0 | boundary |
| 9 | 0 | boundary |
| 12 | 2 | boundary |
| 10 | 1 | boundary |
| 13 | 1 | boundary |
+--------+------------+----------+
[14 rows x 3 columns] | Below is the instruction that describes the task:
### Input:
Create a DBSCAN clustering model. The DBSCAN method partitions the input
dataset into three types of points, based on the estimated probability
density at each point.
- **Core** points have a large number of points within a given neighborhood.
Specifically, `min_core_neighbors` must be within distance `radius` of a
point for it to be considered a core point.
- **Boundary** points are within distance `radius` of a core point, but
don't have sufficient neighbors of their own to be considered core.
- **Noise** points comprise the remainder of the data. These points have too
few neighbors to be considered core points, and are further than distance
`radius` from all core points.
Clusters are formed by connecting core points that are neighbors of each
other, then assigning boundary points to their nearest core neighbor's
cluster.
Parameters
----------
dataset : SFrame
Training data, with each row corresponding to an observation. Must
include all features specified in the `features` parameter, but may have
additional columns as well.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns of the input `dataset` should
be used to train the model. All features must be numeric, i.e. integer
or float types.
distance : str or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of two types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified, a composite distance is constructed
automatically based on feature types.
radius : int or float, optional
Size of each point's neighborhood, with respect to the specified
distance function.
min_core_neighbors : int, optional
Number of neighbors that must be within distance `radius` of a point in
order for that point to be considered a "core point" of a cluster.
verbose : bool, optional
If True, print progress updates and model details during model creation.
Returns
-------
out : DBSCANModel
A model containing a cluster label for each row in the input `dataset`.
Also contains the indices of the core points, cluster boundary points,
and noise points.
See Also
--------
DBSCANModel, turicreate.toolkits.distances
Notes
-----
- Our implementation of DBSCAN first computes the similarity graph on the
input dataset, which can be a computationally intensive process. In the
current implementation, some distances are substantially faster than
others; in particular "euclidean", "squared_euclidean", "cosine", and
"transformed_dot_product" are quite fast, while composite distances can be
slow.
- Any distance function in the GL Create library may be used with DBSCAN but
the results may be poor for distances that violate the standard metric
properties, i.e. symmetry, non-negativity, triangle inequality, and
identity of indiscernibles. In particular, the DBSCAN algorithm is based
on the concept of connecting high-density points that are *close* to each
other into a single cluster, but the notion of *close* may be very
counterintuitive if the chosen distance function is not a valid metric.
The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will
likely yield the best results.
References
----------
- Ester, M., et al. (1996) `A Density-Based Algorithm for Discovering
Clusters in Large Spatial Databases with Noise
<https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the
Second International Conference on Knowledge Discovery and Data Mining.
pp. 226-231.
- `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_
- `Visualizing DBSCAN Clustering
<http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3)
>>> model.cluster_id.print_rows(15)
+--------+------------+----------+
| row_id | cluster_id | type |
+--------+------------+----------+
| 8 | 0 | core |
| 7 | 2 | core |
| 0 | 1 | core |
| 2 | 2 | core |
| 3 | 1 | core |
| 11 | 2 | core |
| 4 | 2 | core |
| 1 | 0 | boundary |
| 6 | 0 | boundary |
| 5 | 0 | boundary |
| 9 | 0 | boundary |
| 12 | 2 | boundary |
| 10 | 1 | boundary |
| 13 | 1 | boundary |
+--------+------------+----------+
[14 rows x 3 columns]
### Response:
def create(dataset, features=None, distance=None, radius=1.,
min_core_neighbors=10, verbose=True):
"""
Create a DBSCAN clustering model. The DBSCAN method partitions the input
dataset into three types of points, based on the estimated probability
density at each point.
- **Core** points have a large number of points within a given neighborhood.
Specifically, `min_core_neighbors` must be within distance `radius` of a
point for it to be considered a core point.
- **Boundary** points are within distance `radius` of a core point, but
don't have sufficient neighbors of their own to be considered core.
- **Noise** points comprise the remainder of the data. These points have too
few neighbors to be considered core points, and are further than distance
`radius` from all core points.
Clusters are formed by connecting core points that are neighbors of each
other, then assigning boundary points to their nearest core neighbor's
cluster.
Parameters
----------
dataset : SFrame
Training data, with each row corresponding to an observation. Must
include all features specified in the `features` parameter, but may have
additional columns as well.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns of the input `dataset` should
be used to train the model. All features must be numeric, i.e. integer
or float types.
distance : str or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of two types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified, a composite distance is constructed
automatically based on feature types.
radius : int or float, optional
Size of each point's neighborhood, with respect to the specified
distance function.
min_core_neighbors : int, optional
Number of neighbors that must be within distance `radius` of a point in
order for that point to be considered a "core point" of a cluster.
verbose : bool, optional
If True, print progress updates and model details during model creation.
Returns
-------
out : DBSCANModel
A model containing a cluster label for each row in the input `dataset`.
Also contains the indices of the core points, cluster boundary points,
and noise points.
See Also
--------
DBSCANModel, turicreate.toolkits.distances
Notes
-----
- Our implementation of DBSCAN first computes the similarity graph on the
input dataset, which can be a computationally intensive process. In the
current implementation, some distances are substantially faster than
others; in particular "euclidean", "squared_euclidean", "cosine", and
"transformed_dot_product" are quite fast, while composite distances can be
slow.
- Any distance function in the GL Create library may be used with DBSCAN but
the results may be poor for distances that violate the standard metric
properties, i.e. symmetry, non-negativity, triangle inequality, and
identity of indiscernibles. In particular, the DBSCAN algorithm is based
on the concept of connecting high-density points that are *close* to each
other into a single cluster, but the notion of *close* may be very
counterintuitive if the chosen distance function is not a valid metric.
The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will
likely yield the best results.
References
----------
- Ester, M., et al. (1996) `A Density-Based Algorithm for Discovering
Clusters in Large Spatial Databases with Noise
<https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the
Second International Conference on Knowledge Discovery and Data Mining.
pp. 226-231.
- `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_
- `Visualizing DBSCAN Clustering
<http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3)
>>> model.cluster_id.print_rows(15)
+--------+------------+----------+
| row_id | cluster_id | type |
+--------+------------+----------+
| 8 | 0 | core |
| 7 | 2 | core |
| 0 | 1 | core |
| 2 | 2 | core |
| 3 | 1 | core |
| 11 | 2 | core |
| 4 | 2 | core |
| 1 | 0 | boundary |
| 6 | 0 | boundary |
| 5 | 0 | boundary |
| 9 | 0 | boundary |
| 12 | 2 | boundary |
| 10 | 1 | boundary |
| 13 | 1 | boundary |
+--------+------------+----------+
[14 rows x 3 columns]
"""
## Start the training time clock and instantiate an empty model
logger = _logging.getLogger(__name__)
start_time = _time.time()
## Validate the input dataset
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Validate neighborhood parameters
if not isinstance(min_core_neighbors, int) or min_core_neighbors < 0:
raise ValueError("Input 'min_core_neighbors' must be a non-negative " +
"integer.")
if not isinstance(radius, (int, float)) or radius < 0:
raise ValueError("Input 'radius' must be a non-negative integer " +
"or float.")
## Compute all-point nearest neighbors within `radius` and count
# neighborhood sizes
knn_model = _tc.nearest_neighbors.create(dataset, features=features,
distance=distance,
method='brute_force',
verbose=verbose)
knn = knn_model.similarity_graph(k=None, radius=radius,
include_self_edges=False,
output_type='SFrame',
verbose=verbose)
neighbor_counts = knn.groupby('query_label', _agg.COUNT)
### NOTE: points with NO neighbors are already dropped here!
## Identify core points and boundary candidate points. Not all of the
# boundary candidates will be boundary points - some are in small isolated
# clusters.
if verbose:
logger.info("Identifying noise points and core points.")
boundary_mask = neighbor_counts['Count'] < min_core_neighbors
core_mask = 1 - boundary_mask
# this includes too small clusters
boundary_idx = neighbor_counts[boundary_mask]['query_label']
core_idx = neighbor_counts[core_mask]['query_label']
## Build a similarity graph on the core points
## NOTE: careful with singleton core points - the second filter removes them
# from the edge set so they have to be added separately as vertices.
if verbose:
logger.info("Constructing the core point similarity graph.")
core_vertices = knn.filter_by(core_idx, 'query_label')
core_edges = core_vertices.filter_by(core_idx, 'reference_label')
core_graph = _tc.SGraph()
core_graph = core_graph.add_vertices(core_vertices[['query_label']],
vid_field='query_label')
core_graph = core_graph.add_edges(core_edges, src_field='query_label',
dst_field='reference_label')
## Compute core point connected components and relabel to be consecutive
# integers
cc = _tc.connected_components.create(core_graph, verbose=verbose)
cc_labels = cc.component_size.add_row_number('__label')
core_assignments = cc.component_id.join(cc_labels, on='component_id',
how='left')[['__id', '__label']]
core_assignments['type'] = 'core'
## Join potential boundary points to core cluster labels (points that aren't
# really on a boundary are implicitly dropped)
if verbose:
logger.info("Processing boundary points.")
boundary_edges = knn.filter_by(boundary_idx, 'query_label')
# separate real boundary points from points in small isolated clusters
boundary_core_edges = boundary_edges.filter_by(core_idx, 'reference_label')
# join a boundary point to its single closest core point.
boundary_assignments = boundary_core_edges.groupby('query_label',
{'reference_label': _agg.ARGMIN('rank', 'reference_label')})
boundary_assignments = boundary_assignments.join(core_assignments,
on={'reference_label': '__id'})
boundary_assignments = boundary_assignments.rename({'query_label': '__id'}, inplace=True)
boundary_assignments = boundary_assignments.remove_column('reference_label', inplace=True)
boundary_assignments['type'] = 'boundary'
## Identify boundary candidates that turned out to be in small clusters but
# not on real cluster boundaries
small_cluster_idx = set(boundary_idx).difference(
boundary_assignments['__id'])
## Identify individual noise points by the fact that they have no neighbors.
noise_idx = set(range(dataset.num_rows())).difference(
neighbor_counts['query_label'])
noise_idx = noise_idx.union(small_cluster_idx)
noise_assignments = _tc.SFrame({'row_id': _tc.SArray(list(noise_idx), int)})
noise_assignments['cluster_id'] = None
noise_assignments['cluster_id'] = noise_assignments['cluster_id'].astype(int)
noise_assignments['type'] = 'noise'
## Append core, boundary, and noise results to each other.
master_assignments = _tc.SFrame()
num_clusters = 0
if core_assignments.num_rows() > 0:
core_assignments = core_assignments.rename({'__id': 'row_id',
'__label': 'cluster_id'}, inplace=True)
master_assignments = master_assignments.append(core_assignments)
num_clusters = len(core_assignments['cluster_id'].unique())
if boundary_assignments.num_rows() > 0:
boundary_assignments = boundary_assignments.rename({'__id': 'row_id',
'__label': 'cluster_id'}, inplace=True)
master_assignments = master_assignments.append(boundary_assignments)
if noise_assignments.num_rows() > 0:
master_assignments = master_assignments.append(noise_assignments)
## Post-processing and formatting
state = {'verbose': verbose,
'radius': radius,
'min_core_neighbors': min_core_neighbors,
'distance': knn_model.distance,
'num_distance_components': knn_model.num_distance_components,
'num_examples': dataset.num_rows(),
'features': knn_model.features,
'num_features': knn_model.num_features,
'unpacked_features': knn_model.unpacked_features,
'num_unpacked_features': knn_model.num_unpacked_features,
'cluster_id': master_assignments,
'num_clusters': num_clusters,
'training_time': _time.time() - start_time}
return DBSCANModel(state) |
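The docstring above already shows end-to-end usage; what may be less obvious is the core/boundary/noise split itself. Below is a toy, pure-Python version of that classification step on six invented 2-D points, with the connected-component labelling of core points omitted:
points = [(0.0, 0.0), (0.2, 0.1), (0.1, 0.3), (0.4, 0.2), (0.8, 0.2), (5.0, 5.0)]
radius, min_core_neighbors = 0.5, 3
def dist(a, b):
    return ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) ** 0.5
# index -> indices of points within `radius`
neighbors = {i: [j for j, q in enumerate(points) if i != j and dist(p, q) <= radius]
             for i, p in enumerate(points)}
core = {i for i, nbrs in neighbors.items() if len(nbrs) >= min_core_neighbors}
boundary = {i for i in neighbors
            if i not in core and any(j in core for j in neighbors[i])}
noise = set(neighbors) - core - boundary
print(sorted(core), sorted(boundary), sorted(noise))   # [0, 1, 2, 3] [4] [5]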
def flip(self, reactions):
"""Flip the specified reactions."""
for reaction in reactions:
if reaction in self._flipped:
self._flipped.remove(reaction)
else:
self._flipped.add(reaction) | Flip the specified reactions. | Below is the instruction that describes the task:
### Input:
Flip the specified reactions.
### Response:
def flip(self, reactions):
"""Flip the specified reactions."""
for reaction in reactions:
if reaction in self._flipped:
self._flipped.remove(reaction)
else:
self._flipped.add(reaction) |
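Toggling membership like this is the set symmetric difference, so the loop body is equivalent to a single in-place `^=`, shown here on a throwaway set:
flipped = {"R1"}
flipped ^= {"R1", "R2"}   # R1 is removed, R2 is added
print(flipped)            # {'R2'}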
def available(name):
'''
Return True if the named service is available.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
'''
cmd = '{0} get {1}'.format(_cmd(), name)
if __salt__['cmd.retcode'](cmd) == 2:
return False
return True | Return True if the named service is available.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd | Below is the instruction that describes the task:
### Input:
Return True if the named service is available.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
### Response:
def available(name):
'''
Return True if the named service is available.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
'''
cmd = '{0} get {1}'.format(_cmd(), name)
if __salt__['cmd.retcode'](cmd) == 2:
return False
return True |
def set_raw_datadir(self, directory=None):
"""Set the directory containing .res-files.
Used for setting directory for looking for res-files.@
A valid directory name is required.
Args:
directory (str): path to res-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/Arbindata"
>>> d.set_raw_datadir(directory)
"""
if directory is None:
self.logger.info("no directory name given")
return
if not os.path.isdir(directory):
self.logger.info(directory)
self.logger.info("directory does not exist")
return
self.raw_datadir = directory | Set the directory containing .res-files.
Used for setting directory for looking for res-files.@
A valid directory name is required.
Args:
directory (str): path to res-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/Arbindata"
>>> d.set_raw_datadir(directory) | Below is the instruction that describes the task:
### Input:
Set the directory containing .res-files.
Used for setting directory for looking for res-files.@
A valid directory name is required.
Args:
directory (str): path to res-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/Arbindata"
>>> d.set_raw_datadir(directory)
### Response:
def set_raw_datadir(self, directory=None):
"""Set the directory containing .res-files.
Used for setting directory for looking for res-files.@
A valid directory name is required.
Args:
directory (str): path to res-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/Arbindata"
>>> d.set_raw_datadir(directory)
"""
if directory is None:
self.logger.info("no directory name given")
return
if not os.path.isdir(directory):
self.logger.info(directory)
self.logger.info("directory does not exist")
return
self.raw_datadir = directory |
def dav_index(context, data):
"""List files in a WebDAV directory."""
# This is made to work with ownCloud/nextCloud, but some rumor has
# it they are "standards compliant" and it should thus work for
# other DAV servers.
url = data.get('url')
result = context.http.request('PROPFIND', url)
for resp in result.xml.findall('./{DAV:}response'):
href = resp.findtext('./{DAV:}href')
if href is None:
continue
rurl = urljoin(url, href)
rdata = data.copy()
rdata['url'] = rurl
rdata['foreign_id'] = rurl
if rdata['url'] == url:
continue
if resp.find('.//{DAV:}collection') is not None:
rdata['parent_foreign_id'] = rurl
context.log.info("Fetching contents of folder: %s" % rurl)
context.recurse(data=rdata)
else:
rdata['parent_foreign_id'] = url
# Do GET requests on the urls
fetch(context, rdata) | List files in a WebDAV directory. | Below is the instruction that describes the task:
### Input:
List files in a WebDAV directory.
### Response:
def dav_index(context, data):
"""List files in a WebDAV directory."""
# This is made to work with ownCloud/nextCloud, but some rumor has
# it they are "standards compliant" and it should thus work for
# other DAV servers.
url = data.get('url')
result = context.http.request('PROPFIND', url)
for resp in result.xml.findall('./{DAV:}response'):
href = resp.findtext('./{DAV:}href')
if href is None:
continue
rurl = urljoin(url, href)
rdata = data.copy()
rdata['url'] = rurl
rdata['foreign_id'] = rurl
if rdata['url'] == url:
continue
if resp.find('.//{DAV:}collection') is not None:
rdata['parent_foreign_id'] = rurl
context.log.info("Fetching contents of folder: %s" % rurl)
context.recurse(data=rdata)
else:
rdata['parent_foreign_id'] = url
# Do GET requests on the urls
fetch(context, rdata) |
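The namespace-qualified lookups ('{DAV:}response', '{DAV:}href', '{DAV:}collection') are the fiddly part of WebDAV listings. The following self-contained sketch parses a trimmed multistatus body with the standard library only; the XML and paths are invented, and in practice the body would come back from an HTTP PROPFIND request (for example requests.request('PROPFIND', url)).
import xml.etree.ElementTree as ET
body = """<?xml version="1.0"?>
<d:multistatus xmlns:d="DAV:">
  <d:response>
    <d:href>/remote.php/dav/files/user/</d:href>
    <d:propstat><d:prop><d:resourcetype><d:collection/></d:resourcetype></d:prop></d:propstat>
  </d:response>
  <d:response>
    <d:href>/remote.php/dav/files/user/report.pdf</d:href>
    <d:propstat><d:prop><d:resourcetype/></d:prop></d:propstat>
  </d:response>
</d:multistatus>"""
root = ET.fromstring(body)
for resp in root.findall("./{DAV:}response"):
    href = resp.findtext("./{DAV:}href")
    is_folder = resp.find(".//{DAV:}collection") is not None
    print(("folder" if is_folder else "file"), href)
# folder /remote.php/dav/files/user/
# file /remote.php/dav/files/user/report.pdf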
def IntervalReader( f ):
"""
Iterator yielding chrom, start, end, strand, value.
Values are zero-based, half-open.
Regions which lack a score are ignored.
"""
current_chrom = None
current_pos = None
current_step = None
# always for wiggle data
strand = '+'
mode = "bed"
for line in f:
if line.isspace() or line.startswith( "track" ) or line.startswith( "#" ) or line.startswith( "browser" ):
continue
elif line.startswith( "variableStep" ):
header = parse_header( line )
current_chrom = header['chrom']
current_pos = None
current_step = None
if 'span' in header: current_span = int( header['span'] )
else: current_span = 1
mode = "variableStep"
elif line.startswith( "fixedStep" ):
header = parse_header( line )
current_chrom = header['chrom']
current_pos = int( header['start'] ) - 1
current_step = int( header['step'] )
if 'span' in header: current_span = int( header['span'] )
else: current_span = 1
mode = "fixedStep"
elif mode == "bed":
fields = line.split()
if len( fields ) > 3:
if len( fields ) > 5:
yield fields[0], int( fields[1] ), int( fields[2] ), fields[5], float( fields[3] )
else:
yield fields[0], int( fields[1] ), int( fields[2] ), strand, float( fields[3] )
elif mode == "variableStep":
fields = line.split()
pos = int( fields[0] ) - 1
yield current_chrom, pos, pos + current_span, strand, float( fields[1] )
elif mode == "fixedStep":
yield current_chrom, current_pos, current_pos + current_span, strand, float( line.split()[0] )
current_pos += current_step
else:
raise ValueError("Unexpected input line: %s" % line.strip()) | Iterator yielding chrom, start, end, strand, value.
Values are zero-based, half-open.
Regions which lack a score are ignored. | Below is the instruction that describes the task:
### Input:
Iterator yielding chrom, start, end, strand, value.
Values are zero-based, half-open.
Regions which lack a score are ignored.
### Response:
def IntervalReader( f ):
"""
Iterator yielding chrom, start, end, strand, value.
Values are zero-based, half-open.
Regions which lack a score are ignored.
"""
current_chrom = None
current_pos = None
current_step = None
# always for wiggle data
strand = '+'
mode = "bed"
for line in f:
if line.isspace() or line.startswith( "track" ) or line.startswith( "#" ) or line.startswith( "browser" ):
continue
elif line.startswith( "variableStep" ):
header = parse_header( line )
current_chrom = header['chrom']
current_pos = None
current_step = None
if 'span' in header: current_span = int( header['span'] )
else: current_span = 1
mode = "variableStep"
elif line.startswith( "fixedStep" ):
header = parse_header( line )
current_chrom = header['chrom']
current_pos = int( header['start'] ) - 1
current_step = int( header['step'] )
if 'span' in header: current_span = int( header['span'] )
else: current_span = 1
mode = "fixedStep"
elif mode == "bed":
fields = line.split()
if len( fields ) > 3:
if len( fields ) > 5:
yield fields[0], int( fields[1] ), int( fields[2] ), fields[5], float( fields[3] )
else:
yield fields[0], int( fields[1] ), int( fields[2] ), strand, float( fields[3] )
elif mode == "variableStep":
fields = line.split()
pos = int( fields[0] ) - 1
yield current_chrom, pos, pos + current_span, strand, float( fields[1] )
elif mode == "fixedStep":
yield current_chrom, current_pos, current_pos + current_span, strand, float( line.split()[0] )
current_pos += current_step
else:
raise ValueError("Unexpected input line: %s" % line.strip()) |
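A minimal usage sketch for the BED branch of IntervalReader; the variableStep and fixedStep branches also need the module-level parse_header helper, which is not part of this record:

from io import StringIO

bed = StringIO(
    "track name=example\n"
    "chr1\t10\t20\t0.5\n"
    "chr1\t20\t30\t1.5\t.\t-\n"
)
for chrom, start, end, strand, value in IntervalReader(bed):
    print(chrom, start, end, strand, value)
# chr1 10 20 + 0.5
# chr1 20 30 - 1.5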
def add_delta_step(self, delta: float):
"""
Inform Metrics class about time to step in environment.
"""
if self.delta_last_experience_collection:
self.delta_last_experience_collection += delta
else:
self.delta_last_experience_collection = delta | Inform Metrics class about time to step in environment. | Below is the instruction that describes the task:
### Input:
Inform Metrics class about time to step in environment.
### Response:
def add_delta_step(self, delta: float):
"""
Inform Metrics class about time to step in environment.
"""
if self.delta_last_experience_collection:
self.delta_last_experience_collection += delta
else:
self.delta_last_experience_collection = delta |
def get_abstract_locations(self, addr, size):
"""
Get a list of abstract locations that is within the range of [addr, addr + size]
This implementation is pretty slow. But since this method won't be called frequently, we can live with the bad
implementation for now.
:param addr: Starting address of the memory region.
:param size: Size of the memory region, in bytes.
:return: A list of covered AbstractLocation objects, or an empty list if there is none.
"""
ret = [ ]
for aloc in self._alocs.values():
for seg in aloc.segments:
if seg.offset >= addr and seg.offset < addr + size:
ret.append(aloc)
break
return ret | Get a list of abstract locations that is within the range of [addr, addr + size]
This implementation is pretty slow. But since this method won't be called frequently, we can live with the bad
implementation for now.
:param addr: Starting address of the memory region.
:param size: Size of the memory region, in bytes.
:return: A list of covered AbstractLocation objects, or an empty list if there is none. | Below is the instruction that describes the task:
### Input:
Get a list of abstract locations that is within the range of [addr, addr + size]
This implementation is pretty slow. But since this method won't be called frequently, we can live with the bad
implementation for now.
:param addr: Starting address of the memory region.
:param size: Size of the memory region, in bytes.
:return: A list of covered AbstractLocation objects, or an empty list if there is none.
### Response:
def get_abstract_locations(self, addr, size):
"""
Get a list of abstract locations that is within the range of [addr, addr + size]
This implementation is pretty slow. But since this method won't be called frequently, we can live with the bad
implementation for now.
:param addr: Starting address of the memory region.
:param size: Size of the memory region, in bytes.
:return: A list of covered AbstractLocation objects, or an empty list if there is none.
"""
ret = [ ]
for aloc in self._alocs.values():
for seg in aloc.segments:
if seg.offset >= addr and seg.offset < addr + size:
ret.append(aloc)
break
return ret |
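The aloc and segment containers above are angr internals; as an isolated sketch of the same membership test, using hypothetical stand-in classes rather than angr's real types:

from collections import namedtuple

Segment = namedtuple('Segment', 'offset size')
AbstractLocation = namedtuple('AbstractLocation', 'name segments')

def locations_in_window(alocs, addr, size):
    # keep an aloc if any of its segments starts inside [addr, addr + size)
    return [aloc for aloc in alocs
            if any(addr <= seg.offset < addr + size for seg in aloc.segments)]

alocs = [AbstractLocation('a', [Segment(0x10, 4)]),
         AbstractLocation('b', [Segment(0x40, 8)])]
print([a.name for a in locations_in_window(alocs, 0x0, 0x20)])  # ['a']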
def destroy(self):
'''Unsubscribes callback from observable'''
self._observable.release(self._key, self._callback)
self._observable = None
self._key = None
self._callback = None | Unsubscribes callback from observable | Below is the instruction that describes the task:
### Input:
Unsubscribes callback from observable
### Response:
def destroy(self):
'''Unsubscribes callback from observable'''
self._observable.release(self._key, self._callback)
self._observable = None
self._key = None
self._callback = None |
def write(self, version):
# type: (str) -> None
""" Write the project version to .py file.
This will regex search in the file for a
``__version__ = VERSION_STRING`` and substitute the version string
for the new version.
"""
with open(self.version_file) as fp:
content = fp.read()
ver_statement = "__version__ = '{}'".format(version)
new_content = RE_PY_VERSION.sub(ver_statement, content)
fs.write_file(self.version_file, new_content) | Write the project version to .py file.
This will regex search in the file for a
``__version__ = VERSION_STRING`` and substitute the version string
for the new version. | Below is the instruction that describes the task:
### Input:
Write the project version to .py file.
This will regex search in the file for a
``__version__ = VERSION_STRING`` and substitute the version string
for the new version.
### Response:
def write(self, version):
# type: (str) -> None
""" Write the project version to .py file.
This will regex search in the file for a
``__version__ = VERSION_STRING`` and substitute the version string
for the new version.
"""
with open(self.version_file) as fp:
content = fp.read()
ver_statement = "__version__ = '{}'".format(version)
new_content = RE_PY_VERSION.sub(ver_statement, content)
fs.write_file(self.version_file, new_content) |
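RE_PY_VERSION and fs.write_file are module-level helpers that this record does not show; a standalone equivalent of the same substitution, with an assumed pattern rather than the project's actual one, might look like:

import re

RE_PY_VERSION = re.compile(r"__version__\s*=\s*['\"][^'\"]*['\"]")

def write_version(version_file, version):
    with open(version_file) as fp:
        content = fp.read()
    new_content = RE_PY_VERSION.sub("__version__ = '{}'".format(version), content)
    with open(version_file, 'w') as fp:
        fp.write(new_content)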
def draw(self):
"""Draw the figures and those that are shared and have been changed"""
for fig in self.figs2draw:
fig.canvas.draw()
self._figs2draw.clear() | Draw the figures and those that are shared and have been changed | Below is the instruction that describes the task:
### Input:
Draw the figures and those that are shared and have been changed
### Response:
def draw(self):
"""Draw the figures and those that are shared and have been changed"""
for fig in self.figs2draw:
fig.canvas.draw()
self._figs2draw.clear() |
def get_status_code_and_schema_rst(self, responses):
'''
Function for prepare information about responses with example, prepare only
responses with status code from `101` to `299`
:param responses: -- dictionary that contains responses, with status code as key
:type responses: dict
:return:
'''
for status_code, response_schema in responses.items():
status_code = int(status_code)
schema = response_schema.get('schema', None)
status = HTTP_STATUS_CODES.get(status_code, None)
if status is None or not (100 < status_code < 300):
continue
self.write('**Example Response**', 1)
self.write('')
self.write('.. code-block:: http', 1)
self.write('')
self.write('HTTP/1.1 {} {}'.format(status_code, status), 2)
self.write('Vary: {}'.format(response_schema['description']), 2)
self.write('Content-Type: application/json', 2)
self.write('')
if schema:
self.schema_handler(schema)
else:
self.write('{}', self.indent_depth) | Function for prepare information about responses with example, prepare only
responses with status code from `101` to `299`
:param responses: -- dictionary that contains responses, with status code as key
:type responses: dict
:return: | Below is the instruction that describes the task:
### Input:
Function for prepare information about responses with example, prepare only
responses with status code from `101` to `299`
:param responses: -- dictionary that contains responses, with status code as key
:type responses: dict
:return:
### Response:
def get_status_code_and_schema_rst(self, responses):
'''
Function for prepare information about responses with example, prepare only
responses with status code from `101` to `299`
:param responses: -- dictionary that contains responses, with status code as key
:type responses: dict
:return:
'''
for status_code, response_schema in responses.items():
status_code = int(status_code)
schema = response_schema.get('schema', None)
status = HTTP_STATUS_CODES.get(status_code, None)
if status is None or not (100 < status_code < 300):
continue
self.write('**Example Response**', 1)
self.write('')
self.write('.. code-block:: http', 1)
self.write('')
self.write('HTTP/1.1 {} {}'.format(status_code, status), 2)
self.write('Vary: {}'.format(response_schema['description']), 2)
self.write('Content-Type: application/json', 2)
self.write('')
if schema:
self.schema_handler(schema)
else:
self.write('{}', self.indent_depth) |
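For a responses mapping such as {'200': {'description': 'Accept'}}, and assuming HTTP_STATUS_CODES maps 200 to 'OK', the method emits through the class's write helper, at the indentation levels passed above, roughly this reStructuredText fragment:

**Example Response**

.. code-block:: http

   HTTP/1.1 200 OK
   Vary: Accept
   Content-Type: application/json

followed by whatever schema_handler renders when a schema is present, or a literal {} otherwise.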
def optionsFromEnvironment(defaults=None):
"""Fetch root URL and credentials from the standard TASKCLUSTER_…
environment variables and return them in a format suitable for passing to a
client constructor."""
options = defaults or {}
credentials = options.get('credentials', {})
rootUrl = os.environ.get('TASKCLUSTER_ROOT_URL')
if rootUrl:
options['rootUrl'] = rootUrl
clientId = os.environ.get('TASKCLUSTER_CLIENT_ID')
if clientId:
credentials['clientId'] = clientId
accessToken = os.environ.get('TASKCLUSTER_ACCESS_TOKEN')
if accessToken:
credentials['accessToken'] = accessToken
certificate = os.environ.get('TASKCLUSTER_CERTIFICATE')
if certificate:
credentials['certificate'] = certificate
if credentials:
options['credentials'] = credentials
return options | Fetch root URL and credentials from the standard TASKCLUSTER_…
environment variables and return them in a format suitable for passing to a
client constructor. | Below is the instruction that describes the task:
### Input:
Fetch root URL and credentials from the standard TASKCLUSTER_…
environment variables and return them in a format suitable for passing to a
client constructor.
### Response:
def optionsFromEnvironment(defaults=None):
"""Fetch root URL and credentials from the standard TASKCLUSTER_…
environment variables and return them in a format suitable for passing to a
client constructor."""
options = defaults or {}
credentials = options.get('credentials', {})
rootUrl = os.environ.get('TASKCLUSTER_ROOT_URL')
if rootUrl:
options['rootUrl'] = rootUrl
clientId = os.environ.get('TASKCLUSTER_CLIENT_ID')
if clientId:
credentials['clientId'] = clientId
accessToken = os.environ.get('TASKCLUSTER_ACCESS_TOKEN')
if accessToken:
credentials['accessToken'] = accessToken
certificate = os.environ.get('TASKCLUSTER_CERTIFICATE')
if certificate:
credentials['certificate'] = certificate
if credentials:
options['credentials'] = credentials
return options |
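A minimal usage sketch; the environment values below are placeholders:

import os

os.environ['TASKCLUSTER_ROOT_URL'] = 'https://tc.example.com'
os.environ['TASKCLUSTER_CLIENT_ID'] = 'my-client'
os.environ['TASKCLUSTER_ACCESS_TOKEN'] = 'secret-token'

opts = optionsFromEnvironment({'maxRetries': 3})
# opts == {'maxRetries': 3,
#          'rootUrl': 'https://tc.example.com',
#          'credentials': {'clientId': 'my-client',
#                          'accessToken': 'secret-token'}}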
def connect_partial_nodirect(self, config):
"""
Create a partially-connected genome,
with (unless no hidden nodes) no direct input-output connections."""
assert 0 <= config.connection_fraction <= 1
all_connections = self.compute_full_connections(config, False)
shuffle(all_connections)
num_to_add = int(round(len(all_connections) * config.connection_fraction))
for input_id, output_id in all_connections[:num_to_add]:
connection = self.create_connection(config, input_id, output_id)
self.connections[connection.key] = connection | Create a partially-connected genome,
with (unless no hidden nodes) no direct input-output connections. | Below is the instruction that describes the task:
### Input:
Create a partially-connected genome,
with (unless no hidden nodes) no direct input-output connections.
### Response:
def connect_partial_nodirect(self, config):
"""
Create a partially-connected genome,
with (unless no hidden nodes) no direct input-output connections."""
assert 0 <= config.connection_fraction <= 1
all_connections = self.compute_full_connections(config, False)
shuffle(all_connections)
num_to_add = int(round(len(all_connections) * config.connection_fraction))
for input_id, output_id in all_connections[:num_to_add]:
connection = self.create_connection(config, input_id, output_id)
self.connections[connection.key] = connection |
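The genome and config machinery here is neat-python specific; in isolation, the sampling step is just shuffling the candidate pairs and keeping a fraction of them, for example:

from random import shuffle

def sample_connections(candidates, fraction):
    assert 0 <= fraction <= 1
    pool = list(candidates)
    shuffle(pool)
    return pool[:int(round(len(pool) * fraction))]

print(sample_connections([(0, 2), (1, 2), (0, 3), (1, 3)], 0.5))  # two randomly chosen pairs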
def iter_initial_relations(self, subject_graph):
"""Iterate over all valid initial relations for a match"""
vertex0 = 0
for vertex1 in range(subject_graph.num_vertices):
yield vertex0, vertex1 | Iterate over all valid initial relations for a match | Below is the instruction that describes the task:
### Input:
Iterate over all valid initial relations for a match
### Response:
def iter_initial_relations(self, subject_graph):
"""Iterate over all valid initial relations for a match"""
vertex0 = 0
for vertex1 in range(subject_graph.num_vertices):
yield vertex0, vertex1 |
def get_scalar(mesh, name, preference='cell', info=False, err=False):
""" Searches both point and cell data for an array
Parameters
----------
name : str
The name of the array to get the range.
preference : str, optional
When scalars is specified, this is the preferred scalar type to
search for in the dataset. Must be either ``'point'`` or ``'cell'``
info : bool
Return info about the scalar rather than the array itself.
err : bool
Boolean to control whether to throw an error if array is not present.
"""
parr = point_scalar(mesh, name)
carr = cell_scalar(mesh, name)
if isinstance(preference, str):
if preference in ['cell', 'c', 'cells']:
preference = CELL_DATA_FIELD
elif preference in ['point', 'p', 'points']:
preference = POINT_DATA_FIELD
else:
raise RuntimeError('Data field ({}) not supported.'.format(preference))
if all([parr is not None, carr is not None]):
if preference == CELL_DATA_FIELD:
if info:
return carr, CELL_DATA_FIELD
else:
return carr
elif preference == POINT_DATA_FIELD:
if info:
return parr, POINT_DATA_FIELD
else:
return parr
else:
raise RuntimeError('Data field ({}) not supported.'.format(preference))
arr = None
field = None
if parr is not None:
arr = parr
field = 0
elif carr is not None:
arr = carr
field = 1
elif err:
raise KeyError('Data scalar ({}) not present in this dataset.'.format(name))
if info:
return arr, field
return arr | Searches both point and cell data for an array
Parameters
----------
name : str
The name of the array to get the range.
preference : str, optional
When scalars is specified, this is the preferred scalar type to
search for in the dataset. Must be either ``'point'`` or ``'cell'``
info : bool
Return info about the scalar rather than the array itself.
err : bool
Boolean to control whether to throw an error if array is not present. | Below is the instruction that describes the task:
### Input:
Searches both point and cell data for an array
Parameters
----------
name : str
The name of the array to get the range.
preference : str, optional
When scalars is specified, this is the preferred scalar type to
search for in the dataset. Must be either ``'point'`` or ``'cell'``
info : bool
Return info about the scalar rather than the array itself.
err : bool
Boolean to control whether to throw an error if array is not present.
### Response:
def get_scalar(mesh, name, preference='cell', info=False, err=False):
""" Searches both point and cell data for an array
Parameters
----------
name : str
The name of the array to get the range.
preference : str, optional
When scalars is specified, this is the preferred scalar type to
search for in the dataset. Must be either ``'point'`` or ``'cell'``
info : bool
Return info about the scalar rather than the array itself.
err : bool
Boolean to control whether to throw an error if array is not present.
"""
parr = point_scalar(mesh, name)
carr = cell_scalar(mesh, name)
if isinstance(preference, str):
if preference in ['cell', 'c', 'cells']:
preference = CELL_DATA_FIELD
elif preference in ['point', 'p', 'points']:
preference = POINT_DATA_FIELD
else:
raise RuntimeError('Data field ({}) not supported.'.format(preference))
if all([parr is not None, carr is not None]):
if preference == CELL_DATA_FIELD:
if info:
return carr, CELL_DATA_FIELD
else:
return carr
elif preference == POINT_DATA_FIELD:
if info:
return parr, POINT_DATA_FIELD
else:
return parr
else:
raise RuntimeError('Data field ({}) not supported.'.format(preference))
arr = None
field = None
if parr is not None:
arr = parr
field = 0
elif carr is not None:
arr = carr
field = 1
elif err:
raise KeyError('Data scalar ({}) not present in this dataset.'.format(name))
if info:
return arr, field
return arr |
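A hedged usage sketch; the attribute names follow the older vtki / early PyVista API this helper shipped with (newer PyVista renamed point_arrays to point_data), so treat them as assumptions:

import pyvista as pv

mesh = pv.Sphere()
mesh.point_arrays['elevation'] = mesh.points[:, 2]   # attach a point scalar

arr = get_scalar(mesh, 'elevation', preference='point')   # the raw array
arr, field = get_scalar(mesh, 'elevation', info=True)     # field: 0 = point data, 1 = cell data
# get_scalar(mesh, 'missing', err=True) would raise KeyError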
def eligible_cost(self, column=None, value=None, **kwargs):
"""
The assistance dollar amounts by eligible cost category.
>>> GICS().eligible_cost('amount', 100000)
"""
return self._resolve_call('GIC_ELIGIBLE_COST', column, value, **kwargs) | The assistance dollar amounts by eligible cost category.
>>> GICS().eligible_cost('amount', 100000) | Below is the instruction that describes the task:
### Input:
The assistance dollar amounts by eligible cost category.
>>> GICS().eligible_cost('amount', 100000)
### Response:
def eligible_cost(self, column=None, value=None, **kwargs):
"""
The assistance dollar amounts by eligible cost category.
>>> GICS().eligible_cost('amount', 100000)
"""
return self._resolve_call('GIC_ELIGIBLE_COST', column, value, **kwargs) |
def bez2poly(bez, numpy_ordering=True, return_poly1d=False):
"""Converts a Bezier object or tuple of Bezier control points to a tuple
of coefficients of the expanded polynomial.
return_poly1d : returns a numpy.poly1d object. This makes computations
of derivatives/anti-derivatives and many other operations quite quick.
numpy_ordering : By default (to accommodate numpy) the coefficients will
be output in reverse standard order.
Note: This function is redundant thanks to the .poly() method included
with all bezier segment classes."""
if is_bezier_segment(bez):
bez = bez.bpoints()
return bezier2polynomial(bez,
numpy_ordering=numpy_ordering,
return_poly1d=return_poly1d) | Converts a Bezier object or tuple of Bezier control points to a tuple
of coefficients of the expanded polynomial.
return_poly1d : returns a numpy.poly1d object. This makes computations
of derivatives/anti-derivatives and many other operations quite quick.
numpy_ordering : By default (to accommodate numpy) the coefficients will
be output in reverse standard order.
Note: This function is redundant thanks to the .poly() method included
with all bezier segment classes. | Below is the instruction that describes the task:
### Input:
Converts a Bezier object or tuple of Bezier control points to a tuple
of coefficients of the expanded polynomial.
return_poly1d : returns a numpy.poly1d object. This makes computations
of derivatives/anti-derivatives and many other operations quite quick.
numpy_ordering : By default (to accommodate numpy) the coefficients will
be output in reverse standard order.
Note: This function is redundant thanks to the .poly() method included
with all bezier segment classes.
### Response:
def bez2poly(bez, numpy_ordering=True, return_poly1d=False):
"""Converts a Bezier object or tuple of Bezier control points to a tuple
of coefficients of the expanded polynomial.
return_poly1d : returns a numpy.poly1d object. This makes computations
of derivatives/anti-derivatives and many other operations quite quick.
numpy_ordering : By default (to accommodate numpy) the coefficients will
be output in reverse standard order.
Note: This function is redundant thanks to the .poly() method included
with all bezier segment classes."""
if is_bezier_segment(bez):
bez = bez.bpoints()
return bezier2polynomial(bez,
numpy_ordering=numpy_ordering,
return_poly1d=return_poly1d) |
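A small usage sketch with svgpathtools; the control points are arbitrary complex numbers:

from svgpathtools import CubicBezier

seg = CubicBezier(0 + 0j, 1 + 1j, 2 + 1j, 3 + 0j)
coeffs = bez2poly(seg)                    # coefficients, highest order first (numpy ordering)
p = bez2poly(seg, return_poly1d=True)     # numpy.poly1d; supports p.deriv(), p(0.5), ...
print(p(0.0), p(1.0))                     # the curve's endpoints: 0j and (3+0j)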
def get_recipe(cls, name, ctx):
'''Returns the Recipe with the given name, if it exists.'''
name = name.lower()
if not hasattr(cls, "recipes"):
cls.recipes = {}
if name in cls.recipes:
return cls.recipes[name]
recipe_file = None
for recipes_dir in cls.recipe_dirs(ctx):
if not exists(recipes_dir):
continue
# Find matching folder (may differ in case):
for subfolder in listdir(recipes_dir):
if subfolder.lower() == name:
recipe_file = join(recipes_dir, subfolder, '__init__.py')
if exists(recipe_file):
name = subfolder # adapt to actual spelling
break
recipe_file = None
if recipe_file is not None:
break
if not recipe_file:
raise ValueError('Recipe does not exist: {}'.format(name))
mod = import_recipe('pythonforandroid.recipes.{}'.format(name), recipe_file)
if len(logger.handlers) > 1:
logger.removeHandler(logger.handlers[1])
recipe = mod.recipe
recipe.ctx = ctx
cls.recipes[name.lower()] = recipe
return recipe | Returns the Recipe with the given name, if it exists. | Below is the instruction that describes the task:
### Input:
Returns the Recipe with the given name, if it exists.
### Response:
def get_recipe(cls, name, ctx):
'''Returns the Recipe with the given name, if it exists.'''
name = name.lower()
if not hasattr(cls, "recipes"):
cls.recipes = {}
if name in cls.recipes:
return cls.recipes[name]
recipe_file = None
for recipes_dir in cls.recipe_dirs(ctx):
if not exists(recipes_dir):
continue
# Find matching folder (may differ in case):
for subfolder in listdir(recipes_dir):
if subfolder.lower() == name:
recipe_file = join(recipes_dir, subfolder, '__init__.py')
if exists(recipe_file):
name = subfolder # adapt to actual spelling
break
recipe_file = None
if recipe_file is not None:
break
if not recipe_file:
raise ValueError('Recipe does not exist: {}'.format(name))
mod = import_recipe('pythonforandroid.recipes.{}'.format(name), recipe_file)
if len(logger.handlers) > 1:
logger.removeHandler(logger.handlers[1])
recipe = mod.recipe
recipe.ctx = ctx
cls.recipes[name.lower()] = recipe
return recipe |
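Calling get_recipe for real requires a python-for-android build Context; the subtle part, the case-insensitive folder lookup, can be sketched on its own with a hypothetical helper that is not part of the project:

from os import listdir
from os.path import exists, join

def find_recipe_init(recipes_dir, name):
    # mirror the case-insensitive match used above
    for subfolder in listdir(recipes_dir):
        candidate = join(recipes_dir, subfolder, '__init__.py')
        if subfolder.lower() == name.lower() and exists(candidate):
            return subfolder, candidate
    return None, None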