def make_root(self, name): # noqa: D302
r"""
Make a sub-node the root node of the tree.
All nodes not belonging to the sub-tree are deleted
:param name: New root node name
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.make_root('root.branch1')
>>> print(tobj)
root.branch1 (*)
├leaf1
│└subleaf1 (*)
└leaf2 (*)
└subleaf2
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
if (name != self.root_name) and (self._node_in_tree(name)):
for key in [node for node in self.nodes if node.find(name) != 0]:
del self._db[key]
self._db[name]["parent"] = ""
self._root = name
self._root_hierarchy_length = len(
self.root_name.split(self._node_separator)
)
def print_node(self, name): # noqa: D302
r"""
Print node information (parent, children and data).
:param name: Node name
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> print(tobj.print_node('root.branch1'))
Name: root.branch1
Parent: root
Children: leaf1, leaf2
Data: [5, 7]
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
node = self._db[name]
children = (
[self._split_node_name(child)[-1] for child in node["children"]]
if node["children"]
else node["children"]
)
data = (
node["data"][0]
if node["data"] and (len(node["data"]) == 1)
else node["data"]
)
return (
"Name: {node_name}\n"
"Parent: {parent_name}\n"
"Children: {children_list}\n"
"Data: {node_data}".format(
node_name=name,
parent_name=node["parent"] if node["parent"] else None,
children_list=", ".join(children) if children else None,
node_data=data if data else None,
)
)
def rename_node(self, name, new_name): # noqa: D302
r"""
Rename a tree node.
It is typical to have a root node name with more than one hierarchy
level after using :py:meth:`ptrie.Trie.make_root`. In this instance the
root node *can* be renamed as long as the new root name has the same
number of hierarchy levels as the existing root name, or fewer
:param name: Node name to rename
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Argument \`new_name\` has an illegal root node)
* RuntimeError (Argument \`new_name\` is an illegal root node name)
* RuntimeError (Argument \`new_name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
* RuntimeError (Node *[new_name]* already exists)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.rename_node(
... 'root.branch1.leaf1',
... 'root.branch1.mapleleaf1'
... )
>>> print(tobj)
root
├branch1 (*)
│├leaf2 (*)
││└subleaf2
│└mapleleaf1
│ └subleaf1 (*)
└branch2
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
if self._validate_node_name(new_name):
raise RuntimeError("Argument `new_name` is not valid")
self._node_in_tree(name)
if self.in_tree(new_name) and (name != self.root_name):
raise RuntimeError("Node {0} already exists".format(new_name))
sep = self._node_separator
if (name.split(sep)[:-1] != new_name.split(sep)[:-1]) and (
name != self.root_name
):
raise RuntimeError("Argument `new_name` has an illegal root node")
old_hierarchy_length = len(name.split(self._node_separator))
new_hierarchy_length = len(new_name.split(self._node_separator))
if (name == self.root_name) and (old_hierarchy_length < new_hierarchy_length):
raise RuntimeError("Argument `new_name` is an illegal root node name")
self._rename_node(name, new_name)
def search_tree(self, name): # noqa: D302
r"""
Search tree for all nodes with a specific name.
:param name: Node name to search for
:type name: :ref:`NodeName`
:raises: RuntimeError (Argument \`name\` is not valid)
For example:
>>> from __future__ import print_function
>>> import pprint, ptrie
>>> tobj = ptrie.Trie('/')
>>> tobj.add_nodes([
... {'name':'root', 'data':[]},
... {'name':'root/anode', 'data':7},
... {'name':'root/bnode', 'data':[]},
... {'name':'root/cnode', 'data':[]},
... {'name':'root/bnode/anode', 'data':['a', 'b', 'c']},
... {'name':'root/cnode/anode/leaf', 'data':True}
... ])
>>> print(tobj)
root
├anode (*)
├bnode
│└anode (*)
└cnode
└anode
└leaf (*)
>>> pprint.pprint(tobj.search_tree('anode'), width=40)
['root/anode',
'root/bnode/anode',
'root/cnode/anode',
'root/cnode/anode/leaf']
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
return self._search_tree(name)
def find_root(filename, target='bids'):
"""Find base directory (root) for a filename.
Parameters
----------
filename : instance of Path
search the root for this file
target: str
'bids' (the directory containing 'dataset_description.json'), 'subject' (the
directory starting with 'sub-'), 'session' (the directory starting with
'ses-')
Returns
-------
Path
path of the target directory
"""
lg.debug(f'Searching root in {filename}')
if target == 'bids' and (filename / 'dataset_description.json').exists():
return filename
elif filename.is_dir():
pattern = target[:3] + '-'
if filename.stem.startswith(pattern):
return filename
return find_root(filename.parent, target)
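# For illustration, a minimal sketch of find_root on a throwaway directory
# tree (the layout below is hypothetical; only 'dataset_description.json'
# at the dataset root is required by the code above):
from pathlib import Path
import tempfile

base = Path(tempfile.mkdtemp())
ieeg = base / 'sub-01' / 'ses-01' / 'ieeg'
ieeg.mkdir(parents=True)
(base / 'dataset_description.json').touch()   # marks the BIDS root

assert find_root(ieeg) == base                             # climbs to dataset root
assert find_root(ieeg, target='subject') == base / 'sub-01'
assert find_root(ieeg, target='session') == base / 'sub-01' / 'ses-01'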
def find_in_bids(filename, pattern=None, generator=False, upwards=False,
wildcard=True, **kwargs):
"""Find nearest file matching some criteria.
Parameters
----------
filename : instance of Path
search the root for this file
pattern : str
glob string for search criteria of the filename of interest (remember
to include '*'). The pattern is passed directly to rglob.
generator : bool
return a generator of all matching filenames instead of a single Path
wildcard : bool
use wildcards for unspecified fields or not (if True, add "_*_" between
fields)
upwards : bool
whether to keep on searching upwards in the directory tree
kwargs : dict
BIDS key-value fields used to build the search pattern (e.g. subject='01')
Returns
-------
Path
filename matching the pattern
"""
if upwards and generator:
raise ValueError('You cannot search upwards and have a generator')
if pattern is None:
pattern = _generate_pattern(wildcard, kwargs)
lg.debug(f'Searching {pattern} in {filename}')
if upwards and filename == find_root(filename):
raise FileNotFoundError(f'Could not find file matching {pattern} in {filename}')
if generator:
return filename.rglob(pattern)
matches = list(filename.rglob(pattern))
if len(matches) == 1:
return matches[0]
elif len(matches) == 0:
if upwards:
return find_in_bids(filename.parent, pattern=pattern, upwards=upwards)
else:
raise FileNotFoundError(f'Could not find file matching {pattern} in {filename}')
else:
matches_str = '"\n\t"'.join(str(x) for x in matches)
raise FileNotFoundError(f'Multiple files matching "{pattern}":\n\t"{matches_str}"')
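# Reusing the toy tree from the find_root sketch above: drop in a data file
# and locate it with an explicit glob pattern (kwargs would normally be
# expanded by the module-internal _generate_pattern helper, not shown here):
(ieeg / 'sub-01_ses-01_task-rest_ieeg.eeg').touch()

match = find_in_bids(base, pattern='*task-rest*')
print(match)   # .../sub-01/ses-01/ieeg/sub-01_ses-01_task-rest_ieeg.eeg

# generator=True yields every match instead of demanding exactly one
for f in find_in_bids(base, pattern='*_ieeg*', generator=True):
    print(f)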
def define_format(self, plotStyle, plotSize):
#Default sizes for computer
sizing_dict = {}
sizing_dict['figure.figsize'] = (14, 8)
sizing_dict['legend.fontsize'] = 15
sizing_dict['axes.labelsize'] = 20
sizing_dict['axes.titlesize'] = 24
sizing_dict['xtick.labelsize'] = 14
sizing_dict['ytick.labelsize'] = 14
self.colorVector = {
'iron':'#4c4c4c',
'silver':'#cccccc',
'dark blue':'#0072B2',
'green':'#009E73',
'orangish':'#D55E00',
'pink':'#CC79A7',
'yellow':'#F0E442',
'cyan':'#56B4E9',
'olive':'#bcbd22',
'grey':'#7f7f7f',
'skin':'#FFB5B8'}
#sizing_dict['text.usetex'] = True
#--Update the colors/format
if plotStyle is None:
self.ColorVector = [None, None, None]
elif plotStyle == 'dark':
plt.style.use('dark_background')
elif plotStyle == 'night':
plt.style.use('seaborn-colorblind')
iron_color = '#4c4c4c' #Iron: (76 76 76)
silver_color = '#cccccc' #Silver: (204 204 204)
sizing_dict['axes.facecolor'] = iron_color
sizing_dict['figure.facecolor'] = iron_color
sizing_dict['axes.edgecolor'] = silver_color
sizing_dict['text.color'] = silver_color
sizing_dict['axes.labelcolor'] = silver_color
sizing_dict['xtick.color'] = silver_color
sizing_dict['ytick.color'] = silver_color
sizing_dict['axes.edgecolor'] = silver_color
#'plt.rc('axes', prop_cycle=(cycler('color', ['r', 'g', 'b', 'y']) + cycler('linestyle', ['-', '--', ':', '-.'])))'
#This should be the set up for the cycler we just need to add the colors
#axes.prop_cycle : cycler('color', 'bgrcmyk')
elif plotStyle == 'colorblind':
plt.style.use('seaborn-colorblind')
else:
plt.style.use(plotStyle)
#--Load particular configuration for this plot
if plotSize == 'medium':
rcParams.update(sizing_dict)
elif type(plotSize) is dict:
sizing_dict.update(plotSize)
rcParams.update(sizing_dict)
'''
Seaborn color blind
#0072B2 dark blue
#009E73 green
#D55E00 orangish
#CC79A7 pink
#F0E442 yellow
#56B4E9 cyan
#bcbd22 olive #additional
#7f7f7f grey
#FFB5B8 skin
'''
'''
Matplotlib default palete
#17becf dark blue
#bcbd22 orange
#2ca02c green
#e377c2 red
#8c564b purple
#9467bd brown
#d62728 pink
#7f7f7f grey
#ff7f0e olive
#1f77b4 cyan
'''
'''
--These are matplotlib styles
seaborn-darkgrid
seaborn-notebook
classic
seaborn-ticks
grayscale
bmh
seaborn-talk
dark_background
ggplot
fivethirtyeight
seaborn-colorblind
seaborn-deep
seaborn-whitegrid
seaborn-bright
seaborn-poster
seaborn-muted
seaborn-paper
seaborn-white
seaborn-pastel
seaborn-dark
seaborn
seaborn-dark-palette
'''
def get_xyz(self, list_of_names=None):
"""Get xyz coordinates for these electrodes
Parameters
----------
list_of_names : list of str
list of electrode names to use
Returns
-------
list of tuples of 3 floats (x, y, z)
list of xyz coordinates for all the electrodes
TODO
----
coordinate system of electrodes
"""
if list_of_names is not None:
filter_lambda = lambda x: x['name'] in list_of_names
else:
filter_lambda = None
return self.electrodes.get(filter_lambda=filter_lambda,
map_lambda=lambda e: (float(e['x']),
float(e['y']),
float(e['z'])))
def bces(y1,y1err,y2,y2err,cerr):
"""
Does the entire regression calculation for 4 slopes:
OLS(Y|X), OLS(X|Y), bisector, orthogonal.
Fitting form: Y=AX+B.
Usage:
>>> a,b,aerr,berr,covab=bces(x,xerr,y,yerr,cov)
Output:
- a,b : best-fit parameters a,b of the linear regression
- aerr,berr : the standard deviations in a,b
- covab : the covariance between a and b (e.g. for plotting confidence bands)
Arguments:
- x,y : data
- xerr,yerr: measurement errors affecting x and y
- cov : covariance between the measurement errors
(all are arrays)
v1 Mar 2012: ported from bces_regress.f. Added covariance output.
Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
# Arrays holding the code main results for each method:
# Elements: 0-Y|X, 1-X|Y, 2-bisector, 3-orthogonal
a,b,avar,bvar,covarxiz,covar_ba=numpy.zeros(4),numpy.zeros(4),numpy.zeros(4),numpy.zeros(4),numpy.zeros(4),numpy.zeros(4)
# Lists holding the xi and zeta arrays for each method above
xi,zeta=[],[]
# Calculate sigma's for datapoints using length of conf. intervals
sig11var = numpy.mean( y1err**2 )
sig22var = numpy.mean( y2err**2 )
sig12var = numpy.mean( cerr )
# Covariance of Y1 (X) and Y2 (Y)
covar_y1y2 = numpy.mean( (y1-y1.mean())*(y2-y2.mean()) )
# Compute the regression slopes
a[0] = (covar_y1y2 - sig12var)/(y1.var() - sig11var) # Y|X
a[1] = (y2.var() - sig22var)/(covar_y1y2 - sig12var) # X|Y
a[2] = ( a[0]*a[1] - 1.0 + numpy.sqrt((1.0 + a[0]**2)*(1.0 + a[1]**2)) ) / (a[0]+a[1]) # bisector
if covar_y1y2<0:
sign = -1.
else:
sign = 1.
a[3] = 0.5*((a[1]-(1./a[0])) + sign*numpy.sqrt(4.+(a[1]-(1./a[0]))**2)) # orthogonal
# Compute intercepts
for i in range(4):
b[i]=y2.mean()-a[i]*y1.mean()
# Set up variables to calculate standard deviations of slope/intercept
xi.append( ( (y1-y1.mean()) * (y2-a[0]*y1-b[0]) + a[0]*y1err**2 ) / (y1.var()-sig11var) ) # Y|X
xi.append( ( (y2-y2.mean()) * (y2-a[1]*y1-b[1]) - y2err**2 ) / covar_y1y2 ) # X|Y
xi.append( xi[0] * (1.+a[1]**2)*a[2] / ((a[0]+a[1])*numpy.sqrt((1.+a[0]**2)*(1.+a[1]**2))) + xi[1] * (1.+a[0]**2)*a[2] / ((a[0]+a[1])*numpy.sqrt((1.+a[0]**2)*(1.+a[1]**2))) ) # bisector
xi.append( xi[0] * a[3]/(a[0]**2*numpy.sqrt(4.+(a[1]-1./a[0])**2)) + xi[1]*a[3]/numpy.sqrt(4.+(a[1]-1./a[0])**2) ) # orthogonal
for i in range(4):
zeta.append( y2 - a[i]*y1 - y1.mean()*xi[i] )
for i in range(4):
# Calculate variance for all a and b
avar[i]=xi[i].var()/xi[i].size
bvar[i]=zeta[i].var()/zeta[i].size
# Sample covariance obtained from xi and zeta (paragraph after equation 15 in AB96)
covarxiz[i]=numpy.mean( (xi[i]-xi[i].mean()) * (zeta[i]-zeta[i].mean()) )
# Covariance between a and b (equation after eq. 15 in AB96)
covar_ab=covarxiz/y1.size
return a,b,numpy.sqrt(avar),numpy.sqrt(bvar),covar_ab
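# A small self-contained check of bces() on synthetic data (the slope,
# intercept and error levels below are arbitrary choices for the sketch):
import numpy

numpy.random.seed(0)
n = 200
true_a, true_b = 2.0, 1.0
xt = numpy.random.uniform(0.0, 10.0, n)          # latent x
x = xt + numpy.random.normal(0.0, 0.2, n)        # observed x with noise
y = true_a * xt + true_b + numpy.random.normal(0.0, 0.5, n)
xerr = numpy.full(n, 0.2)
yerr = numpy.full(n, 0.5)
cov = numpy.zeros(n)                             # uncorrelated errors

a, b, aerr, berr, covab = bces(x, xerr, y, yerr, cov)
# Row order: 0 -> Y|X, 1 -> X|Y, 2 -> bisector, 3 -> orthogonal
print('Y|X: a = %.2f +- %.2f, b = %.2f +- %.2f' % (a[0], aerr[0], b[0], berr[0]))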
def bootstrap(v):
"""
Constructs Monte Carlo simulated data set using the
Bootstrap algorithm.
Usage:
>>> bootstrap(x)
where x is either an array or a list of arrays. If it is a
list, the code returns the corresponding list of bootstrapped
arrays assuming that the same position in these arrays map the
same "physical" object.
Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
if type(v)==list:
vboot=[]	# list of bootstrapped arrays
n=v[0].size
iran=scipy.random.randint(0,n,n) # Array of random indexes
for x in v: vboot.append(x[iran])
else: # if v is an array, not a list of arrays
n=v.size
iran=scipy.random.randint(0,n,n) # Array of random indexes
vboot=v[iran]
return vboot
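# Quick illustration of bootstrap() on aligned arrays (relies on the
# module's own scipy/numpy imports; the resampled rows vary with the seed):
import numpy

x = numpy.arange(5.0)
y = x ** 2

xb, yb = bootstrap([x, y])
# The same random row indices are applied to both arrays, so each
# resampled pair still satisfies yb[i] == xb[i] ** 2.
print(xb)
print(yb)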
def bcesboot(y1,y1err,y2,y2err,cerr,nsim=10000):
"""
Does the BCES with bootstrapping.
Usage:
>>> a,b,aerr,berr,covab=bcesboot(x,xerr,y,yerr,cov,nsim)
:param x,y: data
:param xerr,yerr: measurement errors affecting x and y
:param cov: covariance between the measurement errors (all are arrays)
:param nsim: number of Monte Carlo simulations (bootstraps)
:returns: a,b -- best-fit parameters a,b of the linear regression
:returns: aerr,berr -- the standard deviations in a,b
:returns: covab -- the covariance between a and b (e.g. for plotting confidence bands)
.. note:: this method is definitely not nearly as fast as bces_regress.f. Needs to be optimized. Maybe adapt the fortran routine using f2python?
v1 Mar 2012: ported from bces_regress.f. Added covariance output.
Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
# Progress bar initialization
"""
My convention for storing the results of the bces code below as
matrixes for processing later are as follow:
simulation\method y|x x|y bisector orthogonal
sim0 ...
Am = sim1 ...
sim2 ...
sim3 ...
"""
for i in range(nsim):
[y1sim,y1errsim,y2sim,y2errsim,cerrsim]=bootstrap([y1,y1err,y2,y2err,cerr])
asim,bsim,errasim,errbsim,covabsim=bces(y1sim,y1errsim,y2sim,y2errsim,cerrsim)
if i==0:
# Initialize the matrixes
am,bm=asim.copy(),bsim.copy()
else:
am=numpy.vstack((am,asim))
bm=numpy.vstack((bm,bsim))
# Progress bar
# Bootstrapping results
a=numpy.array([ am[:,0].mean(),am[:,1].mean(),am[:,2].mean(),am[:,3].mean() ])
b=numpy.array([ bm[:,0].mean(),bm[:,1].mean(),bm[:,2].mean(),bm[:,3].mean() ])
# Error from unbiased sample variances
erra,errb,covab=numpy.zeros(4),numpy.zeros(4),numpy.zeros(4)
for i in range(4):
erra[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(am[:,i]**2)-nsim*(am[:,i].mean())**2 ))
errb[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(bm[:,i]**2)-nsim*(bm[:,i].mean())**2 ))
covab[i]=1./(nsim-1) * ( numpy.sum(am[:,i]*bm[:,i])-nsim*am[:,i].mean()*bm[:,i].mean() )
return a,b,erra,errb,covab
def bcesboot_backup(y1,y1err,y2,y2err,cerr,nsim=10000):
"""
Does the BCES with bootstrapping.
Usage:
>>> a,b,aerr,berr,covab=bcesboot(x,xerr,y,yerr,cov,nsim)
:param x,y: data
:param xerr,yerr: measurement errors affecting x and y
:param cov: covariance between the measurement errors (all are arrays)
:param nsim: number of Monte Carlo simulations (bootstraps)
:returns: a,b -- best-fit parameters a,b of the linear regression
:returns: aerr,berr -- the standard deviations in a,b
:returns: covab -- the covariance between a and b (e.g. for plotting confidence bands)
.. note:: this method is definitely not nearly as fast as bces_regress.f. Needs to be optimized. Maybe adapt the fortran routine using f2python?
v1 Mar 2012: ported from bces_regress.f. Added covariance output.
Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
import fish
# Progress bar initialization
peixe = fish.ProgressFish(total=nsim)
print "Bootstrapping progress:"
"""
My convention for storing the results of the bces code below as
matrixes for processing later are as follow:
simulation\method y|x x|y bisector orthogonal
sim0 ...
Am = sim1 ...
sim2 ...
sim3 ...
"""
for i in range(nsim):
[y1sim,y1errsim,y2sim,y2errsim,cerrsim]=bootstrap([y1,y1err,y2,y2err,cerr])
asim,bsim,errasim,errbsim,covabsim=bces(y1sim,y1errsim,y2sim,y2errsim,cerrsim)
if i==0:
# Initialize the matrixes
am,bm=asim.copy(),bsim.copy()
else:
am=numpy.vstack((am,asim))
bm=numpy.vstack((bm,bsim))
# Progress bar
peixe.animate(amount=i)
# Bootstrapping results
a=numpy.array([ am[:,0].mean(),am[:,1].mean(),am[:,2].mean(),am[:,3].mean() ])
b=numpy.array([ bm[:,0].mean(),bm[:,1].mean(),bm[:,2].mean(),bm[:,3].mean() ])
# Error from unbiased sample variances
erra,errb,covab=numpy.zeros(4),numpy.zeros(4),numpy.zeros(4)
for i in range(4):
erra[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(am[:,i]**2)-nsim*(am[:,i].mean())**2 ))
errb[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(bm[:,i]**2)-nsim*(bm[:,i].mean())**2 ))
covab[i]=1./(nsim-1) * ( numpy.sum(am[:,i]*bm[:,i])-nsim*am[:,i].mean()*bm[:,i].mean() )
return a,b,erra,errb,covab
def ab(x):
"""
This method is the big bottleneck of the parallel BCES code. That's the
reason why I put these calculations in a separate method, in order to
distribute this among the cores. In the original BCES method, this is
inside the main routine.
Argument:
[y1,y1err,y2,y2err,cerr,nsim]
where nsim is the number of bootstrapping trials sent to each core.
:returns: am,bm : the matrixes with slope and intercept where each line corresponds to a bootstrap trial and each column maps a different BCES method (ort, y|x etc).
Be very careful and do not use lambda functions when calling this
method and passing it to multiprocessing or ipython.parallel!
I spent >2 hours figuring out why the code was not working until I
realized the reason was the use of lambda functions.
"""
y1,y1err,y2,y2err,cerr,nsim=x[0],x[1],x[2],x[3],x[4],x[5]
for i in range(nsim):
[y1sim,y1errsim,y2sim,y2errsim,cerrsim]=bootstrap([y1,y1err,y2,y2err,cerr])
asim,bsim,errasim,errbsim,covabsim=bces(y1sim,y1errsim,y2sim,y2errsim,cerrsim)
if i==0:
# Initialize the matrixes
am,bm=asim.copy(),bsim.copy()
else:
am=numpy.vstack((am,asim))
bm=numpy.vstack((bm,bsim))
return am,bm
def bcesp(y1,y1err,y2,y2err,cerr,nsim=10000):
"""
Parallel implementation of the BCES with bootstrapping.
Divide the bootstraps equally among the threads (cores) of
the machine. It will automatically detect the number of
cores available.
Usage:
>>> a,b,aerr,berr,covab=bcesp(x,xerr,y,yerr,cov,nsim)
:param x,y: data
:param xerr,yerr: measurement errors affecting x and y
:param cov: covariance between the measurement errors (all are arrays)
:param nsim: number of Monte Carlo simulations (bootstraps)
:returns: a,b - best-fit parameters a,b of the linear regression
:returns: aerr,berr - the standard deviations in a,b
:returns: covab - the covariance between a and b (e.g. for plotting confidence bands)
.. seealso:: Check out ~/work/projects/playground/parallel python/bcesp.py for the original, testing, code. I deleted some line from there to make the "production" version.
* v1 Mar 2012: serial version ported from bces_regress.f. Added covariance output.
* v2 May 3rd 2012: parallel version ported from nemmen.bcesboot.
.. codeauthor: Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
import time # for benchmarking
import multiprocessing
print "BCES,", nsim,"trials... ",
tic=time.time()
# Find out number of cores available
ncores=multiprocessing.cpu_count()
# We will divide the processing into how many parts?
n=2*ncores
"""
Must create lists that will be distributed among the many
cores with structure
core1 <- [y1,y1err,y2,y2err,cerr,nsim/n]
core2 <- [y1,y1err,y2,y2err,cerr,nsim/n]
etc...
"""
pargs=[] # this is a list of lists!
for i in range(n):
pargs.append([y1,y1err,y2,y2err,cerr,nsim/n])
# Initializes the parallel engine
pool = multiprocessing.Pool(processes=ncores) # multiprocessing package
"""
Each core processes ab(input)
return matrixes Am,Bm with the results of nsim/n
presult[i][0] = Am with nsim/n lines
presult[i][1] = Bm with nsim/n lines
"""
presult=pool.map(ab, pargs) # multiprocessing
pool.close() # close the parallel engine
# vstack the matrixes processed from all cores
i=0
for m in presult:
if i==0:
# Initialize the matrixes
am,bm=m[0].copy(),m[1].copy()
else:
am=numpy.vstack((am,m[0]))
bm=numpy.vstack((bm,m[1]))
i=i+1
# Computes the bootstrapping results on the stacked matrixes
a=numpy.array([ am[:,0].mean(),am[:,1].mean(),am[:,2].mean(),am[:,3].mean() ])
b=numpy.array([ bm[:,0].mean(),bm[:,1].mean(),bm[:,2].mean(),bm[:,3].mean() ])
# Error from unbiased sample variances
erra,errb,covab=numpy.zeros(4),numpy.zeros(4),numpy.zeros(4)
for i in range(4):
erra[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(am[:,i]**2)-nsim*(am[:,i].mean())**2 ))
errb[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(bm[:,i]**2)-nsim*(bm[:,i].mean())**2 ))
covab[i]=1./(nsim-1) * ( numpy.sum(am[:,i]*bm[:,i])-nsim*am[:,i].mean()*bm[:,i].mean() )
print "%f s" % (time.time() - tic)
return a,b,erra,errb,covab
def mean(data):
"""Return the sample arithmetic mean of data."""
#: http://stackoverflow.com/a/27758326
n = len(data)
if n < 1:
raise ValueError('mean requires at least one data point')
return sum(data)/n
def pstdev(data):
"""Calculates the population standard deviation."""
#: http://stackoverflow.com/a/27758326
n = len(data)
if n < 2:
raise ValueError('variance requires at least two data points')
ss = _ss(data)
pvar = ss/n # the population variance
return pvar**0.5
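# pstdev() relies on a helper _ss that is not shown in this listing. A
# plausible definition, matching the Stack Overflow recipe both functions
# reference (assumed, not part of the original source):
def _ss(data):
    """Return the sum of square deviations of sequence data."""
    c = mean(data)
    return sum((x - c) ** 2 for x in data)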
def median(lst):
""" Calcuates the median value in a @lst """
#: http://stackoverflow.com/a/24101534
sortedLst = sorted(lst)
lstLen = len(lst)
index = (lstLen - 1) // 2
if (lstLen % 2):
return sortedLst[index]
else:
return (sortedLst[index] + sortedLst[index + 1])/2.0
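# Sanity check of the three helpers above on a fixed sample:
values = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
print(mean(values))    # 5.0
print(pstdev(values))  # 2.0
print(median(values))  # 4.5 (even-length list: mean of the two middle values)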
def send_email(recipients: list, subject: str, text: str, html: str='', sender: str='', files: list=[], exceptions: bool=False):
"""
:param recipients: List of recipients; or single email (str); or comma-separated email list (str); or list of name-email pairs (e.g. settings.ADMINS)
:param subject: Subject of the email
:param text: Body (text)
:param html: Body (html)
:param sender: Sender email, or settings.DEFAULT_FROM_EMAIL if missing
:param files: Paths to files to attach
:param exceptions: Raise exception if email sending fails
:return: Status code 202 if all emails were sent successfully, error status code otherwise
"""
import sendgrid
from sendgrid.helpers.mail import Content, Mail, Attachment
from django.conf import settings
from base64 import b64encode
from os.path import basename
from django.utils.timezone import now
from jutil.logs import log_event
try:
# default sender to settings.DEFAULT_FROM_EMAIL
if not sender:
sender = settings.DEFAULT_FROM_EMAIL
# support multiple recipient list styles
if isinstance(recipients, str): # allow single email and comma-separated list as input
recipients = [str(r).strip() for r in recipients.split(',')]
sg = sendgrid.SendGridAPIClient(apikey=settings.EMAIL_SENDGRID_API_KEY)
from_email = sendgrid.Email(sender or settings.DEFAULT_FROM_EMAIL)
content = Content('text/plain', text) if not html else Content('text/html', html)
attachments = []
for filename in files:
with open(filename, 'rb') as fp:
attachment = Attachment()
attachment.content = b64encode(fp.read()).decode()
attachment.type = "application/octet-stream"
attachment.filename = basename(filename)
attachment.content_id = basename(filename)
attachment.disposition = "attachment"
attachments.append(attachment)
except Exception as e:
logger.error(e)
if exceptions:
raise
return -1
status_codes = []
for recipient in recipients:
try:
t = now()
to_email = sendgrid.Email()
if isinstance(recipient, str):
to_email.email = recipient
elif (isinstance(recipient, list) or isinstance(recipient, tuple)) and len(recipient) == 2:
to_email.name = recipient[0]
to_email.email = recipient[1]
else:
raise Exception('Invalid recipient format: {}'.format(recipient))
mail = Mail(from_email=from_email, subject=subject, to_email=to_email, content=content)
for attachment in attachments:
mail.add_attachment(attachment)
res = sg.client.mail.send.post(request_body=mail.get())
send_dt = (now()-t).total_seconds()
if res.status_code == 202:
log_event('EMAIL_SENT', data={'time': send_dt, 'to': recipient, 'subject': subject, 'status': res.status_code})
else:
log_event('EMAIL_ERROR', data={'time': send_dt, 'to': recipient, 'subject': subject, 'status': res.status_code, 'body': res.body})
status_codes.append(res.status_code)
except Exception as e:
logger.error(e)
if exceptions:
raise
status_codes.append(-1)
for status in status_codes:
if status != 202:
return status
return 202
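# The recipients argument is deliberately flexible. A sketch of the accepted
# forms (addresses and file paths are placeholders; actually sending requires
# settings.EMAIL_SENDGRID_API_KEY to be configured):
send_email('alice@example.com', 'Report', 'Plain text body')
send_email('alice@example.com, bob@example.com', 'Report', 'Body')
send_email([('Alice', 'alice@example.com')], 'Report',
           text='', html='<p>Hello</p>', files=['/tmp/report.pdf'])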
def remove_whitespace(s):
""" Unsafely attempts to remove HTML whitespace. This is not an HTML parser
which is why its considered 'unsafe', but it should work for most
implementations. Just use on at your own risk.
@s: #str
-> HTML with whitespace removed, ignoring <pre>, script, textarea and code
tags
"""
ignores = {}
for ignore in html_ignore_whitespace_re.finditer(s):
name = "{}{}{}".format(r"{}", uuid.uuid4(), r"{}")
ignores[name] = ignore.group()
s = s.replace(ignore.group(), name)
s = whitespace_re(r' ', s).strip()
for name, val in ignores.items():
s = s.replace(name, val)
return s
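# The two module-level regexes are not shown above. One plausible set of
# definitions consistent with how they are called (whitespace_re(r' ', s)
# reads like a bound .sub method) -- assumed, not the original source:
import re

# Capture whole <pre>/<script>/<textarea>/<code> blocks so their
# whitespace survives the collapsing pass.
html_ignore_whitespace_re = re.compile(
    r'<(pre|script|textarea|code)[^>]*>.*?</\1>',
    re.IGNORECASE | re.DOTALL)

# Bound .sub method: whitespace_re(replacement, string) collapses runs of
# whitespace, matching the call site in remove_whitespace.
whitespace_re = re.compile(r'\s+').sub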
def hashtag_links(uri, s):
""" Turns hashtag-like strings into HTML links
@uri: /uri/ root for the hashtag-like
@s: the #str string you're looking for |#|hashtags in
-> #str HTML link |<a href="/uri/hashtag">hashtag</a>|
"""
for tag, after in hashtag_re.findall(s):
_uri = '/' + (uri or "").lstrip("/") + quote(tag)
link = '<a href="{}">#{}</a>{}'.format(_uri.lower(), tag, after)
s = s.replace('#' + tag, link)
return s
def mentions_links(uri, s):
""" Turns mentions-like strings into HTML links,
@uri: /uri/ root for the hashtag-like
@s: the #str string you're looking for |@|mentions in
-> #str HTML link |<a href="/uri/mention">mention</a>|
"""
for username, after in mentions_re.findall(s):
_uri = '/' + (uri or "").lstrip("/") + quote(username)
link = '<a href="{}">@{}</a>{}'.format(_uri.lower(), username, after)
s = s.replace('@' + username, link)
return s
def filter(self, query: Query):
"""Return a new filtered query.
Use the tree to filter the query and return a new query "filtered".
This query can be filtered again using another tree or even a manual
filter.
To manually filter a query, see:
- https://docs.sqlalchemy.org/en/rel_1_2/orm/query.html?highlight=filter#sqlalchemy.orm.query.Query.filter
"""
entity = query.column_descriptions[0]['type']
new_query, filters = self._root.filter(query, entity)
return new_query.filter(filters)
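# The only non-obvious move in filter() is recovering the mapped entity from
# the query via column_descriptions. A runnable illustration of that lookup
# (toy model; constructing the filter tree itself is not shown here):
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

query = session.query(User)
entity = query.column_descriptions[0]['type']
print(entity is User)   # True -- this is how filter() finds the entity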
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filled_length = int(length * iteration // total)
bar = fill * filled_length + '-' * (length - filled_length)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\r')
# Print New Line on Complete
if iteration == total:
print()
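# Typical driver loop for print_progress_bar (sleep stands in for real work):
import time

total = 30
print_progress_bar(0, total, prefix='Progress:', suffix='Complete', length=40)
for i in range(total):
    time.sleep(0.05)
    print_progress_bar(i + 1, total, prefix='Progress:', suffix='Complete', length=40)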
def print_progress_bar_multi_threads(nb_threads, suffix='', decimals=1, length=15,
fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
nb_threads - Required : number of threads to report progress for (Int)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
string = ""
for k in range(nb_threads):
try:
threads_state = eval(read_file("threads_state_%s" % str(k)))
except SyntaxError:
time.sleep(0.001)
try:
threads_state = eval(read_file("threads_state_%s" % str(k)))
except SyntaxError:
continue  # state file still unreadable; skip this thread for this refresh
iteration = threads_state["iteration"]
total = threads_state["total"]
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
# filled_length = int(length * iteration // total)
# bar = fill * filled_length + '-' * (length - filled_length)
prefix = "Thread %s :" % str(k)
string = string + '%s %s%% ' % (prefix, percent)
print(string + " " + suffix)
def upload(target):
# type: (str) -> None
""" Upload the release to a pypi server.
TODO: Make sure the git directory is clean before allowing a release.
Args:
target (str):
pypi target as defined in ~/.pypirc
"""
log.info("Uploading to pypi server <33>{}".format(target))
with conf.within_proj_dir():
shell.run('python setup.py sdist register -r "{}"'.format(target))
shell.run('python setup.py sdist upload -r "{}"'.format(target))
def gen_pypirc(username=None, password=None):
# type: (str, str) -> None
""" Generate ~/.pypirc with the given credentials.
Useful for CI builds. Can also get credentials through env variables
``PYPI_USER`` and ``PYPI_PASS``.
Args:
username (str):
pypi username. If not given it will try to take it from the
``PYPI_USER`` env variable.
password (str):
pypi password. If not given it will try to take it from the
``PYPI_PASS`` env variable.
"""
path = join(conf.getenv('HOME'), '.pypirc')
username = username or conf.getenv('PYPI_USER', None)
password = password or conf.getenv('PYPI_PASS', None)
if username is None or password is None:
log.err("You must provide $PYPI_USER and $PYPI_PASS")
sys.exit(1)
log.info("Generating <94>{}".format(path))
fs.write_file(path, util.remove_indent('''
[distutils]
index-servers = pypi
[pypi]
repository: https://upload.pypi.org/legacy/
username: {username}
password: {password}
'''.format(
username=username,
password=password
)))
def pick_sdf(filename, directory=None):
"""Returns a full path to the chosen SDF file. The supplied file
is not expected to contain a recognised SDF extension, this is added
automatically.
If a file with the extension `.sdf.gz` or `.sdf` is found, the full path
to it (including the extension) is returned. If this fails, `None` is returned.
:param filename: The SDF file basename, whose path is required.
:type filename: ``str``
:param directory: An optional directory.
If not provided it is calculated automatically.
:type directory: ``str``
:return: The full path to the file including its extension,
or None if it does not exist
:rtype: ``str``
"""
if directory is None:
directory = utils.get_undecorated_calling_module()
# If the 'cwd' is not '/output' (which indicates we're in a Container)
# then remove the CWD and the anticipated '/'
# from the front of the module
if os.getcwd() not in ['/output']:
directory = directory[len(os.getcwd()) + 1:]
file_path = os.path.join(directory, filename)
if os.path.isfile(file_path + '.sdf.gz'):
return file_path + '.sdf.gz'
elif os.path.isfile(file_path + '.sdf'):
return file_path + '.sdf'
# Couldn't find a suitable SDF file
return None
def main():
"""Handles external calling for this module
Execute this python module and provide the args shown below to
external call this module to send Slack messages with attachments!
:return: None
"""
log = logging.getLogger(mod_logger + '.main')
parser = argparse.ArgumentParser(description='This Python module allows '
'sending Slack messages.')
parser.add_argument('-u', '--url', help='Slack webhook URL', required=True)
parser.add_argument('-t', '--text', help='Text of the message', required=True)
parser.add_argument('-n', '--channel', help='Slack channel', required=True)
parser.add_argument('-i', '--icon', help='URL for the Slack icon', required=False)
parser.add_argument('-c', '--color', help='Color of the Slack post', required=False)
parser.add_argument('-a', '--attachment', help='Text for the Slack Attachment', required=False)
parser.add_argument('-p', '--pretext', help='Pretext for the Slack attachment', required=False)
args = parser.parse_args()
# Create the SlackMessage object
try:
slack_msg = SlackMessage(args.url, channel=args.channel, icon_url=args.icon, text=args.text)
except ValueError as e:
msg = 'Unable to create slack message\n{ex}'.format(ex=e)
log.error(msg)
print(msg)
return
# If provided, create the SlackAttachment object
if args.attachment:
try:
slack_att = SlackAttachment(fallback=args.attachment, color=args.color,
pretext=args.pretext, text=args.attachment)
except ValueError:
_, ex, trace = sys.exc_info()
log.error('Unable to create slack attachment\n{e}'.format(e=str(ex)))
return
slack_msg.add_attachment(slack_att)
# Send Slack message
try:
slack_msg.send()
except(TypeError, ValueError, IOError):
_, ex, trace = sys.exc_info()
log.error('Unable to send Slack message\n{e}'.format(e=str(ex)))
return
log.debug('Your message has been Slacked successfully!')
def set_text(self, text):
"""Sets the text attribute of the payload
:param text: (str) Text of the message
:return: None
"""
log = logging.getLogger(self.cls_logger + '.set_text')
if not isinstance(text, basestring):
msg = 'text arg must be a string'
log.error(msg)
raise ValueError(msg)
self.payload['text'] = text
log.debug('Set message text to: {t}'.format(t=text))
def set_icon(self, icon_url):
"""Sets the icon_url for the message
:param icon_url: (str) Icon URL
:return: None
"""
log = logging.getLogger(self.cls_logger + '.set_icon')
if not isinstance(icon_url, basestring):
msg = 'icon_url arg must be a string'
log.error(msg)
raise ValueError(msg)
self.payload['icon_url'] = icon_url
log.debug('Set Icon URL to: {u}'.format(u=icon_url))
def add_attachment(self, attachment):
"""Adds an attachment to the SlackMessage payload
This public method appends a Slack attachment to the attachment
list.
:param attachment: SlackAttachment object
:return: None
"""
log = logging.getLogger(self.cls_logger + '.add_attachment')
if not isinstance(attachment, SlackAttachment):
msg = 'attachment must be of type: SlackAttachment'
log.error(msg)
raise ValueError(msg)
self.attachments.append(attachment.attachment)
log.debug('Added attachment: {a}'.format(a=attachment))
def send(self):
"""Sends the Slack message
This public method sends the Slack message along with any
attachments, then clears the attachments array.
:return: None
:raises: OSError
"""
log = logging.getLogger(self.cls_logger + '.send')
if self.attachments:
self.payload['attachments'] = self.attachments
# Encode payload in JSON
log.debug('Using payload: %s', self.payload)
try:
json_payload = json.JSONEncoder().encode(self.payload)
except(TypeError, ValueError, OverflowError):
_, ex, trace = sys.exc_info()
msg = 'There was a problem encoding the JSON payload\n{e}'.format(e=str(ex))
raise OSError, msg, trace
else:
log.debug('JSON payload: %s', json_payload)
# Post to Slack!
log.debug('Posting message to Slack...')
try:
result = requests.post(url=self.webhook_url, data=json_payload)
except requests.exceptions.ConnectionError:
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem posting to Slack\n{e}'.format(n=ex.__class__.__name__, e=str(ex))
raise OSError, msg, trace
# Check return code
if result.status_code != 200:
log.error('Slack post to url {u} failed with code: {c}'.format(c=result.status_code, u=self.webhook_url))
else:
log.debug('Posted message to Slack successfully.')
# Clear out attachments after sending
self.attachments = []
self.payload.pop('attachments', None)
def send_cons3rt_agent_logs(self):
"""Sends a Slack message with an attachment for each cons3rt agent log
:return:
"""
log = logging.getLogger(self.cls_logger + '.send_cons3rt_agent_logs')
log.debug('Searching for log files in directory: {d}'.format(d=self.dep.cons3rt_agent_log_dir))
for item in os.listdir(self.dep.cons3rt_agent_log_dir):
item_path = os.path.join(self.dep.cons3rt_agent_log_dir, item)
if os.path.isfile(item_path):
log.debug('Adding slack attachment with cons3rt agent log file: {f}'.format(f=item_path))
try:
with open(item_path, 'r') as f:
file_text = f.read()
except (IOError, OSError) as e:
log.warn('There was a problem opening file: {f}\n{e}'.format(f=item_path, e=e))
continue
# Take the last 7000 characters
file_text_trimmed = file_text[-7000:]
attachment = SlackAttachment(fallback=file_text_trimmed, text=file_text_trimmed, color='#9400D3')
self.add_attachment(attachment)
self.send()
def send_text_file(self, text_file):
"""Sends a Slack message with the contents of a text file
:param text_file: (str) Full path to text file to send
:return: None
:raises: Cons3rtSlackerError
"""
log = logging.getLogger(self.cls_logger + '.send_text_file')
if not isinstance(text_file, basestring):
msg = 'arg text_file must be a string, found type: {t}'.format(t=text_file.__class__.__name__)
raise Cons3rtSlackerError(msg)
if not os.path.isfile(text_file):
msg = 'The provided text_file was not found or is not a file: {f}'.format(f=text_file)
raise Cons3rtSlackerError(msg)
log.debug('Attempting to send a Slack message with the contents of file: {f}'.format(f=text_file))
try:
with open(text_file, 'r') as f:
file_text = f.read()
except (IOError, OSError):
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem opening file: {f}\n{e}'.format(
n=ex.__class__.__name__, f=text_file, e=str(ex))
raise Cons3rtSlackerError, msg, trace
# Take the last 7000 characters
file_text_trimmed = file_text[-7000:]
attachment = SlackAttachment(fallback=file_text_trimmed, text=file_text_trimmed, color='#9400D3')
self.add_attachment(attachment)
self.send()
def deploy(project, version, promote, quiet):
""" Deploy the app to the target environment.
The target environments can be configured using the ENVIRONMENTS conf
variable. This will also collect all static files and compile translation
messages
"""
from . import logic
logic.deploy(project, version, promote, quiet)
def devserver(port, admin_port, clear):
# type: (int, int, bool) -> None
""" Run devserver. """
from . import logic
logic.devserver(port, admin_port, clear)
def gaussian_filter1d_ppxf(spec, sig):
"""
Convolve a spectrum by a Gaussian with different sigma for every pixel.
If all sigma are the same this routine produces the same output as
scipy.ndimage.gaussian_filter1d, except for the border treatment.
Here the first/last p pixels are filled with zeros.
When creating a template library for SDSS data, this implementation
is 60x faster than a naive for loop over pixels.
:param spec: vector with the spectrum to convolve
:param sig: vector of sigma values (in pixels) for every pixel
:return: spec convolved with a Gaussian with dispersion sig
"""
sig = sig.clip(0.01) # forces zero sigmas to have 0.01 pixels
p = int(np.ceil(np.max(3*sig)))
m = 2*p + 1 # kernel size
x2 = np.linspace(-p, p, m)**2
n = spec.size
a = np.zeros((m, n))
for j in range(m):   # Loop over the small size of the kernel
# Fill only the central part; the first/last p pixels stay zero,
# matching the border treatment described in the docstring
a[j, p:-p] = spec[j:n-m+j+1]
gau = np.exp(-x2[:, None]/(2*sig**2))
gau /= np.sum(gau, 0)[None, :] # Normalize kernel
conv_spectrum = np.sum(a*gau, 0)
return conv_spectrum
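# With a constant sigma the routine should track scipy.ndimage away from the
# borders (the first/last p pixels are zero-padded here, and the two
# implementations truncate the kernel differently: 3 sigma vs SciPy's default
# 4 sigma), so expect agreement only up to kernel-truncation level:
import numpy as np
from scipy.ndimage import gaussian_filter1d

np.random.seed(2)
spec = np.random.rand(500)
sig = np.full(500, 2.0)          # constant dispersion, in pixels

ours = gaussian_filter1d_ppxf(spec, sig)
ref = gaussian_filter1d(spec, 2.0)

p = int(np.ceil(np.max(3 * sig)))
print(np.max(np.abs(ours[p:-p] - ref[p:-p])))   # small residual difference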
def call_plugins(self, step):
'''
For each plugins, check if a "step" method exist on it, and call it
Args:
step (str): The method to search and call on each plugin
'''
for plugin in self.plugins:
try:
getattr(plugin, step)()
except AttributeError:
self.logger.debug("{} doesn't exist on plugin {}".format(step, plugin))
except TypeError:
self.logger.debug("{} on plugin {} is not callable".format(step, plugin)) | For each plugins, check if a "step" method exist on it, and call it
Args:
step (str): The method to search and call on each plugin | entailment |
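# Plugins are plain duck-typed objects: anything exposing the right method
# name gets called. A minimal sketch, assuming call_plugins is reachable as
# a module-level function (it is defined above as a method of the application):
import logging

class GreeterPlugin:
    def on_run(self):
        print('greeter: run started')

class BrokenPlugin:
    on_run = 'not callable'       # exercises the TypeError branch

class Host:
    """Stand-in for the application class that owns call_plugins."""
    def __init__(self, plugins):
        self.plugins = plugins
        self.logger = logging.getLogger('host')
    call_plugins = call_plugins   # reuse the method shown above

Host([GreeterPlugin(), BrokenPlugin()]).call_plugins('on_run')
# prints 'greeter: run started'; BrokenPlugin is logged and skipped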
def run(self):
"""
Run the application
"""
self.call_plugins("on_run")
if vars(self.arguments).get("version", None):
self.logger.info("{app_name}: {version}".format(app_name=self.app_name, version=self.version))
else:
if self.arguments.command == "main":
self.main()
else:
self.subcommands[self.arguments.command].run()
self.call_plugins("on_end") | Run the application | entailment |
def pretend_option(fn):
# type: (FunctionType) -> FunctionType
""" Decorator to add a --pretend option to any click command.
The value won't be passed down to the command, but rather handled in the
callback. The value will be accessible through `peltak.core.context` under
'pretend' if the command needs it. To get the current value you can do:
>>> from peltak.commands import click, root_cli
>>> from peltak.core import context
>>>
>>> @root_cli.command('my-command')
>>> @pretend_option
>>> def my_command():
... pretend = context.get('pretend', False)
This value will be accessible from anywhere in the code.
"""
def set_pretend(ctx, param, value): # pylint: disable=missing-docstring
# type: (click.Context, str, Any) -> None
from peltak.core import context
from peltak.core import shell
context.set('pretend', value or False)
if value:
shell.cprint('<90>{}', _pretend_msg())
return click.option(
'--pretend',
is_flag=True,
help=("Do not actually do anything, just print shell commands that"
"would be executed."),
expose_value=False,
callback=set_pretend
)(fn)
def verbose_option(fn):
""" Decorator to add a --verbose option to any click command.
The value won't be passed down to the command, but rather handled in the
callback. The value will be accessible through `peltak.core.context` under
'verbose' if the command needs it. To get the current value you can do:
>>> from peltak.core import context
>>>
>>> verbose = context.get('verbose', 0)
This value will be accessible from anywhere in the code.
"""
def set_verbose(ctx, param, value): # pylint: disable=missing-docstring
# type: (click.Context, str, Any) -> None
from peltak.core import context
context.set('verbose', value or 0)
return click.option(
'-v', '--verbose',
count=True, # counted flag so -vv increases verbosity, matching the help text
expose_value=False,
callback=set_verbose,
help="Be verbose. Can specify multiple times for more verbosity.",
)(fn) | Decorator to add a --verbose option to any click command.
The value won't be passed down to the command, but rather handled in the
callback. The value will be accessible through `peltak.core.context` under
'verbose' if the command needs it. To get the current value you can do:
>>> from peltak.core import context
>>>
>>> pretend = context.get('verbose', False)
This value will be accessible from anywhere in the code. | entailment |
def changelog():
# type: () -> str
""" Print change log since last release. """
# Skip 'v' prefix
versions = [x for x in git.tags() if versioning.is_valid(x[1:])]
cmd = 'git log --format=%H'
if versions:
cmd += ' {}..HEAD'.format(versions[-1])
hashes = shell.run(cmd, capture=True).stdout.strip().splitlines()
commits = [git.CommitDetails.get(h) for h in hashes]
tags = conf.get('changelog.tags', [
{'header': 'Features', 'tag': 'feature'},
{'header': 'Changes', 'tag': 'change'},
{'header': 'Fixes', 'tag': 'fix'},
])
results = OrderedDict((
(x['header'], []) for x in tags
))
for commit in commits:
commit_items = extract_changelog_items(commit.desc, tags)
for header, items in commit_items.items():
results[header] += items
lines = [
'<35>v{}<0>'.format(versioning.current()),
'',
]
for header, items in results.items():
if items:
lines += [
'',
'<32>{}<0>'.format(header),
'<32>{}<0>'.format('-' * len(header)),
'',
]
for item_text in items:
item_lines = textwrap.wrap(item_text, 77)
lines += ['- {}'.format('\n '.join(item_lines))]
lines += ['']
return '\n'.join(lines) | Print change log since last release. | entailment |
def extract_changelog_items(text, tags):
# type: (str, List[Dict[str, str]]) -> Dict[str, List[str]]
""" Extract all tagged items from text.
Args:
text (str):
Text to extract the tagged items from. Each tagged item is a
paragraph that starts with a tag. It can also be a text list item.
tags (list[dict[str, str]]):
Tag definitions, each a dict with a 'header' and a 'tag' key.
Returns:
dict[str, list[str]]:
A mapping from each tag's header to the list of items extracted
from the given text.
The tagged items are usually features/changes/fixes but it can be configured
through `pelconf.yaml`.
"""
patterns = {x['header']: tag_re(x['tag']) for x in tags}
items = {x['header']: [] for x in tags}
curr_tag = None
curr_text = ''
for line in text.splitlines():
if not line.strip():
if curr_tag is not None:
items[curr_tag].append(curr_text)
curr_text = ''
curr_tag = None
for tag in tags:
m = patterns[tag['header']].match(line)
if m:
if curr_tag is not None:
items[curr_tag].append(curr_text)
curr_text = ''
curr_tag = tag['header']
line = m.group('text')
break
if curr_tag is not None:
curr_text = '{} {}'.format(curr_text.strip(), line.strip()).strip()
if curr_tag is not None:
items[curr_tag].append(curr_text)
return items | Extract all tagged items from text.
Args:
text (str):
Text to extract the tagged items from. Each tagged item is a
paragraph that starts with a tag. It can also be a text list item.
tags (list[dict[str, str]]):
Tag definitions, each a dict with a 'header' and a 'tag' key.
Returns:
dict[str, list[str]]:
A mapping from each tag's header to the list of items extracted
from the given text.
The tagged items are usually features/changes/fixes but it can be configured
through `pelconf.yaml`. | entailment |
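For illustration, a sketch of the expected behavior, assuming the `tag_re(tag)` helper (not shown in this excerpt) compiles a pattern matching lines like `(tag) text` with the remainder captured in a `text` group:

import re

def tag_re(tag):  # hypothetical stand-in for the project's tag_re helper
    return re.compile(r'\(%s\)\s*(?P<text>.*)' % tag)

desc = (
    "(feature) Add docker push command\n"
    "spread over two lines\n"
    "\n"
    "(fix) Handle a missing registry gracefully"
)
tags = [{'header': 'Features', 'tag': 'feature'},
        {'header': 'Fixes', 'tag': 'fix'}]
# extract_changelog_items(desc, tags) would then return:
# {'Features': ['Add docker push command spread over two lines'],
#  'Fixes': ['Handle a missing registry gracefully']}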
def _(mcs, cls_name="Object", with_meta=None):
""" Method to generate real metaclass to be used::
mc = ExtensibleType._("MyClass") # note this line
@six.add_metaclass(mc)
class MyClassBase(object):
pass
:param str cls_name: name of generated class
:param class with_meta: Mix additional metaclass in.
(default: None)
:return: specific metaclass to track new inheritance tree
"""
if with_meta is not None:
class EXType(with_meta, mcs):
_cls_name = cls_name
_base_classes = []
_generated_class = None
else:
class EXType(mcs):
_cls_name = cls_name
_base_classes = []
_generated_class = None
return EXType | Method to generate real metaclass to be used::
mc = ExtensibleType._("MyClass") # note this line
@six.add_metaclass(mc)
class MyClassBase(object):
pass
:param str cls_name: name of generated class
:param class with_meta: Mix additional metaclass in.
(default: None)
:return: specific metaclass to track new inheritance tree | entailment |
def get_class(mcs):
""" Generates new class to gether logic of all available extensions
::
mc = ExtensibleType._("MyClass")
@six.add_metaclass(mc)
class MyClassBase(object):
pass
# get class with all extensions enabled
MyClass = mc.get_class()
"""
if mcs._generated_class is None:
mcs._generated_class = type(
mcs._cls_name,
tuple(mcs._base_classes),
{'_generated': True})
return mcs._generated_class | Generates a new class gathering the logic of all available extensions
::
mc = ExtensibleType._("MyClass")
@six.add_metaclass(mc)
class MyClassBase(object):
pass
# get class with all extensions enabled
MyClass = mc.get_class() | entailment |
def _add_base_class(mcs, cls):
""" Adds new class *cls* to base classes
"""
# Do all magic only if subclass had defined required attributes
if getattr(mcs, '_base_classes_hash', None) is not None:
meta = getattr(cls, 'Meta', None)
_hash = getattr(meta, mcs._hashattr, None)
if _hash is None and cls not in mcs._get_base_classes():
mcs._base_classes.insert(0, cls)
mcs._generated_class = {} # Cleanup all caches
elif _hash is not None and cls not in mcs._get_base_classes(_hash):
mcs._base_classes_hash[_hash].insert(0, cls)
mcs._generated_class[_hash] = None | Adds new class *cls* to base classes | entailment |
def _(mcs, cls_name='Object', with_meta=None, hashattr='_name'):
""" Method to generate real metaclass to be used
::
# Create metaclass *mc*
mc = ExtensibleByHashType._("MyClass", hashattr='name')
# Create class using *mc* as metaclass
@six.add_metaclass(mc)
class MyClassBase(object):
pass
:param str cls_name: name of generated class
:param class with_meta: Mix additional metaclass in.
(default: None)
:param hashattr: name of class Meta attribute to be used as hash.
default='_name'
:return: specific metaclass to track new inheritance tree
"""
extype = super(ExtensibleByHashType, mcs)._(cls_name=cls_name,
with_meta=with_meta)
class EXHType(extype):
_hashattr = hashattr
_base_classes_hash = collections.defaultdict(list)
# Override it with a dict to store a different
# base generated class for each hash
_generated_class = {}
return EXHType | Method to generate real metaclass to be used
::
# Create metaclass *mc*
mc = ExtensibleByHashType._("MyClass", hashattr='name')
# Create class using *mc* as metaclass
@six.add_metaclass(mc)
class MyClassBase(object):
pass
:param str cls_name: name of generated class
:param class with_meta: Mix additional metaclass in.
(default: None)
:param hashattr: name of class Meta attribute to be used as hash.
default='_name'
:return: specific metaclass to track new inheritance tree | entailment |
def get_class(mcs, name, default=False):
""" Generates new class to gether logic of all available extensions
::
# Create metaclass *mc*
mc = ExtensibleByHashType._("MyClass", hashattr='name')
# Use metaclass *mc* to create base class for extensions
@six.add_metaclass(mc)
class MyClassBase(object):
pass
# Create extension
class MyClassX1(MyClassBase):
class Meta:
name = 'X1'
# get default class
MyClass = mc.get_class(None, default=True)
# get specific class
MyX1 = mc.get_class('X1')
:param name: key to get class for
:param bool default: if set to True, a default class will be generated
when there is no special class defined for such key
:return: generated class for requested type
"""
if default is False and name not in mcs._base_classes_hash:
raise ValueError(
"There is no class registered for key '%s'" % name)
if mcs._generated_class.get(name, None) is None:
cls = type(
mcs._cls_name,
tuple(mcs._get_base_classes(name)),
{'_generated': True})
mcs._generated_class[name] = cls
return mcs._generated_class[name] | Generates a new class gathering the logic of all available extensions
::
# Create metaclass *mc*
mc = ExtensibleByHashType._("MyClass", hashattr='name')
# Use metaclass *mc* to create base class for extensions
@six.add_metaclass(mc)
class MyClassBase(object):
pass
# Create extension
class MyClassX1(MyClassBase):
class Meta:
name = 'X1'
# get default class
MyClass = mc.get_class(None, default=True)
# get specific class
MyX1 = mc.get_class('X1')
:param name: key to get class for
:param bool default: if set to True, a default class will be generated
when there is no special class defined for such key
:return: generated class for requested type | entailment |
def get_registered_names(mcs):
""" Return's list of names (keys) registered in this tree.
For each name specific classes exists
"""
return [k for k, v in six.iteritems(mcs._base_classes_hash) if v] | Returns the list of names (keys) registered in this tree.
For each name, specific classes exist
def get_last_day_of_month(t: datetime) -> int:
"""
Returns day number of the last day of the month
:param t: datetime
:return: int
"""
tn = t + timedelta(days=32)
tn = datetime(year=tn.year, month=tn.month, day=1)
tt = tn - timedelta(hours=1)
return tt.day | Returns day number of the last day of the month
:param t: datetime
:return: int | entailment |
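A quick sanity check of the helper above:

from datetime import datetime

assert get_last_day_of_month(datetime(2024, 2, 10)) == 29   # leap year
assert get_last_day_of_month(datetime(2023, 2, 10)) == 28
assert get_last_day_of_month(datetime(2023, 4, 1)) == 30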
def localize_time_range(begin: datetime, end: datetime, tz=None) -> (datetime, datetime):
"""
Localizes time range. Uses pytz.utc if None provided.
:param begin: Begin datetime
:param end: End datetime
:param tz: pytz timezone or None (default UTC)
:return: begin, end
"""
if not tz:
tz = pytz.utc
return tz.localize(begin), tz.localize(end) | Localizes time range. Uses pytz.utc if None provided.
:param begin: Begin datetime
:param end: End datetime
:param tz: pytz timezone or None (default UTC)
:return: begin, end | entailment |
def this_week(today: datetime=None, tz=None):
"""
Returns this week begin (inclusive) and end (exclusive).
:param today: Some date (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive)
"""
if today is None:
today = datetime.utcnow()
begin = today - timedelta(days=today.weekday())
begin = datetime(year=begin.year, month=begin.month, day=begin.day)
return localize_time_range(begin, begin + timedelta(days=7), tz) | Returns this week begin (inclusive) and end (exclusive).
:param today: Some date (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive) | entailment |
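For example, with the UTC default (2024-05-15 is a Wednesday):

from datetime import datetime

begin, end = this_week(datetime(2024, 5, 15))
# begin == 2024-05-13 00:00 UTC (Monday, inclusive)
# end   == 2024-05-20 00:00 UTC (next Monday, exclusive)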
def this_month(today: datetime=None, tz=None):
"""
Returns current month begin (inclusive) and end (exclusive).
:param today: Some date in the month (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive)
"""
if today is None:
today = datetime.utcnow()
begin = datetime(day=1, month=today.month, year=today.year)
end = begin + timedelta(days=32)
end = datetime(day=1, month=end.month, year=end.year)
return localize_time_range(begin, end, tz) | Returns current month begin (inclusive) and end (exclusive).
:param today: Some date in the month (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive) | entailment |
def next_month(today: datetime=None, tz=None):
"""
Returns next month begin (inclusive) and end (exclusive).
:param today: Some date in the month (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive)
"""
if today is None:
today = datetime.utcnow()
begin = datetime(day=1, month=today.month, year=today.year)
next_mo = begin + timedelta(days=32)
begin = datetime(day=1, month=next_mo.month, year=next_mo.year)
following_mo = begin + timedelta(days=32)
end = datetime(day=1, month=following_mo.month, year=following_mo.year)
return localize_time_range(begin, end, tz) | Returns next month begin (inclusive) and end (exclusive).
:param today: Some date in the month (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive) | entailment |
def last_year(today: datetime=None, tz=None):
"""
Returns last year begin (inclusive) and end (exclusive).
:param today: Some date (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive)
"""
if today is None:
today = datetime.utcnow()
end = datetime(day=1, month=1, year=today.year)
end_incl = end - timedelta(seconds=1)
begin = datetime(day=1, month=1, year=end_incl.year)
return localize_time_range(begin, end, tz) | Returns last year begin (inclusive) and end (exclusive).
:param today: Some date (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive) | entailment |
def add_month(t: datetime, n: int=1) -> datetime:
"""
Adds +- n months to datetime.
Clamps to number of days in given month.
:param t: datetime
:param n: count
:return: datetime
"""
t2 = t
for count in range(abs(n)):
if n > 0:
t2 = datetime(year=t2.year, month=t2.month, day=1) + timedelta(days=32)
else:
t2 = datetime(year=t2.year, month=t2.month, day=1) - timedelta(days=2)
try:
t2 = t.replace(year=t2.year, month=t2.month)
except ValueError: # day out of range for the target month
_, last = monthrange(t2.year, t2.month) # monthrange() -> (first weekday, days in month)
t2 = t.replace(year=t2.year, month=t2.month, day=last)
return t2 | Adds +- n months to datetime.
Clamps to number of days in given month.
:param t: datetime
:param n: count
:return: datetime | entailment |
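The clamping is easiest to see at month ends:

from datetime import datetime

assert add_month(datetime(2024, 1, 31)) == datetime(2024, 2, 29)      # clamped (leap year)
assert add_month(datetime(2024, 3, 31), -1) == datetime(2024, 2, 29)  # backwards, clamped
assert add_month(datetime(2024, 1, 15), 2) == datetime(2024, 3, 15)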
def per_delta(start: datetime, end: datetime, delta: timedelta):
"""
Iterates over time range in steps specified in delta.
:param start: Start of time range (inclusive)
:param end: End of time range (exclusive)
:param delta: Step interval
:return: Iterable collection of [(start+td*0, start+td*1), (start+td*1, start+td*2), ..., end)
"""
curr = start
while curr < end:
curr_end = curr + delta
yield curr, curr_end
curr = curr_end | Iterates over time range in steps specified in delta.
:param start: Start of time range (inclusive)
:param end: End of time range (exclusive)
:param delta: Step interval
:return: Iterable collection of [(start+td*0, start+td*1), (start+td*1, start+td*2), ..., end) | entailment |
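A short sketch of stepping a three-day range one day at a time:

from datetime import datetime, timedelta

steps = list(per_delta(datetime(2024, 1, 1), datetime(2024, 1, 4), timedelta(days=1)))
# [(2024-01-01, 2024-01-02), (2024-01-02, 2024-01-03), (2024-01-03, 2024-01-04)]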
def per_month(start: datetime, end: datetime, n: int=1):
"""
Iterates over time range in one month steps.
Clamps to number of days in given month.
:param start: Start of time range (inclusive)
:param end: End of time range (exclusive)
:param n: Number of months to step. Default is 1.
:return: Iterable collection of [(month+0, month+1), (month+1, month+2), ..., end)
"""
curr = start.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
while curr < end:
curr_end = add_month(curr, n)
yield curr, curr_end
curr = curr_end | Iterates over time range in one month steps.
Clamps to number of days in given month.
:param start: Start of time range (inclusive)
:param end: End of time range (exclusive)
:param n: Number of months to step. Default is 1.
:return: Iterable collection of [(month+0, month+1), (month+1, month+2), ..., end) | entailment |
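Note that the start is snapped back to the first of its month before stepping:

from datetime import datetime

months = list(per_month(datetime(2024, 1, 15), datetime(2024, 4, 1)))
# [(2024-01-01, 2024-02-01), (2024-02-01, 2024-03-01), (2024-03-01, 2024-04-01)]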
def get_requirements() -> List[str]:
"""Return the requirements as a list of string."""
requirements_path = os.path.join(
os.path.dirname(__file__), 'requirements.txt'
)
with open(requirements_path) as f:
return f.read().split() | Return the requirements as a list of string. | entailment |
def project_dev_requirements():
""" List requirements for peltak commands configured for the project.
This list is dynamic and depends on the commands you have configured in
your project's pelconf.yaml. This will be the combined list of packages
needed to be installed in order for all the configured commands to work.
"""
from peltak.core import conf
from peltak.core import shell
for dep in sorted(conf.requirements):
shell.cprint(dep) | List requirements for peltak commands configured for the project.
This list is dynamic and depends on the commands you have configured in
your project's pelconf.yaml. This will be the combined list of packages
needed to be installed in order for all the configured commands to work. | entailment |
def get_func_params(method, called_params):
"""
:type method: function
:type called_params: dict
:return:
"""
insp = inspect.getfullargspec(method)
if not isinstance(called_params, dict):
raise UserWarning()
_called_params = called_params.copy()
params = {}
arg_count = len(insp.args)
arg_def_count = len(insp.defaults) if insp.defaults is not None else 0
for i in range(arg_count):
arg = insp.args[i]
if i == 0 and isinstance(method, types.MethodType):
continue # skip self argument
if arg in _called_params:
params[arg] = _called_params.pop(arg)
elif i - arg_count + arg_def_count >= 0:
params[arg] = insp.defaults[i - arg_count + arg_def_count]
else:
raise TypeError('Argument "%s" not given' % arg)
for kwarg in insp.kwonlyargs:
if kwarg in _called_params:
params[kwarg] = _called_params.pop(kwarg)
elif insp.kwonlydefaults and kwarg in insp.kwonlydefaults: # kwonlydefaults is None when there are none
params[kwarg] = insp.kwonlydefaults[kwarg]
else:
raise TypeError('Argument "%s" not given' % kwarg)
if insp.varkw is None:
if len(_called_params) > 0:
raise TypeError('Got unexpected parameter(s): %s'
'' % (", ".join(_called_params)))
else:
params.update(_called_params)
return params | :type method: function
:type called_params: dict
:return: dict mapping each parameter name to the value it would receive | entailment
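A small sketch of how defaults and keyword-only arguments are resolved:

def greet(name, greeting='Hello', *, punct='!'):
    return '{}, {}{}'.format(greeting, name, punct)

params = get_func_params(greet, {'name': 'Ada'})
# {'name': 'Ada', 'greeting': 'Hello', 'punct': '!'}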
def parse_dsn(dsn, default_port=5432, protocol='http://'):
"""
Parses a database connection string and returns a list of (host, port,
username, password, dbname)
:param dsn: Connection string, e.g.: username@localhost:5432/dbname
:type dsn: str
:param default_port: Default port
:type default_port: int
:param protocol: Protocol prefix used for parsing
:type protocol: str
:return: [host, port, username, password, dbname]
:rtype: list
"""
parsed = urlparse(protocol + dsn)
return [
parsed.hostname or 'localhost',
parsed.port or default_port,
unquote(parsed.username)
if parsed.username is not None else getuser(),
unquote(parsed.password) if parsed.password is not None else None,
parsed.path.lstrip('/'),
] | Parses a database connection string and returns a list of (host, port,
username, password, dbname)
:param dsn: Connection string, e.g.: username@localhost:5432/dbname
:type dsn: str
:param default_port: Default port
:type default_port: int
:param protocol: Protocol prefix used for parsing
:type protocol: str
:return: [host, port, username, password, dbname]
:rtype: list | entailment |
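For instance:

parse_dsn('alice:s3cret@db.example.com/mydb')
# ['db.example.com', 5432, 'alice', 's3cret', 'mydb']
parse_dsn('localhost:5433/test')
# ['localhost', 5433, <current OS user>, None, 'test']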
def build_images():
# type: () -> None
""" Build all docker images for the project. """
registry = conf.get('docker.registry')
docker_images = conf.get('docker.images', [])
for image in docker_images:
build_image(registry, image) | Build all docker images for the project. | entailment |
def push_images():
# type: () -> None
""" Push all project docker images to a remote registry. """
registry = conf.get('docker.registry')
docker_images = conf.get('docker.images', [])
if registry is None:
log.err("You must define docker.registry conf variable to push images")
sys.exit(-1)
for image in docker_images:
push_image(registry, image) | Push all project docker images to a remote registry. | entailment |
def docker_list(registry_pass):
# type: (str) -> None
""" List docker images stored in the remote registry.
Args:
registry_pass (str):
Remote docker registry password.
"""
registry = conf.get('docker.registry', None)
if registry is None:
log.err("You must define docker.registry conf variable to list images")
sys.exit(-1)
registry_user = conf.get('docker.registry_user', None)
if registry_user is None:
registry_user = click.prompt("Username")
rc = client.RegistryClient(registry, registry_user, registry_pass)
images = {x: rc.list_tags(x) for x in rc.list_images()}
shell.cprint("<32>Images in <34>{} <32>registry:", registry)
for image, tags in images.items():
shell.cprint(' <92>{}', image)
for tag in tags:
shell.cprint(' <90>{}:<35>{}', image, tag) | List docker images stored in the remote registry.
Args:
registry_pass (str):
Remote docker registry password. | entailment |
def build_image(registry, image):
# type: (str, Dict[str, Any]) -> None
""" Build docker image.
Args:
registry (str):
The name of the registry this image belongs to. If not given, the
resulting image will have a name without the registry.
image (dict[str, Any]):
The dict containing the information about the built image. This is
the same dictionary as defined in DOCKER_IMAGES variable.
"""
if ':' in image['name']:
_, tag = image['name'].split(':', 1)
else:
_, tag = image['name'], None
values = {
'registry': '' if registry is None else registry + '/',
'image': image['name'],
'tag': tag,
}
if tag is None:
args = [
'-t {registry}{image}'.format(**values),
'-t {registry}{image}:{version}'.format(
version=versioning.current(),
**values
),
]
else:
args = ['-t {registry}{image}'.format(**values)]
if 'file' in image:
args.append('-f {}'.format(conf.proj_path(image['file'])))
with conf.within_proj_dir(image.get('path', '.')):
log.info("Building <33>{registry}<35>/{image}", **values)
shell.run('docker build {args} .'.format(args=' '.join(args))) | Build docker image.
Args:
registry (str):
The name of the registry this image belongs to. If not given, the
resulting image will have a name without the registry.
image (dict[str, Any]):
The dict containing the information about the built image. This is
the same dictionary as defined in DOCKER_IMAGES variable. | entailment |
def push_image(registry, image):
# type: (str, Dict[str, Any]) -> None
""" Push the given image to selected repository.
Args:
registry (str):
The name of the registry we're pushing to. This is the address of
the repository without the protocol specification (no http(s)://)
image (dict[str, Any]):
The dict containing the information about the image. This is the
same dictionary as defined in DOCKER_IMAGES variable.
"""
values = {
'registry': registry,
'image': image['name'],
}
log.info("Pushing <33>{registry}<35>/{image}".format(**values))
shell.run('docker push {registry}/{image}'.format(**values)) | Push the given image to selected repository.
Args:
registry (str):
The name of the registry we're pushing to. This is the address of
the repository without the protocol specification (no http(s)://)
image (dict[str, Any]):
The dict containing the information about the image. This is the
same dictionary as defined in DOCKER_IMAGES variable. | entailment |
def _build_opr_data(self, data, store):
"""Returns a well formatted OPR data"""
return {
"invoice_data": {
"invoice": {
"total_amount": data.get("total_amount"),
"description": data.get("description")
},
"store": store.info
},
"opr_data": {
"account_alias": data.get("account_alias")
}
} | Returns a well formatted OPR data | entailment |
def create(self, data={}, store=None):
"""Initiazes an OPR
First step in the OPR process is to create the OPR request.
Returns the OPR token
"""
_store = store or self.store
_data = self._build_opr_data(data, _store) if data else self._opr_data
return self._process('opr/create', _data) | Initializes an OPR
First step in the OPR process is to create the OPR request.
Returns the OPR token | entailment |
def charge(self, data):
"""Second stage of an OPR request"""
token = data.get("token", self._response["token"])
data = {
"token": token,
"confirm_token": data.get("confirm_token")
}
return self._process('opr/charge', data) | Second stage of an OPR request | entailment |
def get_value(self, field, quick):
# type: (Field, bool) -> Any
""" Ask user the question represented by this instance.
Args:
field (Field):
The field we're asking the user to provide the value for.
quick (bool):
Enable quick mode. In quick mode, the form will reduce the
number of question asked by using defaults wherever possible.
This can greatly reduce the number of interactions required on
the user part, but will obviously limit the user choices. This
should probably be enabled only by a specific user action
(like passing a ``--quick`` flag etc.).
Returns:
The user response converted to a python type using the
:py:attr:`cliform.core.Field.type` converter.
"""
if callable(field.default):
default = field.default(self)
else:
default = field.default
if quick and default is not None:
return default
shell.cprint('<90>{}', field.help)
while True:
try:
answer = click.prompt(field.pretty_prompt, default=default)
return field.type(answer)
except ValueError:
shell.cprint("<31>Unsupported value") | Ask user the question represented by this instance.
Args:
field (Field):
The field we're asking the user to provide the value for.
quick (bool):
Enable quick mode. In quick mode, the form will reduce the
number of questions asked by using defaults wherever possible.
This can greatly reduce the number of interactions required on
the user part, but will obviously limit the user choices. This
should probably be enabled only by a specific user action
(like passing a ``--quick`` flag etc.).
Returns:
The user response converted to a python type using the
:py:attr:`cliform.core.Field.type` converter. | entailment |
def update_image(self, ami_id, instance_id):
"""Replaces an existing AMI ID with an image created from the provided
instance ID
:param ami_id: (str) ID of the AMI to delete and replace
:param instance_id: (str) ID of the instance ID to create an image from
:return: None
"""
log = logging.getLogger(self.cls_logger + '.update_image')
if not isinstance(ami_id, basestring):
msg = 'Arg ami_id must be of type basestring, found: {t}'.format(t=ami_id.__class__.__name__)
raise ImageUtilError(msg)
if not isinstance(instance_id, basestring):
msg = 'Arg instance_id must be of type basestring, found: {t}'.format(t=instance_id.__class__.__name__)
raise ImageUtilError(msg)
if ami_id is None or instance_id is None:
raise ImageUtilError('The provided args ami_id and instance_id must not be None')
log.info('Removing AMI ID: {a}, and replacing with an image for Instance ID: {i}'.format(
a=ami_id, i=instance_id))
# Get the current AMI info
try:
ami_info = self.ec2.describe_images(DryRun=False, ImageIds=[ami_id], Owners=[self.owner_id])
except ClientError:
_, ex, trace = sys.exc_info()
msg = '{n}: Unable to describe image ID: {a}.\n{e}'.format(n=ex.__class__.__name__, a=ami_id, e=str(ex))
raise AWSAPIError, msg, trace
log.debug('Found AMI info: {a}'.format(a=ami_info))
# Grab the current cons3rtuuid tag data
cons3rt_uuid = None
try:
image_tags = ami_info['Images'][0]['Tags']
for image_tag in image_tags:
if image_tag['Key'] == 'cons3rtuuid':
cons3rt_uuid = image_tag['Value']
except KeyError:
_, ex, trace = sys.exc_info()
msg = '{n}: Unable to find image tags for AMI ID: {a}\n{e}'.format(
a=ami_id, n=ex.__class__.__name__, e=str(ex))
raise ImageUtilError, msg, trace
if cons3rt_uuid is None:
raise ImageUtilError('AMI tag cons3rtuuid not found on image ID: {a}'.format(a=ami_id))
log.info('Found image tag for cons3rtuuid: {u}'.format(u=cons3rt_uuid))
log.debug('Found image tags: {t}'.format(t=image_tags))
# Grab the Snapshot ID
try:
snapshot_id = ami_info['Images'][0]['BlockDeviceMappings'][0]['Ebs']['SnapshotId']
except KeyError:
_, ex, trace = sys.exc_info()
raise ImageUtilError('{n}: Unable to determine Snapshot ID for AMI ID {a}\n{e}'.format(
n=ex.__class__.__name__, a=ami_id, e=str(ex)))
log.info('Found Snapshot ID of the current image: {s}'.format(s=snapshot_id))
# Grab the Image name
try:
image_name = ami_info['Images'][0]['Name']
except KeyError:
_, ex, trace = sys.exc_info()
raise ImageUtilError('{n}: Unable to determine Image name for AMI ID {a}\n{e}'.format(
n=ex.__class__.__name__, a=ami_id, e=str(ex)))
log.info('Found name of the current image: {n}'.format(n=image_name))
# Grab the Image description
try:
image_description = ami_info['Images'][0]['Description']
except KeyError:
_, ex, trace = sys.exc_info()
log.warn('{n}: Unable to determine Image description for AMI ID {a}\n{e}'.format(
n=ex.__class__.__name__, a=ami_id, e=str(ex)))
image_description = 'CONS3RT OS Template'
log.info('Using description of the current image: {d}'.format(d=image_description))
# Deregister the image
log.debug('De-registering image ID: {a}...'.format(a=ami_id))
try:
self.ec2.deregister_image(DryRun=False, ImageId=ami_id)
except ClientError:
_, ex, trace = sys.exc_info()
msg = '{n}: Unable to de-register AMI ID: {a}\n{e}'.format(n=ex.__class__.__name__, a=ami_id, e=str(ex))
raise ImageUtilError, msg, trace
log.info('De-registered image ID: {a}'.format(a=ami_id))
# Wait 20 seconds
log.info('Waiting 20 seconds for the image to de-register...')
time.sleep(20)
# Create the new image
log.info('Creating new image from instance ID: {i}'.format(i=instance_id))
try:
create_res = self.ec2.create_image(
DryRun=False,
InstanceId=instance_id,
Name=image_name,
Description=image_description,
NoReboot=False
)
except ClientError:
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem creating an image named [{m}] for image ID: {i}\n{e}'.format(
n=ex.__class__.__name__, m=image_name, i=instance_id, e=str(ex))
raise ImageUtilError, msg, trace
# Get the new Image ID
try:
new_ami_id = create_res['ImageId']
except KeyError:
_, ex, trace = sys.exc_info()
msg = '{n}: Image ID not found in the image creation response for instance ID: {i}\n{e}'.format(
n=ex.__class__.__name__, i=instance_id, e=str(ex))
raise ImageUtilError, msg, trace
log.info('Created new image ID: {w}'.format(w=new_ami_id))
# Wait 20 seconds
log.info('Waiting 20 seconds for the image ID {w} to become available...'.format(w=new_ami_id))
time.sleep(20)
# Add tags to the new AMI
try:
self.ec2.create_tags(DryRun=False, Resources=[new_ami_id], Tags=image_tags)
except ClientError:
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem adding tags to the new image ID: {i}\n\nTags: {t}\n{e}'.format(
n=ex.__class__.__name__, i=new_ami_id, t=image_tags, e=str(ex))
raise ImageUtilError, msg, trace
log.info('Successfully added tags to the new image ID: {w}\nTags: {t}'.format(w=new_ami_id, t=image_tags))
# Delete the Snapshot ID
log.debug('Deleting snapshot for the old image with ID: {s}'.format(s=snapshot_id))
try:
self.ec2.delete_snapshot(DryRun=False, SnapshotId=snapshot_id)
except ClientError:
_, ex, trace = sys.exc_info()
msg = '{n}: Unable to delete snapshot ID: {s}\n{e}'.format(
n=ex.__class__.__name__, s=snapshot_id, e=str(ex))
raise ImageUtilError, msg, trace | Replaces an existing AMI ID with an image created from the provided
instance ID
:param ami_id: (str) ID of the AMI to delete and replace
:param instance_id: (str) ID of the instance ID to create an image from
:return: None | entailment |
def create_cons3rt_template(self, instance_id, name, description='CONS3RT OS template'):
"""Created a new CONS3RT-ready template from an instance ID
:param instance_id: (str) Instance ID to create the image from
:param name: (str) Name of the new image
:param description: (str) Description of the new image
:return: None
"""
log = logging.getLogger(self.cls_logger + '.create_cons3rt_template')
if not isinstance(instance_id, basestring):
msg = 'Arg instance_id must be of type basestring, found: {t}'.format(t=instance_id.__class__.__name__)
raise ImageUtilError(msg)
if not isinstance(name, basestring):
msg = 'Arg name must be of type basestring, found: {t}'.format(t=name.__class__.__name__)
raise ImageUtilError(msg)
if instance_id is None or name is None:
raise ImageUtilError('The provided args instance_id or name must not be None')
# Create the new image
log.info('Creating new image from instance ID: {i}'.format(i=instance_id))
try:
create_res = self.ec2.create_image(
DryRun=False,
InstanceId=instance_id,
Name=name,
Description=description,
NoReboot=False
)
except ClientError:
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem creating an image named [{m}] for image ID: {i}\n{e}'.format(
n=ex.__class__.__name__, m=name, i=instance_id, e=str(ex))
raise ImageUtilError, msg, trace
# Get the new Image ID
try:
new_ami_id = create_res['ImageId']
except KeyError:
_, ex, trace = sys.exc_info()
msg = '{n}: Image ID not found in the image creation response for instance ID: {i}\n{e}'.format(
n=ex.__class__.__name__, i=instance_id, e=str(ex))
raise ImageUtilError, msg, trace
log.info('Created new image ID: {w}'.format(w=new_ami_id))
# Wait 20 seconds
log.info('Waiting 20 seconds for the image ID {w} to become available...'.format(w=new_ami_id))
time.sleep(20)
# Add tags to the new AMI
try:
self.ec2.create_tags(DryRun=False, Resources=[new_ami_id], Tags=default_image_tags)
except ClientError:
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem adding tags to the new image ID: {i}\n\nTags: {t}\n{e}'.format(
n=ex.__class__.__name__, i=new_ami_id, t=default_image_tags, e=str(ex))
raise ImageUtilError, msg, trace
log.info('Successfully added tags to the new image ID: {w}\nTags: {t}'.format(
w=new_ami_id, t=default_image_tags)) | Created a new CONS3RT-ready template from an instance ID
:param instance_id: (str) Instance ID to create the image from
:param name: (str) Name of the new image
:param description: (str) Description of the new image
:return: None | entailment |
def copy_cons3rt_template(self, ami_id):
"""
:param ami_id: (str) ID of the AMI to copy
:return: None
"""
log = logging.getLogger(self.cls_logger + '.copy_cons3rt_template')
# Get the current AMI info
try:
ami_info = self.ec2.describe_images(DryRun=False, ImageIds=[ami_id], Owners=[self.owner_id])
except ClientError:
_, ex, trace = sys.exc_info()
msg = '{n}: Unable to describe image ID: {a}.\n{e}'.format(n=ex.__class__.__name__, a=ami_id, e=str(ex))
raise AWSAPIError, msg, trace
log.debug('Found AMI info: {a}'.format(a=ami_info))
# Grab the current cons3rtuuid tag data
cons3rt_uuid = None
try:
image_tags = ami_info['Images'][0]['Tags']
for image_tag in image_tags:
if image_tag['Key'] == 'cons3rtuuid':
cons3rt_uuid = image_tag['Value']
except KeyError:
_, ex, trace = sys.exc_info()
msg = '{n}: Unable to find image tags for AMI ID: {a}\n{e}'.format(
a=ami_id, n=ex.__class__.__name__, e=str(ex))
raise ImageUtilError, msg, trace
if cons3rt_uuid is None:
raise ImageUtilError('AMI tag cons3rtuuid not found on image ID: {a}'.format(a=ami_id))
log.info('Found image tag for cons3rtuuid: {u}'.format(u=cons3rt_uuid))
log.debug('Found image tags: {t}'.format(t=image_tags)) | :param ami_id: (str) ID of the AMI to copy
:return: None | entailment
def uniorbytes(s, result=str, enc="utf-8", err="strict"):
"""
This function was made to avoid byte / str type errors received in
packages like base64. Accepts all input types and will recursively
encode entire lists and dicts.
@s: the #bytes or #str item you are attempting to encode or decode
@result: the desired output, either #str or #bytes
@enc: the desired encoding
@err: passed to :meth:bytes.decode, tells the decoder what to do about
errors, e.g. 'replace'
-> type specified in @result
"""
if isinstance(s, result):
# the input is the desired one, return as is
return s
if isinstance(s, (bytes, str)):
# the input is either a byte or a string, convert to desired
# result (result=bytes or str)
if isinstance(s, bytes) and result == str:
return s.decode(enc, err)
elif isinstance(s, str) and result == bytes:
return s.encode(enc)
else:
return str(s or ("" if s is None else s), enc)
elif isinstance(s, (float, int, decimal.Decimal)):
return uniorbytes(str(s), result, enc, err)
elif isinstance(s, dict):
# the input is a dict {}
for k, item in list(s.items()):
s[k] = uniorbytes(item, result=result, enc=enc, err=err)
return s
elif hasattr(s, '__iter__'):
# the input is iterable
for i, item in enumerate(s):
s[i] = uniorbytes(item, result=result, enc=enc, err=err)
return s
return s | This function was made to avoid byte / str type errors received in
packages like base64. Accepts all input types and will recursively
encode entire lists and dicts.
@s: the #bytes or #str item you are attempting to encode or decode
@result: the desired output, either #str or #bytes
@enc: the desired encoding
@err: passed to :meth:bytes.decode, tells the decoder what to do about
errors, e.g. 'replace'
-> type specified in @result | entailment |
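Typical round-trips, including the recursive container handling:

assert uniorbytes(b'caf\xc3\xa9') == 'café'                   # bytes -> str
assert uniorbytes('café', bytes) == b'caf\xc3\xa9'            # str -> bytes
assert uniorbytes({'k': [b'a', b'b']}) == {'k': ['a', 'b']}   # recurses into containers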
def recode_unicode(s, encoding='utf-8'):
""" Inputs are encoded to utf-8 and then decoded to the desired
output encoding
@encoding: the desired encoding
-> #str with the desired @encoding
"""
if isinstance(s, str):
return s.encode().decode(encoding)
return s | Inputs are encoded to utf-8 and then decoded to the desired
output encoding
@encoding: the desired encoding
-> #str with the desired @encoding | entailment |
def fix_bad_unicode(text):
u"""Copyright:
http://blog.luminoso.com/2012/08/20/fix-unicode-mistakes-with-python/
Something you will find all over the place, in real-world text, is text
that's mistakenly encoded as utf-8, decoded in some ugly format like
latin-1 or even Windows codepage 1252, and encoded as utf-8 again.
This causes your perfectly good Unicode-aware code to end up with garbage
text because someone else (or maybe "someone else") made a mistake.
This function looks for the evidence of that having happened and fixes it.
It determines whether it should replace nonsense sequences of single-byte
characters that were really meant to be UTF-8 characters, and if so, turns
them into the correctly-encoded Unicode character that they were meant to
represent.
The input to the function must be Unicode. It's not going to try to
auto-decode bytes for you -- then it would just create the problems it's
supposed to fix.
>>> print fix_bad_unicode(u'único')
único
>>> print fix_bad_unicode(u'This text is fine already :þ')
This text is fine already :þ
Because these characters often come from Microsoft products, we allow
for the possibility that we get not just Unicode characters 128-255, but
also Windows's conflicting idea of what characters 128-160 are.
>>> print fix_bad_unicode(u'This — should be an em dash')
This — should be an em dash
We might have to deal with both Windows characters and raw control
characters at the same time, especially when dealing with characters like
\x81 that have no mapping in Windows.
>>> print fix_bad_unicode(u'This text is sad .â\x81”.')
This text is sad .⁔.
This function even fixes multiple levels of badness:
>>> wtf = u'\xc3\xa0\xc2\xb2\xc2\xa0_\xc3\xa0\xc2\xb2\xc2\xa0'
>>> print fix_bad_unicode(wtf)
ಠ_ಠ
However, it has safeguards against fixing sequences of letters and
punctuation that can occur in valid text:
>>> print fix_bad_unicode(u'not such a fan of Charlotte Brontë…”')
not such a fan of Charlotte Brontë…”
Cases of genuine ambiguity can sometimes be addressed by finding other
characters that are not double-encoding, and expecting the encoding to
be consistent:
>>> print fix_bad_unicode(u'AHÅ™, the new sofa from IKEA®')
AHÅ™, the new sofa from IKEA®
Finally, we handle the case where the text is in a single-byte encoding
that was intended as Windows-1252 all along but read as Latin-1:
>>> print fix_bad_unicode(u'This text was never Unicode at all\x85')
This text was never Unicode at all…
"""
if not isinstance(text, str):
return text
if len(text) == 0:
return text
maxord = max(ord(char) for char in text)
tried_fixing = []
if maxord < 128:
# Hooray! It's ASCII!
return text
else:
attempts = [(text, text_badness(text) + len(text))]
if maxord < 256:
tried_fixing = reinterpret_latin1_as_utf8(text)
tried_fixing2 = reinterpret_latin1_as_windows1252(text)
attempts.append((tried_fixing, text_cost(tried_fixing)))
attempts.append((tried_fixing2, text_cost(tried_fixing2)))
elif all(ord(char) in WINDOWS_1252_CODEPOINTS for char in text):
tried_fixing = reinterpret_windows1252_as_utf8(text)
attempts.append((tried_fixing, text_cost(tried_fixing)))
else:
# We can't imagine how this would be anything but valid text.
return text
# Sort the results by badness
attempts.sort(key=lambda x: x[1])
goodtext = attempts[0][0]
if goodtext == text:
return goodtext
else:
return fix_bad_unicode(goodtext) | u"""Copyright:
http://blog.luminoso.com/2012/08/20/fix-unicode-mistakes-with-python/
Something you will find all over the place, in real-world text, is text
that's mistakenly encoded as utf-8, decoded in some ugly format like
latin-1 or even Windows codepage 1252, and encoded as utf-8 again.
This causes your perfectly good Unicode-aware code to end up with garbage
text because someone else (or maybe "someone else") made a mistake.
This function looks for the evidence of that having happened and fixes it.
It determines whether it should replace nonsense sequences of single-byte
characters that were really meant to be UTF-8 characters, and if so, turns
them into the correctly-encoded Unicode character that they were meant to
represent.
The input to the function must be Unicode. It's not going to try to
auto-decode bytes for you -- then it would just create the problems it's
supposed to fix.
>>> print fix_bad_unicode(u'único')
único
>>> print fix_bad_unicode(u'This text is fine already :þ')
This text is fine already :þ
Because these characters often come from Microsoft products, we allow
for the possibility that we get not just Unicode characters 128-255, but
also Windows's conflicting idea of what characters 128-160 are.
>>> print fix_bad_unicode(u'This — should be an em dash')
This — should be an em dash
We might have to deal with both Windows characters and raw control
characters at the same time, especially when dealing with characters like
\x81 that have no mapping in Windows.
>>> print fix_bad_unicode(u'This text is sad .â\x81”.')
This text is sad .⁔.
This function even fixes multiple levels of badness:
>>> wtf = u'\xc3\xa0\xc2\xb2\xc2\xa0_\xc3\xa0\xc2\xb2\xc2\xa0'
>>> print fix_bad_unicode(wtf)
ಠ_ಠ
However, it has safeguards against fixing sequences of letters and
punctuation that can occur in valid text:
>>> print fix_bad_unicode(u'not such a fan of Charlotte Brontë…”')
not such a fan of Charlotte Brontë…”
Cases of genuine ambiguity can sometimes be addressed by finding other
characters that are not double-encoding, and expecting the encoding to
be consistent:
>>> print fix_bad_unicode(u'AHÅ™, the new sofa from IKEA®')
AHÅ™, the new sofa from IKEA®
Finally, we handle the case where the text is in a single-byte encoding
that was intended as Windows-1252 all along but read as Latin-1:
>>> print fix_bad_unicode(u'This text was never Unicode at all\x85')
This text was never Unicode at all… | entailment |
def text_badness(text):
u'''
Look for red flags that text is encoded incorrectly:
Obvious problems:
- The replacement character \ufffd, indicating a decoding error
- Unassigned or private-use Unicode characters
Very weird things:
- Adjacent letters from two different scripts
- Letters in scripts that are very rarely used on computers (and
therefore, someone who is using them will probably get Unicode right)
- Improbable control characters, such as 0x81
Moderately weird things:
- Improbable single-byte characters, such as ƒ or ¬
- Letters in somewhat rare scripts
'''
assert isinstance(text, str)
errors = 0
very_weird_things = 0
weird_things = 0
prev_letter_script = None
unicodedata_name = unicodedata.name
unicodedata_category = unicodedata.category
for char in text:
index = ord(char)
if index < 256:
# Deal quickly with the first 256 characters.
weird_things += SINGLE_BYTE_WEIRDNESS[index]
if SINGLE_BYTE_LETTERS[index]:
prev_letter_script = 'latin'
else:
prev_letter_script = None
else:
category = unicodedata_category(char)
if category == 'Co':
# Unassigned or private use
errors += 1
elif index == 0xfffd:
# Replacement character
errors += 1
elif index in WINDOWS_1252_GREMLINS:
lowchar = char.encode('WINDOWS_1252').decode('latin-1')
weird_things += SINGLE_BYTE_WEIRDNESS[ord(lowchar)] - 0.5
if category[0] == 'L':
# It's a letter. What kind of letter? This is typically found
# in the first word of the letter's Unicode name.
name = unicodedata_name(char)
scriptname = name.split()[0]
freq, script = SCRIPT_TABLE.get(scriptname, (0, 'other'))
if prev_letter_script:
if script != prev_letter_script:
very_weird_things += 1
if freq == 1:
weird_things += 2
elif freq == 0:
very_weird_things += 1
prev_letter_script = script
else:
prev_letter_script = None
return 100 * errors + 10 * very_weird_things + weird_things | u'''
Look for red flags that text is encoded incorrectly:
Obvious problems:
- The replacement character \ufffd, indicating a decoding error
- Unassigned or private-use Unicode characters
Very weird things:
- Adjacent letters from two different scripts
- Letters in scripts that are very rarely used on computers (and
therefore, someone who is using them will probably get Unicode right)
- Improbable control characters, such as 0x81
Moderately weird things:
- Improbable single-byte characters, such as ƒ or ¬
- Letters in somewhat rare scripts | entailment |
def get_pycons3rt_home_dir():
"""Returns the pycons3rt home directory based on OS
:return: (str) Full path to pycons3rt home
:raises: OSError
"""
if platform.system() == 'Linux':
return os.path.join(os.path.sep, 'etc', 'pycons3rt')
elif platform.system() == 'Windows':
return os.path.join('C:', os.path.sep, 'pycons3rt')
elif platform.system() == 'Darwin':
return os.path.join(os.path.expanduser('~'), '.pycons3rt')
else:
raise OSError('Unsupported Operating System') | Returns the pycons3rt home directory based on OS
:return: (str) Full path to pycons3rt home
:raises: OSError | entailment |
def initialize_pycons3rt_dirs():
"""Initializes the pycons3rt directories
:return: None
:raises: OSError
"""
for pycons3rt_dir in [get_pycons3rt_home_dir(),
get_pycons3rt_user_dir(),
get_pycons3rt_conf_dir(),
get_pycons3rt_log_dir(),
get_pycons3rt_src_dir()]:
if os.path.isdir(pycons3rt_dir):
continue
try:
os.makedirs(pycons3rt_dir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(pycons3rt_dir):
pass
else:
msg = 'Unable to create directory: {d}'.format(d=pycons3rt_dir)
raise OSError(msg) | Initializes the pycons3rt directories
:return: None
:raises: OSError | entailment |
def main():
# Create the pycons3rt directories
try:
initialize_pycons3rt_dirs()
except OSError as ex:
traceback.print_exc()
return 1
# Replace log directory paths
log_dir_path = get_pycons3rt_log_dir() + os.path.sep
conf_contents = default_logging_conf_file_contents.replace(replace_str, log_dir_path)
# Create the logging config file
logging_config_file_dest = os.path.join(get_pycons3rt_conf_dir(), 'pycons3rt-logging.conf')
with open(logging_config_file_dest, 'w') as f:
f.write(conf_contents)
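# The block below is a disabled legacy variant, kept as a string literal and never executed: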
"""
for line in fileinput.input(logging_config_file_dest, inplace=True):
if re.search(replace_str, line):
new_line = re.sub(replace_str, log_dir_path, line, count=0)
sys.stdout.write(new_line)
else:
sys.stdout.write(line)
"""
return 0 | for line in fileinput.input(logging_config_file_dest, inplace=True):
if re.search(replace_str, line):
new_line = re.sub(replace_str, log_dir_path, line, count=0)
sys.stdout.write(new_line)
else:
sys.stdout.write(line) | entailment |
def add_hooks():
# type: () -> None
""" Add git hooks for commit and push to run linting and tests. """
# Detect the virtualenv the hooks should use
virtual_env = conf.getenv('VIRTUAL_ENV')
if virtual_env is None:
log.err("You are not inside a virtualenv")
confirm_msg = (
"Are you sure you want to use global python installation "
"to run your git hooks? [y/N] "
)
if not click.confirm(confirm_msg):
log.info("Cancelling")
return
load_venv = ''
else:
load_venv = 'source "{}/bin/activate"'.format(virtual_env)
commit_hook = conf.proj_path('.git/hooks/pre-commit')
push_hook = conf.proj_path('.git/hooks/pre-push')
# Write pre-commit hook
log.info("Adding pre-commit hook <33>{}", commit_hook)
fs.write_file(commit_hook, util.remove_indent('''
#!/bin/bash
PATH="/opt/local/libexec/gnubin:$PATH"
{load_venv}
peltak lint --commit
'''.format(load_venv=load_venv)))
# Write pre-push hook
log.info("Adding pre-push hook: <33>{}", push_hook)
fs.write_file(push_hook, util.remove_indent('''
#!/bin/bash
PATH="/opt/local/libexec/gnubin:$PATH"
{load_venv}
peltak test --allow-empty
'''.format(load_venv=load_venv)))
log.info("Making hooks executable")
if not context.get('pretend', False):
os.chmod(conf.proj_path('.git/hooks/pre-commit'), 0o755)
os.chmod(conf.proj_path('.git/hooks/pre-push'), 0o755) | Add git hooks for commit and push to run linting and tests. | entailment |
def start(name):
# type: (str) -> None
""" Start working on a new feature by branching off develop.
This will create a new branch off develop called feature/<name>.
Args:
name (str):
The name of the new feature.
"""
feature_name = 'feature/' + common.to_branch_name(name)
develop = conf.get('git.devel_branch', 'develop')
common.assert_on_branch(develop)
common.git_checkout(feature_name, create=True) | Start working on a new feature by branching off develop.
This will create a new branch off develop called feature/<name>.
Args:
name (str):
The name of the new feature. | entailment |
def update():
# type: () -> None
""" Update the feature with updates committed to develop.
This will merge current develop into the current branch.
"""
branch = git.current_branch(refresh=True)
develop = conf.get('git.devel_branch', 'develop')
common.assert_branch_type('feature')
common.git_checkout(develop)
common.git_pull(develop)
common.git_checkout(branch.name)
common.git_merge(branch.name, develop) | Update the feature with updates committed to develop.
This will merge current develop into the current branch. | entailment |
def merged():
# type: () -> None
""" Cleanup a remotely merged branch. """
develop = conf.get('git.devel_branch', 'develop')
branch = git.current_branch(refresh=True)
common.assert_branch_type('feature')
# Pull develop with the merged feature
common.git_checkout(develop)
common.git_pull(develop)
# Cleanup
common.git_branch_delete(branch.name)
common.git_prune()
common.git_checkout(develop) | Cleanup a remotely merged branch. | entailment |
def clean(exclude):
# type: (List[str]) -> None
""" Remove all unnecessary files.
Args:
exclude (list[str]):
A list of path patterns to exclude from deletion.
The global ``pretend`` context value is respected: when set, nothing
is deleted and the matching paths are only printed.
"""
pretend = context.get('pretend', False)
exclude = list(exclude) + conf.get('clean.exclude', [])
clean_patterns = conf.get('clean.patterns', [
'*__pycache__*',
'*.py[cod]',
'*.swp',
])
num_files = 0
with util.timed_block() as t:
files = fs.filtered_walk(conf.proj_path(), clean_patterns, exclude)
for path in files:
try:
num_files += 1
if not isdir(path):
log.info(' <91>[file] <90>{}', path)
not pretend and os.remove(path)
else:
log.info(' <91>[dir] <90>{}', path)
not pretend and rmtree(path)
except OSError:
log.info("<33>Failed to remove <90>{}", path)
if pretend:
msg = "Would delete <33>{}<32> files. Took <33>{}<32>s"
else:
msg = "Deleted <33>{}<32> files in <33>{}<32>s"
log.info(msg.format(num_files, t.elapsed_s)) | Remove all unnecessary files.
Args:
exclude (list[str]):
A list of path patterns to exclude from deletion.
The global ``pretend`` context value is respected: when set, nothing
is deleted and the matching paths are only printed.
def init(quick):
# type: (bool) -> None
""" Create an empty pelconf.yaml from template """
config_file = 'pelconf.yaml'
prompt = "-- <35>{} <32>already exists. Wipe it?<0>".format(config_file)
if exists(config_file) and not click.confirm(shell.fmt(prompt)):
log.info("Canceled")
return
form = InitForm().run(quick=quick)
log.info('Writing <35>{}'.format(config_file))
pelconf_template = conf.load_template('pelconf.yaml')
fs.write_file(config_file, pelconf_template.format(**form.values)) | Create an empty pelconf.yaml from template | entailment |
def tracebacks_from_lines(lines_iter):
"""Generator that yields tracebacks found in a lines iterator
The lines iterator can be:
- a file-like object
- a list (or deque) of lines.
- any other iterable sequence of strings
"""
tbgrep = TracebackGrep()
for line in lines_iter:
tb = tbgrep.process(line)
if tb:
yield tb | Generator that yields tracebacks found in a lines iterator
The lines iterator can be:
- a file-like object
- a list (or deque) of lines.
- any other iterable sequence of strings | entailment |
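A minimal sketch of pulling a traceback out of a log stream (assuming `TracebackGrep` buffers lines from the `Traceback (most recent call last):` header through the final exception line):

log_lines = [
    "INFO starting worker",
    "Traceback (most recent call last):",
    '  File "app.py", line 3, in <module>',
    "    1 / 0",
    "ZeroDivisionError: division by zero",
    "INFO worker restarted",
]
for tb in tracebacks_from_lines(log_lines):
    print(tb)   # the five traceback lines, yielded as one block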
def tracebacks_from_file(fileobj, reverse=False):
"""Generator that yields tracebacks found in a file object
With reverse=True, searches backwards from the end of the file.
"""
if reverse:
lines = deque()
for line in BackwardsReader(fileobj):
lines.appendleft(line)
if tb_head in line:
yield next(tracebacks_from_lines(lines))
lines.clear()
else:
for traceback in tracebacks_from_lines(fileobj):
yield traceback | Generator that yields tracebacks found in a file object
With reverse=True, searches backwards from the end of the file. | entailment |
def BackwardsReader(file, BLKSIZE = 4096):
"""Read a file line by line, backwards"""
buf = ""
file.seek(0, 2)
lastchar = file.read(1)
trailing_newline = (lastchar == "\n")
while 1:
newline_pos = buf.rfind("\n")
pos = file.tell()
if newline_pos != -1:
# Found a newline
line = buf[newline_pos+1:]
buf = buf[:newline_pos]
if pos or newline_pos or trailing_newline:
line += "\n"
yield line
elif pos:
# Need to fill buffer
toread = min(BLKSIZE, pos)
file.seek(pos-toread, 0)
buf = file.read(toread) + buf
file.seek(pos-toread, 0)
if pos == toread:
buf = "\n" + buf
else:
# Start-of-file
return | Read a file line by line, backwards | entailment |
def inversefunc(func,
y_values=None,
domain=None,
image=None,
open_domain=None,
args=(),
accuracy=2):
r"""Obtain the inverse of a function.
Returns the numerical inverse of the function `f`. It may return a callable
that can be used to calculate the inverse, or the inverse of certain points
depending on the `y_values` argument.
In order for the numerical inverse to exist in its domain, the
input function must have continuous, strictly monotonic behavior, i.e. be
purely decreasing or purely increasing in that domain. By default the
domain interval spans all the real numbers, however it can be restricted
with the `domain` and `open_domain` arguments. The image of the function
in the interval may be provided, for cases where the function is non
continuous right at the end of an open interval.
Parameters
----------
func : callable
Callable representing the function to be inverted, able to take a
ndarray or an scalar and return an object of the same kind with the
evaluation of the function. If `func` takes many arguments, it is
inverted along the axis corresponding to the first argument.
The function must not diverge and have a continuous strictly monotonic
behavior in the chosen interval.
y_values : float, ndarray, optional
Values for which to calculate the inverse function. If set to None, then
a callable that can be used to calculate the inverse of values is
returned. Default None.
domain : float, ndarray, optional
Boundaries of the domain (`domain[0]`, `domain[1]`).
`domain[1]` must be larger than `domain[0]`.
None values are assumed to be no boundary in that direction.
A single scalar value will set it to [`domain`, None].
Default None (-Inf, Inf).
open_domain : bool, ndarray, optional
Whether the domain is an open interval at each of the ends.
A single scalar boolean will set it to [`open_domain`, `open_domain`].
Default None [False, False].
image : float, ndarray, optional
Image of the function in the domain (`image[0]`, `image[1]`).
`image[1]` must be larger than `image[0]`.
None values are assumed to be no boundary in that direction.
Default None, this is (-Inf, Inf) if domain is None, or the limits
set by func(domain[0]) and func(domain[1]).
args : tuple, optional
Extra arguments to pass to `func`. Default ().
accuracy : int, optional
Number of digits for the desired accuracy. It will give a warning
if the accuracy is worse than this.
Default 2.
Returns
-------
callable or ndarray
Inverse function of `func`. It can take scalars or ndarrays, and return
objects of the same kind with the calculated inverse values.
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from pynverse import inversefunc
>>> import numpy as np
>>> cube = (lambda x: x**3)
>>> invcube = inversefunc(cube)
>>> invcube(27) # Should give 3
array(3.0000000063797567)
>>> invsquare = inversefunc(np.power, args=(2), domain=0)
>>> invsquare([4, 16, 64]) # Should give [2, 4, 8]
array([ 2., 4., 8.])
>>> inversefunc(np.log10, y_values=-2, # Should give 0.01
... domain=0, open_domain=True)
array(0.0099999999882423)
>>> inversefunc(np.cos, y_values=[1, 0, -1], # Should give [0, pi / 2, pi]
... domain=[0, np.pi])
array([ 0. , 1.57079632, 3.14159265])
>>> invtan = inversefunc(np.tan,
... domain=[-np.pi / 2, np.pi / 2],
... open_domain=True)
>>> invtan([1, 0, -1]) # Should give [pi / 4, 0, -pi / 4]
array([ 7.85398163e-01, 1.29246971e-26, -7.85398163e-01])
"""
domain, image, open_domain, args = _normparams_inversefunc(domain,
image,
open_domain,
args)
ymin, ymax = image
xmin, xmax = domain
xmin_open, xmax_open = open_domain
# Calculating if the function is increasing or decreasing, using ref points
# anywhere in the valid range (Function has to be strictly monotonic)
ref1, ref2 = _get_valid_refpoints(xmin, xmax)
trend = np.sign(func(ref2, *args) - func(ref1, *args))
if trend == 0:
raise ValueError("Function is not strictly monotonic")
# Calculating the image by default
if ymin is None:
ymin = _auto_ymin(func, args, xmin, xmax, trend)
if ymax is None:
ymax = _auto_ymax(func, args, xmin, xmax, trend)
# Creating bounded function
def bounded_f(x):
if xmin is not None and (x < xmin or (x == xmin and xmin_open)):
val = -1 * np.inf * trend
elif xmax is not None and (x > xmax or (x == xmax and xmax_open)):
val = np.inf * trend
else:
val = func(x, *args)
return val
min_kwargs = {}
min_kwargs['bracket'] = (ref1, ref2)
min_kwargs['tol'] = 1.48e-08
min_kwargs['method'] = 'Brent'
def inv(yin):
yin = np.asarray(yin, dtype=np.float64)
shapein = yin.shape
yin = yin.flatten()
if ymin is not None:
if (xmin_open and trend == 1) or (xmax_open and trend == -1):
mask = yin <= ymin
else:
mask = yin < ymin
if yin[mask].size > 0:
raise ValueError("Requested values %s lower than the"
" lower limit %g of the image" %
(yin[mask], ymin))
if ymax is not None:
if (xmax_open and trend == 1) or (xmin_open and trend == -1):
mask = yin >= ymax
else:
mask = yin > ymax
if yin[mask].size > 0:
raise ValueError("Requested values %s higher than the"
" higher limit %g of the image" %
(yin[mask], ymax))
results = yin.copy() * np.nan
resultsmask = np.zeros(yin.shape, dtype=bool)  # np.bool was removed from modern NumPy
for j in range(yin.size):
if xmax is not None:
if bounded_f(xmax) == yin[j]:
results[j] = xmax
resultsmask[j] = True
continue
if xmin is not None:
if bounded_f(xmin) == yin[j]:
results[j] = xmin
resultsmask[j] = True
continue
optimizer = lambda x, j=j: (bounded_f(x) - yin[j]) ** 2  # squared residual; default arg binds j
try:
with warnings.catch_warnings(record=True):
result = minimize_scalar(optimizer, **min_kwargs)
results[j] = result.x
resultsmask[j] = result.success
except Exception:
resultsmask[j] = False
if any(~resultsmask):
warnings.warn("Trouble calculating inverse for values: "
"%s" % str(yin[~resultsmask]), RuntimeWarning)
try:
np.testing.assert_array_almost_equal(yin, func(results, *args),
decimal=accuracy)
except AssertionError:
warnings.warn("Results obtained with less than %g "
"decimal digits of accuracy"
% accuracy, RuntimeWarning)
return results.reshape(shapein)
if y_values is None:
return inv
else:
return inv(y_values) | r"""Obtain the inverse of a function.
Returns the numerical inverse of the function `func`. It may return a callable
that can be used to calculate the inverse, or the inverse of certain points
depending on the `y_values` argument.
In order for the numerical inverse to exist in its domain, the
input function must have continuous, strictly monotonic behavior, i.e. be
purely decreasing or purely increasing in that domain. By default the
domain interval spans all the real numbers; however, it can be restricted
with the `domain` and `open_domain` arguments. The image of the function
in the interval may be provided, for cases where the function is
discontinuous right at the end of an open interval.
Parameters
----------
func : callable
Callable representing the function to be inverted, able to take an
ndarray or a scalar and return an object of the same kind with the
evaluation of the function. If `func` takes many arguments, it is
inverted along the axis corresponding to the first argument.
The function must not diverge and have a continuous strictly monotonic
behavior in the chosen interval.
y_values : float, ndarray, optional
Values for which to calculate the inverse function. If set to None, then
a callable that can be used to calculate the inverse of values is
returned. Default None.
domain : float, ndarray, optional
Boundaries of the domain (`domain[0]`, `domain[1]`).
`domain[1]` must be larger than `domain[0]`.
None values are assumed to be no boundary in that direction.
A single scalar value will set it to [`domain`, None].
Default None (-Inf, Inf).
open_domain : bool, ndarray, optional
Whether the domain is an open interval at each of the ends.
A single scalar boolean will set it to [`open_domain`, `open_domain`].
Default None [False, False].
image : float, ndarray, optional
Image of the function in the domain (`image[0]`, `image[1]`).
`image[1]` must be larger than `image[0]`.
None values are assumed to be no boundary in that direction.
Default None, this is (-Inf, Inf) if domain is None, or the limits
set by func(domain[0]) and func(domain[1]).
args : tuple, optional
Extra arguments to pass to `func`. Default ().
accuracy : int, optional
Number of digits for the desired accuracy. It will give a warning
if the accuracy is worse than this.
Default 2.
Returns
-------
callable or ndarray
Inverse function of `func`. It can take scalars or ndarrays, and return
objects of the same kind with the calculated inverse values.
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from pynverse import inversefunc
>>> import numpy as np
>>> cube = (lambda x: x**3)
>>> invcube = inversefunc(cube)
>>> invcube(27) # Should give 3
array(3.0000000063797567)
>>> invsquare = inversefunc(np.power, args=(2), domain=0)
>>> invsquare([4, 16, 64]) # Should give [2, 4, 8]
array([ 2., 4., 8.])
>>> inversefunc(np.log10, y_values=-2, # Should give 0.01
... domain=0, open_domain=True)
array(0.0099999999882423)
>>> inversefunc(np.cos, y_values=[1, 0, -1], # Should give [0, pi / 2, pi]
... domain=[0, np.pi])
array([ 0. , 1.57079632, 3.14159265])
>>> invtan = inversefunc(np.tan,
... domain=[-np.pi / 2, np.pi / 2],
... open_domain=True)
>>> invtan([1, 0, -1]) # Should give [pi / 4, 0, -pi / 4]
array([ 7.85398163e-01, 1.29246971e-26, -7.85398163e-01]) | entailment |
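As a further hedged sketch (the sigmoid example is ours, not from pynverse's documentation), the returned callable can be used like any NumPy-aware function:
```python
import numpy as np
from pynverse import inversefunc

# The logistic sigmoid is strictly increasing on all the reals with
# image (0, 1), so its numerical inverse (the logit) is well defined.
sigmoid = lambda x: 1 / (1 + np.exp(-x))
logit = inversefunc(sigmoid, image=[0, 1])
print(logit(0.5))           # ~0.0
print(logit([0.25, 0.75]))  # ~[-1.0986, 1.0986]
```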
def progressed_bar(count, total=100, status=None, suffix=None, bar_len=10):
"""render a progressed.io like progress bar"""
status = status or ''
suffix = suffix or '%'
assert isinstance(count, int)
count_normalized = count if count <= total else total
filled_len = int(round(bar_len * count_normalized / float(total)))
percents = 100.0 * count / float(total)
color = '#5cb85c'
if percents < 30.0:
color = '#d9534f'
elif percents < 70.0:  # elif, so the red band below 30% is not overwritten
color = '#f0ad4e'
text_color = colors.fg(color)
bar_color = text_color + colors.bg(color)
nc_color = colors.dark_gray
progressbar = (colors.bg('#428bca') | status) if status else ''
progressbar += (bar_color | ('█' * filled_len))
progressbar += (nc_color | ('█' * (bar_len - filled_len)))
progressbar += (text_color | (str(count) + suffix))
return progressbar | render a progressed.io like progress bar | entailment |
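A hypothetical call (the status text and bar length are illustrative; the function returns a string already wrapped in plumbum color escapes):
```python
# Render a 20-cell bar at 42%: red/orange/green depending on the percentage.
print(progressed_bar(42, status='WIP', bar_len=20))
```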
def prettify(string):
"""
replace emoji markup and progress-bar markup with rendered output
# Example
```python
from habitipy.util import prettify
print(prettify('Write thesis :book: '))
```
```
Write thesis 📖 ██████████0%
```
"""
string = emojize(string, use_aliases=True) if emojize else string
string = progressed(string)
return string | replace emoji markup and progress-bar markup with rendered output
# Example
```python
from habitipy.util import prettify
print(prettify('Write thesis :book: '))
```
```
Write thesis 📖 ██████████0%
``` | entailment |
def assert_secure_file(file):
"""checks if a file is stored securely"""
if not is_secure_file(file):
msg = """
File {0} can be read by other users.
This is not secure. Please run 'chmod 600 "{0}"'"""
raise SecurityError(dedent(msg).replace('\n', ' ').format(file))
return True | checks if a file is stored securely | entailment |
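A hypothetical guard before reading credentials (the path is illustrative, and `is_secure_file` is assumed to be defined alongside this helper):
```python
import os

# Refuse to proceed if the config file is readable by other users.
conf_path = os.path.expanduser('~/.config/habitipy/config')
assert_secure_file(conf_path)  # raises SecurityError unless e.g. mode 600
```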
def get_translation_for(package_name: str) -> gettext.NullTranslations:
"""find and return gettext translation for package"""
localedir = None
for localedir in pkg_resources.resource_filename(package_name, 'i18n'), None:
localefile = gettext.find(package_name, localedir) # type: ignore
if localefile:
break
return gettext.translation(package_name, localedir=localedir, fallback=True) | find and return gettext translation for package | entailment |
def get_translation_functions(package_name: str, names: Tuple[str, ...] = ('gettext',)):
"""finds and installs translation functions for package"""
translation = get_translation_for(package_name)
return [getattr(translation, x) for x in names] | find and return the requested translation functions for a package | entailment
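A hedged sketch tying the two translation helpers together (the package name is illustrative; `gettext` and `ngettext` are standard methods on gettext translation objects):
```python
# Fetch gettext/ngettext bound to this package's i18n catalogs.
_, ngettext = get_translation_functions('habitipy',
                                        names=('gettext', 'ngettext'))
print(_('Hello'))  # translated if a catalog is found, else passed through
print(ngettext('{} task', '{} tasks', 3).format(3))
```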
def escape_keywords(arr):
"""append _ to all python keywords"""
for i in arr:
i = i if i not in kwlist else i + '_'
i = i if '-' not in i else i.replace('-', '_')
yield i | append _ to python keywords and replace dashes with underscores | entailment
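For instance (a quick illustration, not from the source):
```python
# 'class' is a Python keyword; 'down-vote' contains a dash.
print(list(escape_keywords(['class', 'down-vote', 'score'])))
# -> ['class_', 'down_vote', 'score']
```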
def download_api(branch=None) -> str:
"""download API documentation from _branch_ of Habitica\'s repo on Github"""
habitica_github_api = 'https://api.github.com/repos/HabitRPG/habitica'
if not branch:
branch = requests.get(habitica_github_api + '/releases/latest').json()['tag_name']
curl = local['curl']['-sL', habitica_github_api + '/tarball/{}'.format(branch)]
tar = local['tar'][
'axzf', '-', '--wildcards', '*/website/server/controllers/api-v3/*', '--to-stdout']
grep = local['grep']['@api']
sed = local['sed']['-e', 's/^[ */]*//g', '-e', 's/  / /g', '-']  # strip comment markers, squeeze double spaces
return (curl | tar | grep | sed)() | download API documentation from _branch_ of Habitica\'s repo on Github | entailment |
def save_apidoc(text: str) -> None:
"""save `text` to apidoc cache"""
apidoc_local = local.path(APIDOC_LOCAL_FILE)
if not apidoc_local.dirname.exists():
apidoc_local.dirname.mkdir()
with open(apidoc_local, 'w') as f:
f.write(text) | save `text` to apidoc cache | entailment |
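A hedged round-trip sketch (requires network access plus curl, tar, grep, and sed on PATH):
```python
# Download apiDoc text for the latest Habitica release and cache it locally.
text = download_api()  # or download_api('develop') for a specific branch
save_apidoc(text)
```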
def parse_apidoc(
file_or_branch,
from_github=False,
save_github_version=True
) -> List['ApiEndpoint']:
"""read file and parse apiDoc lines"""
apis = [] # type: List[ApiEndpoint]
regex = r'(?P<group>\([^)]*\)){0,1} *(?P<type_>{[^}]*}){0,1} *'
regex += r'(?P<field>[^ ]*) *(?P<description>.*)$'
param_regex = re.compile(r'^@apiParam {1,}' + regex)
success_regex = re.compile(r'^@apiSuccess {1,}' + regex)
if from_github:
text = download_api(file_or_branch)
if save_github_version:
save_apidoc(text)
else:
with open(file_or_branch) as f:
text = f.read()
for line in text.split('\n'):
line = line.replace('\n', '')
if line.startswith('@api '):
if apis:
if not apis[-1].retcode:
apis[-1].retcode = 200
split_line = line.split(' ')
assert len(split_line) >= 3
method = split_line[1]
uri = split_line[2]
assert method[0] == '{'
assert method[-1] == '}'
method = method[1:-1]
if not uri.startswith(API_URI_BASE):
warnings.warn(_("Wrong api url: {}").format(uri)) # noqa: Q000
title = ' '.join(split_line[3:])
apis.append(ApiEndpoint(method, uri, title))
elif line.startswith('@apiParam '):
res = next(param_regex.finditer(line)).groupdict()
apis[-1].add_param(**res)
elif line.startswith('@apiSuccess '):
res = next(success_regex.finditer(line)).groupdict()
apis[-1].add_success(**res)
if apis:
if not apis[-1].retcode:
apis[-1].retcode = 200
return apis | read file and parse apiDoc lines | entailment |
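Finally, a hedged sketch of consuming the parser (the cache path is illustrative; we rely only on the ApiEndpoint constructor shown above):
```python
# Parse a locally cached apiDoc dump and inspect the first few endpoints.
endpoints = parse_apidoc('apidoc_cache.txt')
for endpoint in endpoints[:3]:
    print(endpoint)  # ApiEndpoint(method, uri, title) parsed from the dump
```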