code (string, lengths 2 to 1.05M) | repo_name (string, lengths 5 to 104) | path (string, lengths 4 to 251) | language (stringclasses 1) | license (stringclasses 15) | size (int32, 2 to 1.05M)
---|---|---|---|---|---
import sys
class FileNotGivenError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def files():
fname_in = None
fname_out = None
try:
args = iter(sys.argv[1:])
while True:
nxt = next(args)
if nxt == '-i':
fname_in = next(args)
elif nxt == '-o':
fname_out = next(args)
else:
fname_in = nxt
except StopIteration:
if fname_in is None:
print('Usage: ./pixelate.py [-i] filename [-o filename]')
sys.exit(2)
elif fname_out is None:
index = fname_in.rfind('.')
fname_out = fname_in[:index] + '-pxl8d' + fname_in[index:]
return fname_in, fname_out
| aidan-fitz/pixelator | cmd.py | Python | mit | 810 |
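A quick illustration of the default output-name rule implemented in files() above; the filename is a made-up example.

fname_in = 'photo.png'  # hypothetical value passed with -i (or positionally)
index = fname_in.rfind('.')
fname_out = fname_in[:index] + '-pxl8d' + fname_in[index:]
print(fname_out)  # photo-pxl8d.png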
import sys
import PyFBA.metabolism
class Reaction:
"""
A reaction is the central concept of metabolism and is the conversion of substrates to products.
    The reaction describes what we know. At a bare minimum we need a name for the reaction. The name can either be the
reaction id (e.g. modelSEED or KEGG id), or another name for this reaction.
A reaction is an object that describes how to get from one compound to another. We need to know what the compound(s)
on the left of the equation are, what the compounds on the right of the reaction are, and the probability that the
reaction proceeds in either direction. If the reaction is truly reversible the probability can be 1 in both cases.
If it is unidirectional the probability can be 0 in one direction.
The likelihood that a reaction completes will be some product of its delta G and its p. We could also do something
simpler, e.g. if there is a -ve delta G (favorable reaction) we can increase p and if there is a +ve delta G
(unfavorable reaction) we can decrease p.
    The direction and reversibility describe the direction in which the equation can run.
Acceptable values are:
====== ===========================
Value Meaning
====== ===========================
None We don't know the direction
> Left to right
< Right to left
= Bidirectional
====== ===========================
:ivar rctn_id: The reaction ID
:ivar readable_name: The name of the reaction
:ivar description: A description of the reaction
:ivar equation: The reaction equation
    :ivar direction: The direction of the reaction (<, =, >, or None)
:ivar gfdirection: The possible gapfilled direction
:ivar ntdirection: The non-template direction (before correcting for templates)
:ivar left_compounds: A set of CompoundWithLocations on the left side of the reaction
:ivar left_abundance: A dict of the CompoundWithLocations on the left and their abundance
:ivar right_compounds: The set of CompoundWithLocations on the right side of the equation
:ivar right_abundance: A dict of the CompoundWithLocations on the right and their abundance
:ivar lower_bound: The lower bound for the reaction
:ivar upper_bound: The upper bound for the reaction
:ivar pLR: The probability the reaction proceeds left to right
:ivar pRL: The probability the reaction proceeds right to left
:ivar enzymes: The enzyme complex IDs involved in the reaction
:ivar pegs: The protein-encoding genes involved in the reaction
:ivar deltaG: The delta G
:ivar deltaG_error: The error in the delta G
:ivar inp: Whether the reaction is an input reaction
:ivar outp: Whether the reaction is an output reaction
:ivar is_transport: Whether the reaction is a transport reaction (imports or exports something)
:ivar ran: Boolean to note whether the reaction ran
:ivar is_biomass_reaction: Boolean to note whether this is a biomass reaction
:ivar biomass_direction: If it is a biomass reaction, what is the direction
:ivar is_gapfilled: Boolean to note whether the reaction was gapfilled
:ivar gapfill_method: If the reaction was gapfilled, how was it gapfilled
:ivar is_uptake_secretion: Is the reaction involved in uptake of compounds or secretion of compounds.
"""
def __init__(self, rctn_id, readable_name=None, description=None, equation=None, direction=None):
"""
Instantiate a reaction
:param rctn_id: the reaction id
:param readable_name: a human readable name. This was refactored from name to make it more unique
:param description: a description of the reaction
:param equation: the equation for the reaction
:param direction: the direction of the reaction
"""
self.id = rctn_id
self.model_seed_id = rctn_id
self.readable_name = readable_name
self.description = description
self.equation = equation
self.direction = direction
self.gfdirection = direction # the gap filling direction
self.ntdirection = direction # the non-template driven direction
self.left_compounds = set() # type: set[PyFBA.metabolism.CompoundWithLocation]
self.left_abundance = {}
self.right_compounds = set() # type: set[PyFBA.metabolism.CompoundWithLocation]
self.right_abundance = {}
self.lower_bound = None
self.upper_bound = None
self.pLR = 0
self.pRL = 0
self.enzymes = set()
self.ec_numbers = []
self.pegs = set()
self.deltaG_error = 0
self.deltaG = 0
self.inp = False
self.outp = False
self.is_transport = False
self.ran = False
self.is_biomass_reaction = False
self.biomass_direction = False
self.is_gapfilled = False
self.gapfill_method = ""
self.is_uptake_secretion = False
self.aliases = []
def __eq__(self, other):
"""
        Two reactions are the same if they have the same left and
        right compounds, but not necessarily the same names or ids.
Note that we don't care whether the left and right (the
directionality) is the same in our two comparisons
:param other: The other reaction
:type other: Reaction
:return: Boolean
:rtype: bool
"""
if isinstance(other, Reaction):
return (self.id == other.id or
(self.left_compounds, self.right_compounds) ==
(other.left_compounds, other.right_compounds) or
(self.left_compounds, self.right_compounds) ==
(other.right_compounds, other.left_compounds)
)
else:
raise NotImplementedError(f"Comparing Reaction with {type(other)} is not implemented")
def __cmp__(self, other):
"""
Compare whether two things are the same
:param other: The other reaction
:type other: Reaction
:return: an integer, zero if they are the same
:rtype: int
"""
if isinstance(other, Reaction):
            if self.__eq__(other):
return 0
else:
return 1
else:
raise NotImplementedError(f"Comparing Reaction with {type(other)} is not implemented")
def __ne__(self, other):
"""
Are these not equal?
:param other: The other reaction
:type other: Reaction
:return: Boolean
:rtype: bool
"""
try:
result = self.__eq__(other)
except NotImplementedError:
return True
return not result
def __hash__(self):
"""
        The hash function is based on the id and readable name of the reaction.
:rtype: int
"""
return hash((self.id, self.readable_name))
def __str__(self):
"""
The string version of the reaction.
:rtype: str
"""
if self.readable_name:
return f"{self.id}: {self.readable_name}"
else:
return f"{self.id}: {self.equation}"
"""
Since we have complex data structures, we can't just pickle them and unpickle them with aplomb!
In fact, this is affecting deep/shallow copy, and we need to ensure that we use copy.deepcopy()
at all times, otherwise the data structures are not copied correctly.
These two methods correctly allow us to pickle the data structures. Note that we have
CompoundWithLocation objects, and we need both the object and its abundance to correctly create the pickle.
"""
def __getstate__(self):
"""
The state that the object is saved or copied as. We override the left/right compounds and abundances
with simple arrays of data. This is lossy - we are losing the connections between compounds and data
and we probably need to reconstruct that after pickling/unpickling the reactions.
:return:
"""
state = self.__dict__.copy()
state['left_compounds'] = []
state['right_compounds'] = []
state['left_abundance'] = {}
state['right_abundance'] = {}
for l in self.left_compounds:
state['left_compounds'].append([l.id, l.name, l.location])
state['left_abundance'][f"{l.id} :: {l.name} :: {l.location}"] = self.left_abundance[l]
for r in self.right_compounds:
state['right_compounds'].append([r.id, r.name, r.location])
state['right_abundance'][f"{r.id} :: {r.name} :: {r.location}"] = self.right_abundance[r]
return state
def __setstate__(self, state):
"""
        Create a new reaction from a saved state, as produced by __getstate__ (e.g. when unpickling).
:param state: the state that was saved.
:return:
"""
left = set()
right = set()
left_abundance = {}
right_abundance = {}
for l in state['left_compounds']:
c = PyFBA.metabolism.CompoundWithLocation(id=l[0], name=l[1], location=l[2])
left.add(c)
left_abundance[c] = state['left_abundance'][f"{l[0]} :: {l[1]} :: {l[2]}"]
state['left_compounds'] = left
state['left_abundance'] = left_abundance
for r in state['right_compounds']:
c = PyFBA.metabolism.CompoundWithLocation(id=r[0], name=r[1], location=r[2])
right.add(c)
right_abundance[c] = state['right_abundance'][f"{r[0]} :: {r[1]} :: {r[2]}"]
state['right_compounds'] = right
state['right_abundance'] = right_abundance
self.__dict__.update(state)
def set_direction(self, direction):
"""
Set the direction of the reaction.
:param direction: The direction of the reaction
:type direction: str
:rtype: str
:return: The current direction
"""
allowable_directions = {'>', '<', '=', None}
if direction in allowable_directions:
self.direction = direction
if not self.gfdirection:
self.gfdirection = direction
else:
sys.stderr.write("Direction: " + str(direction) + " is not a permitted direction. Ignored\n")
self.direction = None
return self.direction
def add_left_compounds(self, cmpds):
"""
The compounds on the left are a set of compounds that the reaction typically uses as substrates.
:param cmpds: The compounds that should be added
:type cmpds: set[PyFBA.metabolism.CompoundWithLocation]
"""
if isinstance(cmpds, set):
# choose one element. next(iter(cmpds)) does not remove the element
if not isinstance(next(iter(cmpds)), PyFBA.metabolism.CompoundWithLocation):
raise TypeError(f"Starting with v.2 reactions need PyFBA.metabolism.CompoundWithLocation objects not {type(next(iter(cmpds)))}")
self.left_compounds.update(cmpds)
elif isinstance(cmpds, PyFBA.metabolism.CompoundWithLocation):
# add a single compound
self.left_compounds.add(cmpds)
else:
raise TypeError("Compounds must be a set of CompoundWithLocation")
def set_left_compound_abundance(self, cmpd, abundance):
"""
Set the abundance of a compound on the left side of the equation.
:param cmpd: The compound to set the abundance for
:type cmpd: PyFBA.metabolism.CompoundWithLocation
:param abundance: The amount of that abundance
:type abundance: float | int
"""
if cmpd not in self.left_compounds:
raise KeyError(f"{cmpd} is not in left compounds. Please add it before trying to set the abundance")
if isinstance(abundance, float):
self.left_abundance[cmpd] = abundance
elif isinstance(abundance, int):
self.left_abundance[cmpd] = float(abundance)
else:
raise TypeError("Abundance must be an int or a float")
def get_left_compound_abundance(self, cmpd):
"""
Get the abundance of the compound on the left side of the equation.
:param cmpd: The compound to get the abundance of
:type cmpd: PyFBA.metabolism.CompoundWithLocation
        :return: The compound's abundance
:rtype: float
"""
if cmpd in self.left_abundance:
return self.left_abundance[cmpd]
else:
raise KeyError(f"In the reaction {self.readable_name} (reaction id: {self.id}), you do not have" +
f" {cmpd} on the left hand side of the equation: {self.equation}")
def number_of_left_compounds(self):
"""
The number of compounds on the left side of the equation.
:rtype: int
"""
return len(self.left_compounds)
def add_right_compounds(self, cmpds):
"""
        The compounds on the right are a set of compounds that the reaction typically produces as products.
:param cmpds: The compounds that should be added
:type cmpds: set[PyFBA.metabolism.CompoundWithLocation]
"""
if isinstance(cmpds, set):
# choose one element. next(iter(cmpds)) does not remove the element
if not isinstance(next(iter(cmpds)), PyFBA.metabolism.CompoundWithLocation):
raise TypeError("Starting with v.2 reactions need PyFBA.metabolism.CompoundWithLocation objects")
self.right_compounds.update(cmpds)
elif isinstance(cmpds, PyFBA.metabolism.CompoundWithLocation):
# add a single compound
self.right_compounds.add(cmpds)
else:
raise TypeError("Compounds must be a set of CompoundWithLocation")
def set_right_compound_abundance(self, cmpd, abundance):
"""
Set the abundance of a compound on the right side of the equation
:param cmpd: The compound to set the abundance for
:type cmpd: PyFBA.metabolism.CompoundWithLocation
:param abundance: The amount of that abundance
:type abundance: float | int
"""
if cmpd not in self.right_compounds:
raise KeyError(f"{cmpd} is not in right compounds. " + " Please add it before trying to set the abundance")
if isinstance(abundance, float):
self.right_abundance[cmpd] = abundance
elif isinstance(abundance, int):
self.right_abundance[cmpd] = float(abundance)
else:
raise TypeError("Abundance must be an int or a float")
def get_right_compound_abundance(self, cmpd):
"""
Get the abundance of the compound on the right side of the equation.
:param cmpd: The compound to get the abundance of
        :type cmpd: PyFBA.metabolism.CompoundWithLocation
        :return: The compound's abundance
:rtype: float
"""
if cmpd in self.right_abundance:
return self.right_abundance[cmpd]
else:
raise KeyError(f"In the reaction {self.readable_name} (reaction id: {self.id}), you do not have" +
f" {cmpd} on the right hand side of the equation: {self.equation}")
def number_of_right_compounds(self):
"""
The number of compounds on the right side of the equation.
:rtype: int
"""
return len(self.right_compounds)
def all_compounds(self):
"""
Get all the compounds involved in this reaction.
:return: A set of all the compounds
:rtype: set
"""
return self.left_compounds.union(self.right_compounds)
def number_of_compounds(self):
"""
Get the total number of compounds involved in this reaction.
:rtype: int
"""
return len(self.all_compounds())
def has(self, cmpd):
"""
Does this reaction have a compound? Just returns true if the compound is present somewhere in the reaction.
:param cmpd: The compound to test for
:type cmpd: Compound
:rtype: bool
"""
return cmpd in self.left_compounds or cmpd in self.right_compounds
def opposite_sides(self, cmpd1, cmpd2):
"""
Are these two compounds on opposite sides of the reaction?
:param cmpd1: The first compound
:type cmpd1: Compound
:param cmpd2: The second compound
:type cmpd2: Compound
:return: Whether the compounds are on opposite sides
:rtype: bool
"""
if not self.has(cmpd1):
raise ValueError(str(cmpd1) + " is not in this reaction")
if not self.has(cmpd2):
raise ValueError(str(cmpd2) + " is not in this reaction")
if cmpd1 in self.left_compounds and cmpd2 in self.right_compounds:
return True
if cmpd1 in self.right_compounds and cmpd2 in self.left_compounds:
return True
return False
def set_probability_left_to_right(self, p):
"""
Set the probability of the reaction running left to right. Note you can also access this as reaction.pLR
        :param p: The probability
:type p: float
"""
if isinstance(p, float):
self.pLR = p
elif isinstance(p, int):
self.pLR = float(p)
else:
raise TypeError("The probability must be an int or a float")
def get_probability_left_to_right(self):
"""
Get the probability of the reaction running left to right. Note you can also access this as reaction.pLR
        :return: The probability
        :rtype: float
"""
return self.pLR
def set_probability_right_to_left(self, p):
"""
        Set the probability of the reaction running right to left. Note you can also access this as reaction.pRL
        :param p: The probability
:type p: float
"""
if isinstance(p, float):
self.pRL = p
elif isinstance(p, int):
self.pRL = float(p)
else:
raise TypeError("The probability must be an int or a float")
def get_probability_right_to_left(self):
"""
Get the probability of the reaction running right to left. Note you can also access this as reaction.pRL
        :return: The probability
        :rtype: float
"""
return self.pRL
def add_enzymes(self, enz):
"""
Add one or more enzymes that completes this reaction.
:param enz: A set of enzymes that you want to add
:type enz: set
"""
if isinstance(enz, set):
self.enzymes.update(enz)
else:
raise TypeError("You need to supply a set of enzymes")
def has_enzyme(self, enz):
"""
Check whether an enzyme is involved in this reaction.
:param enz: An Enzyme object
:type enz: Enzyme
:return: Whether we have this enzyme
:rtype: bool
"""
return enz in self.enzymes
def all_enzymes(self):
"""
Get all the enzymes involved in this reaction. Returns a set of complex IDs.
:rtype: set
"""
return self.enzymes
def number_of_enzymes(self):
"""
Gets the number of enzymes involved in this reaction.
:rtype: int
"""
return len(self.enzymes)
def add_pegs(self, pegs):
"""
Add one or more pegs to this reaction. Pegs must be a set.
:param pegs: The pegs to add to the reaction
:type pegs: set
"""
if isinstance(pegs, set):
self.pegs.update(pegs)
else:
raise TypeError("pegs must be a set")
def has_peg(self, peg):
"""
Check whether a peg is involved in this reaction.
:param peg: The peg to check for
:type peg: str
:rtype: bool
"""
return peg in self.pegs
def set_deltaG(self, dg):
"""
Set the value for delta G (Gibbs free energy) for this reaction. Recall -ve deltaG means the reaction is
favorable.
:param dg: The delta G of the reaction
:type dg: float
"""
if isinstance(dg, float):
self.deltaG = dg
elif isinstance(dg, int):
self.deltaG = float(dg)
else:
raise TypeError("The delta G must be an int or a float")
def get_deltaG(self):
"""
Get the value for delta G (Gibbs free energy) for this reaction.
:rtype: float
"""
return self.deltaG
def check_input_output(self):
"""
Check whether this reaction is an input or output reaction.
This is called when we ask is_input_reaction / is_output_reaction and both inp and outp are False
"""
# do we have external compounds on the left ... then it is an input reaction
for c in self.left_compounds:
if c.location == 'e':
self.inp = True
for c in self.right_compounds:
if c.location == 'e':
self.outp = True
def toggle_input_reaction(self):
"""
Set this reaction as an input reaction. This only applies to
this reaction, so if it is true we set it false, else we set
it true
"""
if self.inp:
self.inp = False
else:
self.inp = True
def is_input_reaction(self):
"""
Is this an input reaction?
:rtype: bool
"""
if self.inp is False and self.outp is False:
self.check_input_output()
return self.inp
def toggle_output_reaction(self):
"""
Set this reaction as an output reaction. This only applies to
this reaction, so if it is true we set it false, else we set
it true
"""
if self.outp:
self.outp = False
else:
self.outp = True
def is_output_reaction(self):
"""
Is this an output reaction?
:rtype: bool
"""
if self.inp is False and self.outp is False:
self.check_input_output()
return self.outp
def reverse_reaction(self):
"""
Reverse the reaction - move the left compounds to the right,
and vice versa. We also switch the abundances and the pLR and
pRL.
We also negate the deltaG, since that should be the other way
around now.
        We also swap the input/output flags, since what was imported
        is now exported and vice versa.
"""
(self.left_compounds, self.right_compounds) = (self.right_compounds, self.left_compounds)
(self.left_abundance, self.right_abundance) = (self.right_abundance, self.left_abundance)
(self.inp, self.outp) = (self.outp, self.inp)
# we only need to reverse two directions
if self.direction == '>':
self.direction = '<'
elif self.direction == '<':
self.direction = '>'
# we only need to reverse two gfdirections
if self.gfdirection == '>':
self.gfdirection = '<'
elif self.gfdirection == '<':
self.gfdirection = '>'
        if self.lower_bound is not None and self.upper_bound is not None:
lbtemp = 0 - self.lower_bound
self.lower_bound = 0 - self.upper_bound
self.upper_bound = lbtemp
(self.pLR, self.pRL) = (self.pRL, self.pLR)
self.deltaG = -self.deltaG
def add_attribute(self, key, value):
"""
Add an attribute to this class
"""
setattr(self, key, value)
def get_attribute(self, key):
"""
Retrieve an attribute
"""
return getattr(self, key)
def reset_bounds(self):
"""
reset the bounds of this reaction. If we are using this in gapfilling, we need to reset the bounds
so we can calculate appropriately.
:return: None
"""
self.lower_bound = None
        self.upper_bound = None
| linsalrob/PyFBA | PyFBA/metabolism/reaction.py | Python | mit | 24,179 |
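A minimal usage sketch for the Reaction class above. It assumes Reaction and CompoundWithLocation are both exposed from PyFBA.metabolism (reaction.py itself refers to PyFBA.metabolism.CompoundWithLocation); the ids, names and abundances are invented for illustration.

import PyFBA.metabolism

r = PyFBA.metabolism.Reaction('rxn00001', readable_name='example reaction')
r.set_direction('>')

substrate = PyFBA.metabolism.CompoundWithLocation(id='cpd00001', name='A', location='c')
product = PyFBA.metabolism.CompoundWithLocation(id='cpd00002', name='B', location='c')

r.add_left_compounds({substrate})
r.set_left_compound_abundance(substrate, 1)
r.add_right_compounds({product})
r.set_right_compound_abundance(product, 2)

print(r)                                     # rxn00001: example reaction
print(r.number_of_compounds())               # 2
print(r.opposite_sides(substrate, product))  # True

# reverse_reaction() swaps the two sides, their abundances, the bounds and
# pLR/pRL, and negates deltaG, so the former substrate is now on the right.
r.reverse_reaction()
print(r.get_right_compound_abundance(substrate))  # 1.0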
import sys
import numpy
import pytest
import cupy
from cupy import testing
class TestMisc:
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(atol=1e-5)
def check_unary(self, name, xp, dtype, no_bool=False):
if no_bool and numpy.dtype(dtype).char == '?':
return numpy.int_(0)
a = testing.shaped_arange((2, 3), xp, dtype)
return getattr(xp, name)(a)
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(atol=1e-5)
def check_binary(self, name, xp, dtype, no_bool=False):
if no_bool and numpy.dtype(dtype).char == '?':
return numpy.int_(0)
a = testing.shaped_arange((2, 3), xp, dtype)
b = testing.shaped_reverse_arange((2, 3), xp, dtype)
return getattr(xp, name)(a, b)
@testing.for_dtypes(['?', 'b', 'h', 'i', 'q', 'e', 'f', 'd', 'F', 'D'])
@testing.numpy_cupy_allclose(atol=1e-5)
def check_unary_negative(self, name, xp, dtype, no_bool=False):
if no_bool and numpy.dtype(dtype).char == '?':
return numpy.int_(0)
a = xp.array([-3, -2, -1, 1, 2, 3], dtype=dtype)
if numpy.dtype(dtype).kind == 'c':
a += (a * 1j).astype(dtype)
return getattr(xp, name)(a)
@testing.for_dtypes(['e', 'f', 'd', 'F', 'D'])
@testing.numpy_cupy_allclose(atol=1e-5)
def check_unary_inf(self, name, xp, dtype, **kwargs):
inf = numpy.inf
if numpy.dtype(dtype).kind != 'c':
a = xp.array([0, -1, 1, -inf, inf], dtype=dtype)
else:
a = xp.array([complex(x, y)
for x in [0, -1, 1, -inf, inf]
for y in [0, -1, 1, -inf, inf]],
dtype=dtype)
return getattr(xp, name)(a, **kwargs)
@testing.for_dtypes(['e', 'f', 'd', 'F', 'D'])
@testing.numpy_cupy_allclose(atol=1e-5)
def check_unary_nan(self, name, xp, dtype, **kwargs):
nan = numpy.nan
if numpy.dtype(dtype).kind != 'c':
a = xp.array([0, -1, 1, -nan, nan], dtype=dtype)
else:
a = xp.array([complex(x, y)
for x in [0, -1, 1, -nan, nan]
for y in [0, -1, 1, -nan, nan]],
dtype=dtype)
return getattr(xp, name)(a, **kwargs)
@testing.for_dtypes(['e', 'f', 'd', 'F', 'D'])
@testing.numpy_cupy_allclose(atol=1e-5)
def check_unary_inf_nan(self, name, xp, dtype):
inf = numpy.inf
nan = numpy.nan
if numpy.dtype(dtype).kind != 'c':
a = xp.array([0, -1, 1, -inf, inf, -nan, nan], dtype=dtype)
else:
a = xp.array([complex(x, y)
for x in [0, -1, 1, -inf, inf, -nan, nan]
for y in [0, -1, 1, -inf, inf, -nan, nan]],
dtype=dtype)
return getattr(xp, name)(a)
@testing.for_dtypes(['e', 'f', 'd', 'F', 'D'])
@testing.numpy_cupy_array_equal()
def check_binary_nan(self, name, xp, dtype):
a = xp.array([-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, 2],
dtype=dtype)
b = xp.array([numpy.NAN, numpy.NAN, 1, 0, numpy.NAN, -1, -2],
dtype=dtype)
return getattr(xp, name)(a, b)
    @pytest.mark.skipif(
sys.platform == 'win32', reason='dtype problem on Windows')
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_clip1(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.clip(3, 13)
@testing.for_all_dtypes(no_bool=True, no_complex=True)
@testing.numpy_cupy_array_equal()
def test_clip3(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.clip(3, 13)
@testing.for_all_dtypes(no_bool=True, no_complex=True)
@testing.numpy_cupy_array_equal()
def test_clip_min_none(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.clip(None, 3)
@testing.for_all_dtypes(no_bool=True, no_complex=True)
@testing.numpy_cupy_array_equal()
def test_clip_max_none(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return a.clip(3, None)
@testing.for_all_dtypes(no_bool=True, no_complex=True)
def test_clip_min_max_none(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
with pytest.raises(ValueError):
a.clip(None, None)
    @pytest.mark.skipif(
sys.platform == 'win32', reason='dtype problem on Windows')
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_external_clip1(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.clip(a, 3, 13)
@testing.for_all_dtypes(no_bool=True, no_complex=True)
@testing.numpy_cupy_array_equal()
def test_external_clip2(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.clip(a, 3, 13)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_clip2(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
a_min = xp.array([3, 4, 5, 6], dtype=dtype)
a_max = xp.array([[10], [9], [8]], dtype=dtype)
return a.clip(a_min, a_max)
def test_sqrt(self):
self.check_unary('sqrt')
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_cbrt(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.cbrt(a)
def test_square(self):
self.check_unary('square')
def test_absolute(self):
self.check_unary('absolute')
def test_absolute_negative(self):
self.check_unary_negative('absolute')
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_fabs(self, xp, dtype):
a = xp.array([2, 3, 4], dtype=dtype)
return xp.fabs(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_fabs_negative(self, xp, dtype):
a = xp.array([-2.0, -4.0, 0.0, 4.0], dtype=dtype)
return xp.fabs(a)
def test_sign(self):
self.check_unary('sign', no_bool=True)
def test_sign_negative(self):
self.check_unary_negative('sign', no_bool=True)
def test_maximum(self):
self.check_binary('maximum')
def test_maximum_nan(self):
self.check_binary_nan('maximum')
def test_minimum(self):
self.check_binary('minimum')
def test_minimum_nan(self):
self.check_binary_nan('minimum')
def test_fmax(self):
self.check_binary('fmax')
def test_fmax_nan(self):
self.check_binary_nan('fmax')
def test_fmin(self):
self.check_binary('fmin')
def test_fmin_nan(self):
self.check_binary_nan('fmin')
def test_nan_to_num(self):
self.check_unary('nan_to_num')
def test_nan_to_num_negative(self):
self.check_unary_negative('nan_to_num')
def test_nan_to_num_for_old_numpy(self):
self.check_unary('nan_to_num', no_bool=True)
def test_nan_to_num_negative_for_old_numpy(self):
self.check_unary_negative('nan_to_num', no_bool=True)
def test_nan_to_num_inf(self):
self.check_unary_inf('nan_to_num')
def test_nan_to_num_nan(self):
self.check_unary_nan('nan_to_num')
@testing.numpy_cupy_allclose(atol=1e-5)
def test_nan_to_num_scalar_nan(self, xp):
return xp.nan_to_num(xp.nan)
def test_nan_to_num_inf_nan(self):
self.check_unary_inf_nan('nan_to_num')
def test_nan_to_num_nan_arg(self):
self.check_unary_nan('nan_to_num', nan=1.0)
def test_nan_to_num_inf_arg(self):
self.check_unary_inf('nan_to_num', posinf=1.0, neginf=-1.0)
@testing.numpy_cupy_array_equal()
def test_nan_to_num_copy(self, xp):
x = xp.asarray([0, 1, xp.nan, 4], dtype=xp.float64)
y = xp.nan_to_num(x, copy=True)
assert x is not y
return y
@testing.numpy_cupy_array_equal()
def test_nan_to_num_inplace(self, xp):
x = xp.asarray([0, 1, xp.nan, 4], dtype=xp.float64)
y = xp.nan_to_num(x, copy=False)
assert x is y
return y
@pytest.mark.parametrize('kwarg', ['nan', 'posinf', 'neginf'])
def test_nan_to_num_broadcast(self, kwarg):
for xp in (numpy, cupy):
x = xp.asarray([0, 1, xp.nan, 4], dtype=xp.float64)
y = xp.zeros((2, 4), dtype=xp.float64)
with pytest.raises(ValueError):
xp.nan_to_num(x, **{kwarg: y})
with pytest.raises(ValueError):
xp.nan_to_num(0.0, **{kwarg: y})
@testing.for_all_dtypes(no_bool=True, no_complex=True)
@testing.numpy_cupy_array_equal()
def test_real_if_close_real_dtypes(self, xp, dtype):
x = testing.shaped_random((10,), xp, dtype)
return xp.real_if_close(x)
@testing.for_all_dtypes(no_bool=True, no_complex=True)
@testing.numpy_cupy_array_equal()
def test_real_if_close_with_tol_real_dtypes(self, xp, dtype):
x = testing.shaped_random((10,), xp, dtype)
return xp.real_if_close(x, tol=1e-6)
@testing.for_complex_dtypes()
@testing.numpy_cupy_array_equal()
def test_real_if_close_true(self, xp, dtype):
dtype = numpy.dtype(dtype).char.lower()
tol = numpy.finfo(dtype).eps * 90
x = testing.shaped_random((10,), xp, dtype) + tol * 1j
out = xp.real_if_close(x)
assert x.dtype != out.dtype
return out
@testing.for_complex_dtypes()
@testing.numpy_cupy_array_equal()
def test_real_if_close_false(self, xp, dtype):
dtype = numpy.dtype(dtype).char.lower()
tol = numpy.finfo(dtype).eps * 110
x = testing.shaped_random((10,), xp, dtype) + tol * 1j
out = xp.real_if_close(x)
assert x.dtype == out.dtype
return out
@testing.for_complex_dtypes()
@testing.numpy_cupy_array_equal()
def test_real_if_close_with_integer_tol_true(self, xp, dtype):
dtype = numpy.dtype(dtype).char.lower()
tol = numpy.finfo(dtype).eps * 140
x = testing.shaped_random((10,), xp, dtype) + tol * 1j
out = xp.real_if_close(x, tol=150)
assert x.dtype != out.dtype
return out
@testing.for_complex_dtypes()
@testing.numpy_cupy_array_equal()
def test_real_if_close_with_integer_tol_false(self, xp, dtype):
dtype = numpy.dtype(dtype).char.lower()
tol = numpy.finfo(dtype).eps * 50
x = testing.shaped_random((10,), xp, dtype) + tol * 1j
out = xp.real_if_close(x, tol=30)
assert x.dtype == out.dtype
return out
@testing.for_complex_dtypes()
@testing.numpy_cupy_array_equal()
def test_real_if_close_with_float_tol_true(self, xp, dtype):
dtype = numpy.dtype(dtype).char.lower()
x = testing.shaped_random((10,), xp, dtype) + 3e-4j
out = xp.real_if_close(x, tol=1e-3)
assert x.dtype != out.dtype
return out
@testing.for_complex_dtypes()
@testing.numpy_cupy_array_equal()
def test_real_if_close_with_float_tol_false(self, xp, dtype):
dtype = numpy.dtype(dtype).char.lower()
x = testing.shaped_random((10,), xp, dtype) + 3e-3j
out = xp.real_if_close(x, tol=1e-3)
assert x.dtype == out.dtype
return out
@testing.for_all_dtypes(name='dtype_x', no_bool=True, no_complex=True)
@testing.for_all_dtypes(name='dtype_y', no_bool=True)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_interp(self, xp, dtype_y, dtype_x):
# interpolate at points on and outside the boundaries
x = xp.asarray([0, 1, 2, 4, 6, 8, 9, 10], dtype=dtype_x)
fx = xp.asarray([1, 3, 5, 7, 9], dtype=dtype_x)
fy = xp.sin(fx).astype(dtype_y)
return xp.interp(x, fx, fy)
@testing.for_all_dtypes(name='dtype_x', no_bool=True, no_complex=True)
@testing.for_all_dtypes(name='dtype_y', no_bool=True)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_interp_period(self, xp, dtype_y, dtype_x):
# interpolate at points on and outside the boundaries
x = xp.asarray([0, 1, 2, 4, 6, 8, 9, 10], dtype=dtype_x)
fx = xp.asarray([1, 3, 5, 7, 9], dtype=dtype_x)
fy = xp.sin(fx).astype(dtype_y)
return xp.interp(x, fx, fy, period=5)
@testing.for_all_dtypes(name='dtype_x', no_bool=True, no_complex=True)
@testing.for_all_dtypes(name='dtype_y', no_bool=True)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_interp_left_right(self, xp, dtype_y, dtype_x):
# interpolate at points on and outside the boundaries
x = xp.asarray([0, 1, 2, 4, 6, 8, 9, 10], dtype=dtype_x)
fx = xp.asarray([1, 3, 5, 7, 9], dtype=dtype_x)
fy = xp.sin(fx).astype(dtype_y)
left = 10
right = 20
return xp.interp(x, fx, fy, left, right)
@testing.with_requires('numpy>=1.17.0')
@testing.for_all_dtypes(name='dtype_x', no_bool=True, no_complex=True)
@testing.for_dtypes('efdFD', name='dtype_y')
@testing.numpy_cupy_allclose(atol=1e-5)
def test_interp_nan_fy(self, xp, dtype_y, dtype_x):
# interpolate at points on and outside the boundaries
x = xp.asarray([0, 1, 2, 4, 6, 8, 9, 10], dtype=dtype_x)
fx = xp.asarray([1, 3, 5, 7, 9], dtype=dtype_x)
fy = xp.sin(fx).astype(dtype_y)
fy[0] = fy[2] = fy[-1] = numpy.nan
return xp.interp(x, fx, fy)
@testing.with_requires('numpy>=1.17.0')
@testing.for_float_dtypes(name='dtype_x')
@testing.for_dtypes('efdFD', name='dtype_y')
@testing.numpy_cupy_allclose(atol=1e-5)
def test_interp_nan_fx(self, xp, dtype_y, dtype_x):
# interpolate at points on and outside the boundaries
x = xp.asarray([0, 1, 2, 4, 6, 8, 9, 10], dtype=dtype_x)
fx = xp.asarray([1, 3, 5, 7, 9], dtype=dtype_x)
fy = xp.sin(fx).astype(dtype_y)
fx[-1] = numpy.nan # x and fx must remain sorted (NaNs are the last)
return xp.interp(x, fx, fy)
@testing.with_requires('numpy>=1.17.0')
@testing.for_float_dtypes(name='dtype_x')
@testing.for_dtypes('efdFD', name='dtype_y')
@testing.numpy_cupy_allclose(atol=1e-5)
def test_interp_nan_x(self, xp, dtype_y, dtype_x):
# interpolate at points on and outside the boundaries
x = xp.asarray([0, 1, 2, 4, 6, 8, 9, 10], dtype=dtype_x)
fx = xp.asarray([1, 3, 5, 7, 9], dtype=dtype_x)
fy = xp.sin(fx).astype(dtype_y)
x[-1] = numpy.nan # x and fx must remain sorted (NaNs are the last)
return xp.interp(x, fx, fy)
@testing.with_requires('numpy>=1.17.0')
@testing.for_all_dtypes(name='dtype_x', no_bool=True, no_complex=True)
@testing.for_dtypes('efdFD', name='dtype_y')
@testing.numpy_cupy_allclose(atol=1e-5)
def test_interp_inf_fy(self, xp, dtype_y, dtype_x):
# interpolate at points on and outside the boundaries
x = xp.asarray([0, 1, 2, 4, 6, 8, 9, 10], dtype=dtype_x)
fx = xp.asarray([1, 3, 5, 7, 9], dtype=dtype_x)
fy = xp.sin(fx).astype(dtype_y)
fy[0] = fy[2] = fy[-1] = numpy.inf
return xp.interp(x, fx, fy)
@testing.with_requires('numpy>=1.17.0')
@testing.for_float_dtypes(name='dtype_x')
@testing.for_dtypes('efdFD', name='dtype_y')
@testing.numpy_cupy_allclose(atol=1e-5)
def test_interp_inf_fx(self, xp, dtype_y, dtype_x):
# interpolate at points on and outside the boundaries
x = xp.asarray([0, 1, 2, 4, 6, 8, 9, 10], dtype=dtype_x)
fx = xp.asarray([1, 3, 5, 7, 9], dtype=dtype_x)
fy = xp.sin(fx).astype(dtype_y)
fx[-1] = numpy.inf # x and fx must remain sorted
return xp.interp(x, fx, fy)
@testing.with_requires('numpy>=1.17.0')
@testing.for_float_dtypes(name='dtype_x')
@testing.for_dtypes('efdFD', name='dtype_y')
@testing.numpy_cupy_allclose(atol=1e-5)
def test_interp_inf_x(self, xp, dtype_y, dtype_x):
# interpolate at points on and outside the boundaries
x = xp.asarray([0, 1, 2, 4, 6, 8, 9, 10], dtype=dtype_x)
fx = xp.asarray([1, 3, 5, 7, 9], dtype=dtype_x)
fy = xp.sin(fx).astype(dtype_y)
x[-1] = numpy.inf # x and fx must remain sorted
return xp.interp(x, fx, fy)
@testing.for_all_dtypes(name='dtype_x', no_bool=True, no_complex=True)
@testing.for_all_dtypes(name='dtype_y', no_bool=True)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_interp_size1(self, xp, dtype_y, dtype_x):
# interpolate at points on and outside the boundaries
x = xp.asarray([0, 1, 2, 4, 6, 8, 9, 10], dtype=dtype_x)
fx = xp.asarray([5], dtype=dtype_x)
fy = xp.sin(fx).astype(dtype_y)
left = 10
right = 20
return xp.interp(x, fx, fy, left, right)
@testing.with_requires('numpy>=1.17.0')
@testing.for_float_dtypes(name='dtype_x')
@testing.for_dtypes('efdFD', name='dtype_y')
@testing.numpy_cupy_allclose(atol=1e-5)
def test_interp_inf_to_nan(self, xp, dtype_y, dtype_x):
# from NumPy's test_non_finite_inf
x = xp.asarray([0.5], dtype=dtype_x)
fx = xp.asarray([-numpy.inf, numpy.inf], dtype=dtype_x)
fy = xp.asarray([0, 10], dtype=dtype_y)
return xp.interp(x, fx, fy)
@testing.parameterize(*testing.product({
'mode': ['valid', 'same', 'full'],
'shape1': [(), (5,), (6,), (20,), (21,)],
'shape2': [(), (5,), (6,), (20,), (21,)],
}))
class TestConvolveShapeCombination:
@testing.for_all_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(rtol=1e-3)
def test_convolve(self, xp, dtype):
a = testing.shaped_arange(self.shape1, xp, dtype)
b = testing.shaped_arange(self.shape2, xp, dtype)
return xp.convolve(a, b, mode=self.mode)
@pytest.mark.parametrize('mode', ['valid', 'same', 'full'])
class TestConvolve:
@testing.for_all_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(rtol=1e-6)
def test_convolve_non_contiguous(self, xp, dtype, mode):
a = testing.shaped_arange((300,), xp, dtype)
b = testing.shaped_arange((100,), xp, dtype)
return xp.convolve(a[::200], b[10::70], mode=mode)
@testing.for_all_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(rtol=1e-4)
def test_convolve_large_non_contiguous(self, xp, dtype, mode):
a = testing.shaped_arange((10000,), xp, dtype)
b = testing.shaped_arange((100,), xp, dtype)
return xp.convolve(a[200::], b[10::70], mode=mode)
@testing.for_all_dtypes_combination(names=['dtype1', 'dtype2'])
@testing.numpy_cupy_allclose(rtol=1e-2)
def test_convolve_diff_types(self, xp, dtype1, dtype2, mode):
a = testing.shaped_random((200,), xp, dtype1)
b = testing.shaped_random((100,), xp, dtype2)
return xp.convolve(a, b, mode=mode)
@testing.parameterize(*testing.product({
'mode': ['valid', 'same', 'full']
}))
class TestConvolveInvalid:
@testing.for_all_dtypes()
def test_convolve_empty(self, dtype):
for xp in (numpy, cupy):
a = xp.zeros((0,), dtype)
with pytest.raises(ValueError):
xp.convolve(a, a, mode=self.mode)
@testing.for_all_dtypes()
def test_convolve_ndim(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = testing.shaped_arange((10, 5), xp, dtype)
with pytest.raises(ValueError):
xp.convolve(a, b, mode=self.mode)
| cupy/cupy | tests/cupy_tests/math_tests/test_misc.py | Python | mit | 19,721 |
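A rough sketch, outside the test suite, of what the testing.numpy_cupy_* decorators used above arrange: the same test body runs once with xp=numpy and once with xp=cupy, and the two results are compared. The helper names below are invented.

import numpy
import cupy


def check_with_both_backends(test_body, atol=1e-5):
    # Run the same body on the CPU (numpy) and the GPU (cupy), then compare.
    cpu_result = test_body(numpy)
    gpu_result = test_body(cupy)
    numpy.testing.assert_allclose(cupy.asnumpy(gpu_result), cpu_result, atol=atol)


def clip_body(xp):
    a = xp.arange(24, dtype=xp.float64).reshape(2, 3, 4)
    return a.clip(3, 13)


check_with_both_backends(clip_body)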
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
from flask_login import LoginManager
login_manager = LoginManager()
from flask_babel import Babel
babel = Babel()
from flask_bootstrap import Bootstrap
bootstrap = Bootstrap()
from flask_wtf.csrf import CSRFProtect
csrf = CSRFProtect()
from flask_oauthlib.client import OAuth
oauth = OAuth()
from flask_marshmallow import Marshmallow
marshmallow = Marshmallow()
from flask_flatpages import FlatPages
flatpages = FlatPages()
from flask_rq2 import RQ
rq = RQ()
from flask_caching import Cache
cache = Cache()
| zgoda/zakwasy | zkw/ext.py | Python | mit | 574 |
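The module above only creates unbound extension instances; a hedged sketch of the application-factory pattern it is presumably written for (the import path and config object are assumptions):

from flask import Flask

from zkw.ext import db, login_manager, babel, bootstrap, csrf, cache


def create_app(config_object='zkw.config.Config'):  # hypothetical config path
    app = Flask(__name__)
    app.config.from_object(config_object)
    # Bind each extension instance to the concrete app.
    db.init_app(app)
    login_manager.init_app(app)
    babel.init_app(app)
    bootstrap.init_app(app)
    csrf.init_app(app)
    cache.init_app(app)
    return app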
from threading import Thread
import socket
import simplejson as json
from config import config
class CommandServer(Thread):
def __init__(self, monitor, config):
super(CommandServer, self).__init__(name="CommandServer")
self.monitor = monitor
self.project_list = monitor.project_list
self.running = True
def run(self):
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM
)
sock.bind(("localhost", config["server_socket_port"],))
sock.listen(1)
while self.running:
connection, client_addr = sock.accept()
message = ""
try:
message = connection.recv(4096)
# while True:
# packet = connection.recv(4096)
# if packet:
# print packet
# message += packet
# else:
# break
print "Received command. Whole message: %s" % (message,)
response = json.dumps(self.dispatch(message))
connection.sendall(response)
finally:
connection.close()
print "Shutting down CommandServer thread: falling off end of run() ..."
def stop(self):
self.running = False
def dispatch(self, message):
name = message.split(" ")[0]
args = message.split(" ")[1:]
dispatch_table = {
"add-project" : self.add_project,
"stop-project" : self.stop_project,
"list-projects" : self.list_projects,
"shutdown" : self.shutdown,
}
if name in dispatch_table:
return dispatch_table[name](*args)
else:
return {"message" : "Invalid command"}
# add project {name} {tags}
def add_project(self, name, tags):
(project, id,) = self.project_list.create(name, tags)
return {
"message" : "Project created",
"name" : project.name,
"id" : project.id,
}
# stop project {name}
def stop_project(self, id):
# TODO: commit data
return self.project_list.stop(id)
# list projects
def list_projects(self):
projects = []
for project in self.project_list.active_projects.itervalues():
projects.append({
"key" : project.id,
"name" : project.name,
"keywords" : project.watching
})
return projects
# shutdown
def shutdown(self):
# send monitor command to shut down
self.monitor.stop()
self.stop()
return {
"message" : "Stopped",
        }
| shepdl/stream-daemon | CommandServer.py | Python | mit | 2,733 |
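A hedged sketch of a client for the command protocol above: a whitespace-delimited command goes over a plain TCP socket and a JSON document comes back. The port must match config['server_socket_port']; 5555 below is only a stand-in.

import json
import socket


def send_command(message, port=5555):
    # One-shot client: send the command, read up to 4096 bytes of JSON reply.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect(("localhost", port))
        sock.sendall(message.encode())
        return json.loads(sock.recv(4096))
    finally:
        sock.close()


print(send_command("list-projects"))
print(send_command("add-project myproject python,flask"))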
import torch
import numpy as np
from rdkit import Chem
def Variable(tensor):
"""Wrapper for torch.autograd.Variable that also accepts
numpy arrays directly and automatically assigns it to
the GPU. Be aware in case some operations are better
left to the CPU."""
if isinstance(tensor, np.ndarray):
tensor = torch.from_numpy(tensor)
if torch.cuda.is_available():
return torch.autograd.Variable(tensor).cuda()
return torch.autograd.Variable(tensor)
def decrease_learning_rate(optimizer, decrease_by=0.01):
"""Multiplies the learning rate of the optimizer by 1 - decrease_by"""
for param_group in optimizer.param_groups:
param_group['lr'] *= (1 - decrease_by)
def seq_to_smiles(seqs, voc):
"""Takes an output sequence from the RNN and returns the
corresponding SMILES."""
smiles = []
for seq in seqs.cpu().numpy():
smiles.append(voc.decode(seq))
return smiles
def fraction_valid_smiles(smiles):
"""Takes a list of SMILES and returns fraction valid."""
i = 0
for smile in smiles:
if Chem.MolFromSmiles(smile):
i += 1
return i / len(smiles)
def unique(arr):
# Finds unique rows in arr and return their indices
arr = arr.cpu().numpy()
arr_ = np.ascontiguousarray(arr).view(np.dtype((np.void, arr.dtype.itemsize * arr.shape[1])))
_, idxs = np.unique(arr_, return_index=True)
if torch.cuda.is_available():
return torch.LongTensor(np.sort(idxs)).cuda()
return torch.LongTensor(np.sort(idxs))
| MarcusOlivecrona/REINVENT | utils.py | Python | mit | 1,554 |
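A small usage sketch for the helpers above, assuming the module is importable as utils and that torch and RDKit are installed; the SMILES strings are arbitrary examples.

import numpy as np
import torch

from utils import Variable, fraction_valid_smiles, unique

smiles = ['CCO', 'c1ccccc1', 'not-a-molecule']
print(fraction_valid_smiles(smiles))  # 2 of the 3 strings parse with RDKit, so ~0.667

# Variable() accepts numpy arrays directly and moves them to the GPU if one is available.
x = Variable(np.zeros((2, 3), dtype=np.float32))
print(x.shape)  # torch.Size([2, 3])

# unique() returns the indices of the first occurrence of each distinct row.
seqs = torch.LongTensor([[1, 2, 3], [1, 2, 3], [4, 5, 6]])
print(unique(seqs))  # tensor([0, 2]), possibly on the GPU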
from doit.doit_cmd import DoitMain
def cmd_main(args):
return DoitMain().run(args)
class TestHelp(object):
def test_help_usage(self, capsys):
cmd_main(["help"])
out, err = capsys.readouterr()
assert "doit list" in out
def test_help_task_params(self, capsys):
cmd_main(["help", "task"])
out, err = capsys.readouterr()
assert "Task Dictionary parameters" in out
def test_help_cmd(self, capsys):
cmd_main(["help", "list"])
out, err = capsys.readouterr()
assert "Purpose: list tasks from dodo file" in out
def test_help_task_name(self, capsys, restore_cwd):
cmd_main(["help", "-f", "tests/loader_sample.py", "xxx1"])
out, err = capsys.readouterr()
assert "xxx1" in out # name
assert "task doc" in out # doc
assert "" in out # params
def test_help_wrong_name(self, capsys, restore_cwd):
cmd_main(["help", "-f", "tests/loader_sample.py", "wrong_name"])
out, err = capsys.readouterr()
assert "doit list" in out
def test_help_no_dodo_file(self, capsys):
cmd_main(["help", "-f", "no_dodo", "wrong_name"])
out, err = capsys.readouterr()
assert "doit list" in out
| lelit/doit | tests/test_cmd_help.py | Python | mit | 1,250 |
#!/usr/bin/python3
# Copyright (C) 2016 Zhixian MA <[email protected]>
# MIT license
#
"""
A gas tracker to detect halo and gas peaks of the two merging galaxies.
The `hdf5` files of simulated results generated from Gadget are the
input, and two images of the **halo** and **gas** are the outputs. Gas
peaks, i.e., the particles with the highest local mass densities, are marked
on the gas image.
Parameters
----------
input_dir: string
The folder holding those simulated results.
output_dir: string
    The folder to save output results.
width: tuple
Width of the output image, e,g. (2,'Mpc').
References
----------
[1] User-guide of gadget-2
http://www.gadgetcode.org/
[2] yt documents
http://yt-project.org/doc/index.html
"""
import os
import re
import sys
import shutil
import numpy as np
import yt
from yt.visualization.fixed_resolution import FixedResolutionBuffer
import partsplit
class GasTrack:
"""
The gas tracker class.
Methods
-------
get_halo_map:
Get halo map by the yt YTQuadTreeProj and FixedResolutionBuffer
objects.
get_gas_map:
Generate gas map with peaks marked.
locate_peak:
Locate peaks in the halo map.
load_hdf5:
Load the hdf5 file
References
----------
[1] YTQuadTreeProj
http://yt-project.org/doc/reference/api/generated/yt.data_objects.
construction_data_containers.YTQuadTreeProj.html?highlight=
ytquadtreeproj
[2] FixedResolutionBuffer
http://yt-project.org/doc/reference/api/generated/yt.visualization.
fixed_resolution.FixedResolutionBuffer.html?highlight=
fixedresolutionbuffer
"""
def __init__(self, input_dir, output_dir, width=(0.8, 'Mpc'),
buffsize=(3200, 3200)):
self.input_dir = input_dir
self.output_dir = output_dir
self.save = True
# Projection axis
self.axis = 'z'
# center
self.center = 'c'
# width
self.width = width
# buff_size
self.buff_size = buffsize
# image size
self.imgsize = (800, 800)
# unit_base
self.unit_base = {'UnitLength_in_cm': 3.08568E21,
'UnitMass_in_g': 1.989E43,
'UnitVelocity_in_cm_per_s': 100000}
# Bound box
bbox_lim = 1E5 # [kpc]
self.bbox = [[-bbox_lim, bbox_lim],
[-bbox_lim, bbox_lim],
[-bbox_lim, bbox_lim]]
# fields
self.halo_field = ('deposit', 'PartType1_density')
self.gas_field = ('gas', 'density')
def load_hdf5(self, fname):
"""Load the hdf5 files
Parameter
---------
fname: str
Filename.
Return
------
ds: yt.frontends.gadget.data_structures.GadgetHDF5Dataset
The yt GadgetHDF5Dataset object contains fields we are
interested.
"""
fpath = os.path.join(self.input_dir, fname)
ds = yt.load(fpath, unit_base=self.unit_base,
bounding_box=self.bbox)
return ds
def get_window_parameters(self, ds):
"""Get bounds of the axes."""
width = ds.coordinates.sanitize_width(self.axis, self.width, None)
center, display_center = ds.coordinates.sanitize_center(
self.center, self.axis)
xax = ds.coordinates.x_axis[self.axis]
yax = ds.coordinates.y_axis[self.axis]
bounds = (display_center[xax] - width[0] / 2,
display_center[xax] + width[0] / 2,
display_center[yax] - width[1] / 2,
display_center[yax] + width[1] / 2)
return bounds
def get_halo_map(self, ds, filename=None):
"""Get halo map and locate peaks
Parameter
---------
ds: yt.frontends.gadget.data_structures.GadgetHDF5Dataset
Return
------
peaklist: np.ndarray
In which contains location of halo peaks.
"""
# Get projected halo density
halo_proj = ds.proj(self.halo_field, self.axis)
# Get fiexed resolution buffer
bounds = self.get_window_parameters(ds)
halo_frb = FixedResolutionBuffer(halo_proj, bounds, self.imgsize)
# YTarray to np.ndarray
halo_map = np.array(halo_frb[self.halo_field], dtype=float)
# Normalization
halo_min = halo_map.min()
halo_max = halo_map.max()
halo_map_norm = (halo_map - halo_min) / (halo_max - halo_min)
# Detect peaks
# peaklist = self.locate_peak(halo_map_norm)
        if self.save:
pz = yt.ProjectionPlot(ds, self.axis, self.halo_field,
width=self.width)
pz.set_buff_size(self.buff_size)
filepath = os.path.join(self.output_dir, filename)
pz.save(filepath)
return halo_map_norm
def get_gas_map(self, ds, peaklist, filename=None):
"""Get gas map and mark peaks.
Parameter
---------
ds: yt.frontends.gadget.data_structures.GadgetHDF5Dataset
peaklist: np.ndarray
In which contains location of halo peaks
"""
# Generate gas map
pz = yt.ProjectionPlot(ds, self.axis, self.gas_field,
width=self.width)
# Set buff_size
pz.set_buff_size(self.buff_size)
# Markers
if peaklist.shape[0] == 1:
# one peak
pz.annotate_marker((peaklist[0, 1], peaklist[0, 2]),
coord_system='plot',
plot_args={'color': 'blue', 's': 500})
else:
pz.annotate_marker((peaklist[0, 1], peaklist[0, 2]),
coord_system='plot',
plot_args={'color': 'blue', 's': 500})
pz.annotate_marker((peaklist[1, 1], peaklist[1, 2]),
marker="+",
coord_system='plot',
plot_args={'color': 'red', 's': 300})
idx = re.findall(r'[0-9][0-9][0-9]', filename)
i = int(idx[0])
pz.annotate_text((-0.3, 0.3), '%.2f Gyr' %
(i * 0.02), coord_system='plot')
        if self.save:
filepath = os.path.join(self.output_dir, filename)
pz.save(filepath)
def locate_peak(self, img_mat):
"""
Locate peaks in the map
References
----------
[1] http://stackoverflow.com/questions/3684484/peak-detection-
in-a-2d-array
[2] http://stackoverflow.com/questions/9111711/get-coordinates-
of-local-maxima-in-2d-array-above-certain-value
"""
# Init
peaks = []
cord_x = []
cord_y = []
rows, cols = img_mat.shape
# Find peaks
peak_max = img_mat.max()
peak_y, peak_x = np.where(img_mat == peak_max)
peak_y = int(round(np.mean(peak_y)))
peak_x = int(round(np.mean(peak_x)))
        # Judge and fill
# append
peaks.append(peak_max)
cord_x.append(peak_x)
cord_y.append(peak_y)
peaklist = np.array([peaks, cord_x, cord_y]).transpose()
print(peaklist)
# pix to unit
peaklist = self.pix_to_unit(peaklist)
return peaklist
def pix_to_unit(self, peaklist):
        Convert peak pixel coordinates in the map to physical units.
rows, cols = self.imgsize
pix_per_unit_col = self.width[0] / cols
pix_per_unit_row = self.width[0] / rows
peaklist[:, 1] = peaklist[:, 1] * pix_per_unit_col - self.width[0] / 2
peaklist[:, 2] = peaklist[:, 2] * pix_per_unit_row - self.width[0] / 2
return peaklist
def main(argv):
"""
    Process the whole set of simulation files.
Parameter
---------
argv: string
argv = input_dir, output_dir
"""
# Init
    # Input directory
input_dir = argv[1]
    # Output directory
output_dir = argv[2]
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# Init GasTrack class
gt = GasTrack(input_dir, output_dir)
    # Init sub particle hdf5 files
ps_output = os.path.join(gt.input_dir, 'subs')
if os.path.exists(ps_output):
shutil.rmtree(ps_output)
os.mkdir(ps_output)
else:
os.mkdir(ps_output)
ps = partsplit.PartSplit(gt.input_dir, ps_output)
# main circulation
files = os.listdir(path=input_dir)
files.sort()
BeginIdx = 3
for i, f in enumerate(files):
if os.path.splitext(f)[-1] == '.hdf5':
snapidx = re.findall(r'[0-9][0-9][0-9]', f)
snapidx = int(snapidx[0])
if snapidx == BeginIdx:
print('snap %03d' % snapidx)
# Gen split particle hdf5 files
h5name = ("snap_%03d.hdf5" % snapidx)
ps.get_single_file(h5name)
# Init save names
haloname = ("halo_%03d_Projection_z_density_%f_%d_0.png" %
(snapidx, gt.width[0], gt.buff_size[0]))
gasname = ("snap_%03d_Projection_z_density_%f_%d_0.png" %
(snapidx, gt.width[0], gt.buff_size[0]))
# get peaks
gt.save = True
width_old = gt.width
gt.width = (2, 'Mpc')
# cluster1
f_c1 = ('subs/snap_%03d_c1.hdf5' % snapidx)
ds1 = gt.load_hdf5(f_c1)
halo_c1 = ("halo_%03d_Projection_z_density_%f_%d_c1.png" %
(snapidx, gt.width[0], gt.buff_size[0]))
halo_map_c1 = gt.get_halo_map(ds1, halo_c1)
peak1 = gt.locate_peak(halo_map_c1)
os.remove(os.path.join(gt.input_dir, f_c1))
# cluster2
f_c2 = ('subs/snap_%03d_c2.hdf5' % snapidx)
ds2 = gt.load_hdf5(f_c2)
halo_c2 = ("halo_%03d_Projection_z_density_%f_%d_c2.png" %
(snapidx, gt.width[0], gt.buff_size[0]))
halo_map_c2 = gt.get_halo_map(ds2, halo_c2)
peak2 = gt.locate_peak(halo_map_c2)
os.remove(os.path.join(gt.input_dir, f_c2))
# Combine
peaklist = np.row_stack((peak1, peak2))
# get all images
gt.save = True
gt.width = width_old
ds = gt.load_hdf5(f)
halo_map = gt.get_halo_map(ds, haloname)
gt.get_gas_map(ds, peaklist, gasname)
else:
pass
if __name__ == "__main__":
main(sys.argv)
| myinxd/gastrack | gastrack/gastrack.py | Python | mit | 10,766 |
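A hedged sketch of how the tracker above is meant to be driven, per its main(argv) signature; the paths, snapshot name and module import are placeholders.

# From the command line: argv[1] is the folder of snap_*.hdf5 files, argv[2] the output folder.
#   python3 gastrack.py /data/gadget/snapshots /data/gadget/tracked
# Or programmatically, e.g. with a wider field of view:
import gastrack  # assumption: the module is importable under this name

tracker = gastrack.GasTrack('/data/gadget/snapshots', '/data/gadget/tracked', width=(2, 'Mpc'))
ds = tracker.load_hdf5('snap_003.hdf5')
halo_map = tracker.get_halo_map(ds, 'halo_003.png')
peaks = tracker.locate_peak(halo_map)
tracker.get_gas_map(ds, peaks, 'snap_003.png')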
from django.conf.urls import include, url
from rest_framework.routers import DefaultRouter
from . import views
# API
urlpatterns = [
url(r'auth/', include('knox.urls')),
]
# Add special views from view sets
urlpatterns += [
url(
regex=r'^flowcell/by_vendor_id/(?P<vendor_id>.+)/$',
view=views.FlowCellViewSet.as_view({'get': 'by_vendor_id'}),
name='flowcell-by-vendor-id',
),
url(
regex=r'^sequencingmachine/by_vendor_id/(?P<vendor_id>.+)/$',
view=views.SequencingMachineViewSet.as_view({'get': 'by_vendor_id'}),
name='sequencing_machine-by-vendor-id',
),
]
router = DefaultRouter()
router.register(r'flowcell', views.FlowCellViewSet, base_name='flowcell')
router.register(r'barcodeset', views.BarcodeSetViewSet, base_name='barcodeset')
router.register(r'sequencingmachine', views.SequencingMachineViewSet, base_name='sequencingmachine')
router.register(r'message', views.FlowCellMessageViewSet, base_name='message')
urlpatterns += router.urls
| bihealth/flowcelltool | flowcelltool/flowcells/api_v1/urls.py | Python | mit | 1,018 |
import logging
import warnings
from django.conf import settings
from django.core.management.base import BaseCommand
from mailer.engine import send_all
from mailer.management.helpers import CronArgMixin
# allow a sysadmin to pause the sending of mail temporarily.
PAUSE_SEND = getattr(settings, "MAILER_PAUSE_SEND", False)
logger = logging.getLogger(__name__)
class Command(CronArgMixin, BaseCommand):
help = "Do one pass through the mail queue, attempting to send all mail."
def handle(self, *args, **options):
if options['cron'] == 0:
warnings.warn("send_mail's -c/--cron option is no longer "
"necessary and will be removed in a future release",
DeprecationWarning)
logger.info("-" * 72)
# if PAUSE_SEND is turned on don't do anything.
if not PAUSE_SEND:
send_all()
else:
logger.info("sending is paused, quitting.")
| pinax/django-mailer | src/mailer/management/commands/send_mail.py | Python | mit | 962 |
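If the queue pass needs to be triggered from Python rather than cron, Django's standard call_command API can invoke the command above; the scheduling itself is left to the deployment.

from django.core.management import call_command

# Equivalent to running `python manage.py send_mail`:
call_command('send_mail')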
import _plotly_utils.basevalidators
class IdssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="idssrc", parent_name="sankey", **kwargs):
super(IdssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/sankey/_idssrc.py | Python | mit | 434 |
"""
Copyright (c) 2016 Marshall Farrier
license http://opensource.org/licenses/MIT
lib/ui/utils.py
Utility functions for UIs.
"""
def confirm():
choice = input('OK to proceed (y/n)? ').lower()
if choice == 'y':
return True
return False
| aisthesis/opttrack | opttrack/lib/ui/utils.py | Python | mit | 259 |
"""
Package for RadioStreamer.database
"""
| Ervie/PAI_Python | RadioStreamer/RadioStreamer/RadioStreamer/database/__init__.py | Python | mit | 43 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, lavende <[email protected]>
from pottery.version import __version__ # NOQA
from pottery.server import HttpServer # NOQA
| lavende/pottery | pottery/__init__.py | Python | mit | 277 |
# vi: ts=8 sts=4 sw=4 et
#
# support.py: test support for draco2.util
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
import random
def inverse(s):
inv = []
for i in range(256):
ch = chr(i)
if ch not in s:
inv.append(ch)
return ''.join(inv)
class EncoderTest(object):
RANDOM_TESTS = 10
RANDOM_SIZE = 1024
def setup_class(cls):
random.seed()
def random_data(self, size):
d = []
for i in range(size):
d.append(chr(random.randrange(128)))
return ''.join(d)
def test_encode(self):
for data,encoded in self.encode_vectors:
assert self.quote(data) == encoded
def test_decode(self):
for data,decoded in self.decode_vectors:
assert self.unquote(data) == decoded
def test_random(self):
for i in range(self.RANDOM_TESTS):
size = random.randrange(self.RANDOM_SIZE)
data = self.random_data(size)
encoded = self.quote(data)
decoded = self.unquote(encoded)
assert data == decoded
assert size <= len(encoded)
for ch in self.unsafe:
if ch in self.quotechars:
continue # don't test for quoting character
assert ch not in encoded
| geertj/draco2 | draco2/util/test/support.py | Python | mit | 1,613 |
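EncoderTest is a mix-in: a concrete test class supplies quote/unquote, the known-good vectors and the character sets. A hypothetical subclass, written in the same Python 2 idiom as the module above and using hex encoding purely as an illustration (not one of draco2's real encoders), might look like this, assuming it lives next to the module so EncoderTest and inverse are in scope:

import binascii

class TestHexEncoder(EncoderTest):
    encode_vectors = [('abc', '616263')]
    decode_vectors = [('616263', 'abc')]
    unsafe = inverse('0123456789abcdef')  # characters that must never appear in output
    quotechars = ''

    def quote(self, data):
        return binascii.hexlify(data)

    def unquote(self, data):
        return binascii.unhexlify(data)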
'''
- Leetcode problem: 1304
- Difficulty: Easy
- Brief problem description:
Given two binary trees, write a function to check if they are the same or not.
Two binary trees are considered the same if they are structurally identical and the nodes have the same value.
Example 1:
Input: 1 1
/ \ / \
2 3 2 3
[1,2,3], [1,2,3]
Output: true
Example 2:
Input: 1 1
/ \
2 2
[1,2], [1,null,2]
Output: false
Example 3:
Input: 1 1
/ \ / \
2 1 1 2
[1,2,1], [1,1,2]
Output: false
- Solution Summary:
- Used Resources:
--- Bo Zhou
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
return self.dfs(p, q)
def dfs(self, node1, node2):
if node1 is None and node2 is None:
return True
if node1 is None or node2 is None:
return False
if node1.val != node2.val:
return False
if self.dfs(node1.left, node2.left):
return self.dfs(node1.right, node2.right)
return False
| bzhou26/leetcode_sol | p100_Same_Tree.py | Python | mit | 1,362 |
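A runnable sketch of the solution on Examples 1 and 2 from the problem statement above. It declares the TreeNode helper that LeetCode's judge normally provides (it has to exist before the Solution class is defined, since the type annotations reference it).

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


# Example 1: [1,2,3] vs [1,2,3] -> True
p = TreeNode(1, TreeNode(2), TreeNode(3))
q = TreeNode(1, TreeNode(2), TreeNode(3))
print(Solution().isSameTree(p, q))  # True

# Example 2: [1,2] vs [1,null,2] -> False
p = TreeNode(1, TreeNode(2))
q = TreeNode(1, None, TreeNode(2))
print(Solution().isSameTree(p, q))  # False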
import threading
import time
from settings import *
from request import request_html
class Request_sender(threading.Thread):
def __init__(self, handler):
super(Request_sender, self).__init__()
self.handler = handler
self.stop = False
def run(self):
while not self.stop:
with self.handler.queue_lock:
if len(self.handler.queue) == 0:
current_user = None
else:
current_user = self.handler.queue.pop(0)
if not current_user:
time.sleep(1)
else:
if current_user[0] == '-':
current_user = current_user[1:]
check_existence = True
else: check_existence = False
current_url = "https://steamcommunity.com/" + current_user
#html request
html = (current_user,
check_existence,
request_html(current_user, current_url),
request_html(current_user + "/ajaxaliases", current_url + "/ajaxaliases"),
)
with self.handler.html_lock:
self.handler.htmls.append(html)
class Request_handler():
def __init__(self, queue):
self.queue = queue
self.threads = []
self.htmls = []
self.queue_lock = threading.Lock()
self.html_lock = threading.Lock()
def start(self):
for i in range(REQUEST_THREADS):
self.threads.append(Request_sender(self))
self.threads[-1].start()
def stop(self):
for i in self.threads:
i.stop = True
for i in self.threads:
i.join()
self.threads = []
def get_html(self):
with self.html_lock:
if len(self.htmls) == 0: return -1
return self.htmls.pop(0)
def done(self):
with self.html_lock:
return len(self.htmls) == 0 and len(self.threads) == 0
| Anaatti/Steam-crawler | src/request_threading.py | Python | mit | 2,052 |
#!/usr/bin/pickle
""" a basic script for importing student's POI identifier,
and checking the results that they get from it
requires that the algorithm, dataset, and features list
be written to my_classifier.pkl, my_dataset.pkl, and
my_feature_list.pkl, respectively
that process should happen at the end of poi_id.py
"""
import pickle
import sys
from sklearn.cross_validation import StratifiedShuffleSplit
sys.path.append("./tools/")
from feature_format import featureFormat, targetFeatureSplit
PERF_FORMAT_STRING = "\
\tAccuracy: {:>0.{display_precision}f}\tPrecision: {:>0.{display_precision}f}\t\
Recall: {:>0.{display_precision}f}\tF1: {:>0.{display_precision}f}\tF2: {:>0.{display_precision}f}"
RESULTS_FORMAT_STRING = "\tTotal predictions: {:4d}\tTrue positives: {:4d}\tFalse positives: {:4d}\
\tFalse negatives: {:4d}\tTrue negatives: {:4d}"
def test_classifier(clf, dataset, feature_list, folds = 1000):
data = featureFormat(dataset, feature_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
cv = StratifiedShuffleSplit(labels, folds, random_state = 42)
true_negatives = 0
false_negatives = 0
true_positives = 0
false_positives = 0
for train_idx, test_idx in cv:
features_train = []
features_test = []
labels_train = []
labels_test = []
for ii in train_idx:
features_train.append( features[ii] )
labels_train.append( labels[ii] )
for jj in test_idx:
features_test.append( features[jj] )
labels_test.append( labels[jj] )
### fit the classifier using training set, and test on test set
clf.fit(features_train, labels_train)
predictions = clf.predict(features_test)
for prediction, truth in zip(predictions, labels_test):
if prediction == 0 and truth == 0:
true_negatives += 1
elif prediction == 0 and truth == 1:
false_negatives += 1
elif prediction == 1 and truth == 0:
false_positives += 1
elif prediction == 1 and truth == 1:
true_positives += 1
else:
print "Warning: Found a predicted label not == 0 or 1."
print "All predictions should take value 0 or 1."
print "Evaluating performance for processed predictions:"
break
try:
total_predictions = true_negatives + false_negatives + false_positives + true_positives
accuracy = 1.0*(true_positives + true_negatives)/total_predictions
precision = 1.0*true_positives/(true_positives+false_positives)
recall = 1.0*true_positives/(true_positives+false_negatives)
f1 = 2.0 * true_positives/(2*true_positives + false_positives+false_negatives)
f2 = (1+2.0*2.0) * precision*recall/(4*precision + recall)
print clf
print PERF_FORMAT_STRING.format(accuracy, precision, recall, f1, f2, display_precision = 5)
print RESULTS_FORMAT_STRING.format(total_predictions, true_positives, false_positives, false_negatives, true_negatives)
print ""
except:
print "Got a divide by zero when trying out:", clf
print "Precision or recall may be undefined due to a lack of true positive predicitons."
CLF_PICKLE_FILENAME = "my_classifier.pkl"
DATASET_PICKLE_FILENAME = "my_dataset.pkl"
FEATURE_LIST_FILENAME = "my_feature_list.pkl"
def dump_classifier_and_data(clf, dataset, feature_list):
with open(CLF_PICKLE_FILENAME, "w") as clf_outfile:
pickle.dump(clf, clf_outfile)
with open(DATASET_PICKLE_FILENAME, "w") as dataset_outfile:
pickle.dump(dataset, dataset_outfile)
with open(FEATURE_LIST_FILENAME, "w") as featurelist_outfile:
pickle.dump(feature_list, featurelist_outfile)
def load_classifier_and_data():
with open(CLF_PICKLE_FILENAME, "r") as clf_infile:
clf = pickle.load(clf_infile)
with open(DATASET_PICKLE_FILENAME, "r") as dataset_infile:
dataset = pickle.load(dataset_infile)
with open(FEATURE_LIST_FILENAME, "r") as featurelist_infile:
feature_list = pickle.load(featurelist_infile)
return clf, dataset, feature_list
def main():
### load up student's classifier, dataset, and feature_list
clf, dataset, feature_list = load_classifier_and_data()
### Run testing script
test_classifier(clf, dataset, feature_list)
if __name__ == '__main__':
main()
| luiscruz/udacity_data_analyst | P05/src/tester.py | Python | mit | 4,508 |
from numericalmethods.lagrange import *
lp = lagrange_polys([-1.5, -0.75, 0, 0.75, 1.5])
l0 = lp[0]
l1 = lp[1]
print(l0(1))
print(l1(1))
ilp = interpolating_lagrange_poly([(-1.5,-14.1014),(-0.75,-0.931596),(0,0),(0.75,0.931596),(1.5,14.1014)])
print(ilp(1))
| RossMeikleham/Numerical-Methods | examples/lagrange_example.py | Python | mit | 261 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'', include('judge.urls')),
)
| paul-g/skeletor | codez/urls.py | Python | mit | 283 |
#! /usr/bin/env python3
import pydot
import NLP40
import NLP41
import NLP42
from NLP40 import *
from NLP41 import *
output_dir='images'
def write_kakariuke_tree(map_list):
offset = 1
for i, map_list_elem in zip(range(offset, len(map_list) + offset), map_list):
write_kakariuke_tree_oneline(i, map_list_elem)
def write_kakariuke_tree_oneline(i, oneline):
graph = pydot.Dot(graph_type='digraph')
map_id_and_node_graph_includes = {}
for i_s, src in enumerate(oneline):
if src.dst() == -1:
continue
chunk_src_string = NLP42.concat_morphs(src.morphs())
chunk_dst_string = NLP42.concat_morphs(oneline[src.dst()].morphs())
if len(chunk_src_string) == 0 or len(chunk_dst_string) == 0:
continue
i_d = src.dst()
if i_s in map_id_and_node_graph_includes:
src_node = map_id_and_node_graph_includes[i_s]
else:
src_node = pydot.Node(str(i_s), label = chunk_src_string)
map_id_and_node_graph_includes[i_s] = src_node
graph.add_node(src_node)
if i_d in map_id_and_node_graph_includes:
dst_node = map_id_and_node_graph_includes[i_d]
else:
dst_node = pydot.Node(str(i_d), label = chunk_dst_string)
map_id_and_node_graph_includes[i_d] = dst_node
            graph.add_node(dst_node)
graph.add_edge(pydot.Edge(src_node, dst_node))
graph.write_png(output_dir + '/' + str(i) + '.png')
if __name__ == '__main__':
lines = NLP40.read_kakariuke_analysis_result()
map_list = NLP41.parse_chunk_analysis_result(lines)
write_kakariuke_tree(map_list)
| TomoyoshiToyoda/language_processing | language_processing_100/python3/sect5/NLP44.py | Python | mit | 1,695 |
#!/usr/bin/env python
#Used: http://stackoverflow.com/questions/579687/how-do-i-copy-a-string-to-the-clipboard-on-windows-using-python
#Polina Morozova 10.11.2014
from tkinter import Tk
from random import randint
def generate_ik(sex, year, month, day):
ik = str(getSex(sex,year))
ik += getYear(year)
ik += getMonth(month)
ik += getDay(day)
ik += getRandom()
ik += getCheckSum(ik)
return ik
# Determine the 1st digit
def getSex(sex,year):
#0='male'
#1='female'
if 1800 <= year <= 1899:
if sex==0:
return 1
else:
return 2
if 1900 <= year <= 1999:
if sex==0:
return 3
else:
return 4
if 2000 <= year <= 2099:
if sex==0:
return 5
else:
return 6
if 2100 <= year <= 2199:
if sex==0:
return 7
else:
return 8
# Generate digits 2 and 3
def getYear (year):
return str(year)[2:4]
# Generate digits 4 and 5
def getMonth(month):
if month < 10:
return '0%s' % (month)
else:
return str(month)
# Generate digits 6 and 7
def getDay(day):
if day < 10:
return '0%s' % (day)
else:
return str(day)
# Generate digits 8-10
def getRandom():
random_number = randint(0, 999)
return ('%s' % (random_number)).zfill(3)
# Generate digit 11: the checksum
def getCheckSum(ik):
aste_I = [1,2,3,4,5,6,7,8,9,1]
aste_II = [3,4,5,6,7,8,9,1,2,3]
total = 0
for num_ik, num_aste_I in zip(ik, aste_I):
total += int(num_ik)*num_aste_I
a = total % 11
if a != 10:
return str(a)
total = 0
for num_ik, num_aste_II in zip(ik, aste_II):
total += int(num_ik)*num_aste_II
a = total % 11
if a != 10:
return str(a)
return '0'
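# Worked example: getCheckSum('3760503029') returns '9' -- the stage-I weights
# already give remainder 9, so the stage-II weights are never consulted.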
# Generate a random ik (personal ID code)
def get_random_ik():
    days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ] # number of days in each month
sex = randint(0, 1)
year = randint(1800, 2199)
month = randint(1, 12)
day = randint(1, days[month])
    if month == 2 and year % 4 == 0 and (year % 100 != 0 or year % 400 == 0): # leap year
day = randint(1, 29)
return generate_ik(sex, year, month, day)
def main():
r = Tk()
#r.withdraw()
r.clipboard_clear()
r.clipboard_append(get_random_ik())
r.update()
# https://docs.python.org/2/library/__main__.html
if __name__ == '__main__':
    main()
| p0linka/AA_hmw | hmw_2/clipboard_tkinter.py | Python | mit | 2171 |
import simplejson
from django.core.cache import cache
from django.conf import settings
from topo.models import Topic
from topo.serializers import ConceptSerializer
from .serializers import QuizSerializer
def get_quiz_by_topic_cache_key(topic):
return "quiz:topic:{}".format(topic.slug)
def get_serialized_quizzes_by_topic(topic):
cache_key = get_quiz_by_topic_cache_key(topic)
result = cache.get(cache_key)
if not result:
result = get_quizzes_by_topic(topic)
result = simplejson.dumps(result)
cache.set(cache_key, result)
return result
def clear_quizzes_by_topic_cache(topic):
cache_key = get_quiz_by_topic_cache_key(topic)
cache.delete(cache_key)
def get_quizzes_by_topic(topic):
concept_serializer = ConceptSerializer()
quiz_serializer = QuizSerializer()
concepts = topic.get_top_sorted_concepts()
serialized_concepts = []
for concept in concepts:
        quizzes = concept.quiz_set.all()
        if not quizzes:
            # Guard before indexing so an empty quiz_set cannot raise IndexError.
            if settings.DEBUG:
                assert(False) #At least one quiz per concept must be present
            else:
                continue
        quiz = quiz_serializer.to_dict(quizzes[0])
concept_dict = concept_serializer.to_dict(concept)
concept_dict['quiz'] = quiz
serialized_concepts.append(concept_dict)
return serialized_concepts
def set_topic_as_attempted(user_key, topic_id):
attemped_key = "user_{}:quizattempt:topic:{}".format(user_key, topic_id)
if not cache.get(attemped_key):
cache.set(attemped_key, True)
return attemped_key
| pramodliv1/conceptgrapher | server/cg/quiz/diagnose.py | Python | mit | 1,448 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from sklearn.model_selection import ParameterGrid
from sklearn.base import BaseEstimator
from sklearn.pipeline import Pipeline as PipelineSL
from sklearn.base import clone
from chainladder.core.io import EstimatorIO
from joblib import Parallel, delayed
import pandas as pd
import json
class GridSearch(BaseEstimator):
"""Exhaustive search over specified parameter values for an estimator.
    Important members are fit and the fitted ``results_`` attribute.
    GridSearch implements a "fit" method that evaluates every parameter
    combination in ``param_grid`` with the supplied ``scoring`` callable(s).
    The parameters of the estimator are optimized by an exhaustive grid-search
    over the parameter grid.
Parameters
----------
estimator : estimator object.
This is assumed to implement the chainladder estimator interface.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : callable or dict of callable(s)
Should be of the form {'name': callable}. The callable(s) should
return a single value.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error. Default is 'raise' but from
version 0.22 it will change to np.nan.
n_jobs : int, default=None
The number of jobs to use for the computation. This will only provide
speedup for n_targets > 1 and sufficient large problems.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
results_ : DataFrame
A DataFrame with each param_grid key as a column and the ``scoring``
score as the last column
"""
def __init__(self, estimator, param_grid, scoring, verbose=0,
error_score="raise", n_jobs=None):
self.estimator = estimator
self.param_grid = param_grid
self.scoring = scoring
self.verbose = verbose
self.error_score = error_score
self.n_jobs = n_jobs
def fit(self, X, y=None, **fit_params):
"""Fit the model with X.
Parameters
----------
X : Triangle-like
Set of LDFs to which the tail will be applied.
y : Ignored
fit_params : (optional) dict of string -> object
Parameters passed to the ``fit`` method of the estimator
Returns
-------
self : object
Returns the instance itself.
"""
if type(self.scoring) is not dict:
scoring = dict(score=self.scoring)
else:
scoring = self.scoring
grid = list(ParameterGrid(self.param_grid))
def _fit_single_estimator(estimator, fit_params, X, y, scoring, item):
est = clone(estimator).set_params(**item)
model = est.fit(X, y, **fit_params)
for score in scoring.keys():
item[score] = scoring[score](model)
return item
results_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_single_estimator)(
self.estimator, fit_params, X, y, scoring, item)
for item in grid)
self.results_ = pd.DataFrame(results_)
return self
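# Illustrative usage sketch (SomeEstimator and its fitted attribute are hypothetical;
# any estimator/scoring-callable pair matching the interface above would work):
#   scoring = {'score': lambda fitted: fitted.some_fitted_attribute_}
#   grid = GridSearch(SomeEstimator(), {'n_periods': [3, 5, 7]}, scoring=scoring)
#   grid.fit(X).results_  # DataFrame: one row per grid point plus a 'score' column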
class Pipeline(PipelineSL, EstimatorIO):
"""This is a near direct of copy the scikit-learn Pipeline class.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The transformers in the pipeline can be cached using ``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
A step's estimator may be replaced entirely by setting the parameter
with its name to another estimator, or a transformer removed by setting
to None.
Read more in the :ref:`User Guide <pipeline_docs>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
memory : None, str or object with the joblib.Memory interface, optional
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
Attributes
----------
named_steps : bunch object, a dictionary with attribute access
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters."""
def fit(self, X, y=None, sample_weight=None, **fit_params):
if sample_weight:
fit_params = {} if not fit_params else fit_params
for step in self.steps:
fit_params[step[0] + "__sample_weight"] = sample_weight
return super().fit(X, y, **fit_params)
def predict(self, X, sample_weight=None, **predict_params):
if sample_weight:
predict_params = {} if not predict_params else predict_params
predict_params["sample_weight"] = sample_weight
return super().predict(X, **predict_params)
def fit_predict(self, X, y=None, sample_weight=None, **fit_params):
self.fit(X, y, sample_weight, **fit_params)
return self.predict(X, sample_weight, **fit_params)
def to_json(self):
return json.dumps(
[
{
"name": item[0],
"params": item[1].get_params(),
"__class__": item[1].__class__.__name__,
}
for item in self.steps
]
)
| jbogaardt/chainladder-python | chainladder/workflow/gridsearch.py | Python | mit | 7,273 |
#!/usr/bin/env python2
"""
Created on 1 Apr 2014
@author: Max Demian
Todo:
* Home Zone (+img_safe)
* Timer
* Animations (Frog + Turtles)
* Class that creates and holds all the objects as sprite groups
* Handle sprites better (separation, SpriteGroups, ...)
* Figure out a different delay for frog.death() as currently you can move
inside the frozen screen. Also, enable death when sliding out the river.
* Sounds & Music
* Score and Highscores
"""
import pygame
import sys
from random import choice, randrange
class StaticObstacle(pygame.sprite.Sprite):
"Base class for all static obstacles"
def __init__(self):
super(StaticObstacle, self).__init__()
def draw(self):
window.blit(self.img, (self.rect.x, self.rect.y))
class TopGround(StaticObstacle):
def __init__(self):
super(TopGround, self).__init__()
self.img = pygame.image.load("data/top_ground.png")
self.rect = self.img.get_rect()
self.rect.x = 0
self.rect.y = 60
self.mask = pygame.mask.from_surface(self.img)
class River(StaticObstacle):
def __init__(self):
super(River, self).__init__()
self.img = pygame.Surface((480, 200), pygame.SRCALPHA)
self.rect = self.img.get_rect()
def draw(self):
self.img.fill((255, 255, 255, 128))
window.blit(self.img, (0, 118))
class Camper(StaticObstacle):
"Enemies camping the safezones inside the TopGround"
def __init__(self):
super(Camper, self).__init__()
self.imgs = ["data/croc.png", "data/fly.png"]
self.img = pygame.image.load(choice(self.imgs))
self.spawns = [420, 320, 220, 120, 20]
self.duration = randrange(5, 11)
self.rect = self.img.get_rect()
self.rect.x = choice(self.spawns)
self.rect.y = 80
self.mask = pygame.mask.from_surface(self.img)
class MovingObstacle(pygame.sprite.Sprite):
"Base class for all moving obstacles"
def __init__(self, x, y, img, direction):
super(MovingObstacle, self).__init__()
self.speed = 2
self.go_left = direction
self.img = pygame.image.load(img)
self.rect = self.img.get_rect()
self.rect.x = x
self.rect.y = y
self.mask = pygame.mask.from_surface(self.img)
def draw(self):
"Moves and then draws the obstacle"
# Adjust the position of the obstacle.
if self.go_left:
self.rect.x -= self.speed
else:
self.rect.x += self.speed
# Reset the object if it moves out of screen.
if isinstance(self, Car):
if self.rect.x > 480:
self.rect.x = -40
elif self.rect.x < -40:
self.rect.x = 480
else:
# To accommodate the big logs and introduce gaps, we use -180 here.
if self.rect.x > 480:
self.rect.x = -180
elif self.rect.x < -180:
self.rect.x = 480
# And finally draw it.
window.blit(self.img, (self.rect.x, self.rect.y))
class Car(MovingObstacle):
def __init__(self, x, y, img, direction=0):
super(Car, self).__init__(x, y, img, direction)
class Turtle(MovingObstacle):
def __init__(self, x, y, img, direction=0):
super(Turtle, self).__init__(x, y, img, direction)
class Log(MovingObstacle):
def __init__(self, x, y, img, direction=0):
super(Log, self).__init__(x, y, img, direction)
class Frog(pygame.sprite.Sprite):
def __init__(self):
super(Frog, self).__init__()
self.img_death = pygame.image.load("data/frog_death_3.png")
self.img_safe = pygame.image.load("data/frog_safe.png")
self.img_life = pygame.image.load("data/lives.png")
self.img_forward = pygame.image.load("data/frog.png")
self.img_back = pygame.image.load("data/frog_back.png")
self.img_left = pygame.image.load("data/frog_left.png")
self.img_right = pygame.image.load("data/frog_right.png")
self.img = self.img_forward
self.rect = self.img.get_rect()
self.lives = 4
self.rect.x = 220
self.rect.y = 560
self.startpos = (self.rect.x, self.rect.y)
self.mask = pygame.mask.from_surface(self.img)
def draw(self):
self.mask = pygame.mask.from_surface(self.img)
self.move()
self.display_lives()
window.blit(self.img, (self.rect.x, self.rect.y))
def move(self):
self.rect.move(self.rect.x, self.rect.y)
# Ensure the player stays within the playable zone.
self.rect.clamp_ip(pygame.Rect((0, 80), (480, 520)))
def left(self):
self.img = self.img_left
self.rect.x -= 20
def right(self):
self.img = self.img_right
self.rect.x += 20
def forward(self):
self.img = self.img_forward
self.rect.y -= 40
def back(self):
self.img = self.img_back
self.rect.y += 40
def display_lives(self):
"Draw the life bar"
x, y = 0, 40
for _ in range(self.lives):
window.blit(self.img_life, (x, y))
x += 20
def death(self):
"Update lives, trigger visual clues and reset frog position to default"
# TODO: Update lives display as soon as death occurs.
self.lives -= 1
self.img = self.img_death
self.draw()
pygame.display.flip()
pygame.time.wait(500)
self.rect.x, self.rect.y = self.startpos
self.img = self.img_forward
def wait_for_input():
# Allow these keys to cancel the loop.
valid_keys = [pygame.K_ESCAPE, pygame.K_SPACE, pygame.K_RETURN]
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN:
if event.key in valid_keys:
# Return to wherever we were called from.
return
def pause():
pause_font = pygame.font.Font("data/emulogic.ttf", 20)
pause_label = pause_font.render("PAUSED", 1, (255, 255, 255))
window.blit(pause_label, (180, 300))
pygame.display.flip()
print "paused"
wait_for_input()
def game_over():
gameover_font = pygame.font.Font("data/emulogic.ttf", 20)
gameover_label = gameover_font.render("GAME OVER", 1, (255, 255, 255))
window.blit(gameover_label, (150, 300))
pygame.display.flip()
wait_for_input()
terminate()
def terminate():
pygame.quit()
sys.exit()
def start_screen():
"A simple welcome screen with some music"
# Load music and loop it until the start screen ends.
pygame.mixer.music.load("data/theme.mp3")
pygame.mixer.music.play(-1)
# Draw the start screen with title gif and fonts.
blue, white = (0, 0, 71), (255, 255, 255)
start_font = pygame.font.Font("data/emulogic.ttf", 20)
start_title = pygame.image.load("data/frogger_title.gif")
window.fill(blue)
label1 = start_font.render("Press Enter", 1, white)
label2 = start_font.render("to", 1, white)
label3 = start_font.render("continue", 1, white)
window.blit(label1, (140, 300))
window.blit(label2, (215, 350))
window.blit(label3, (160, 400))
window.blit(start_title, (60, 150))
# Update the screen only once.
pygame.display.flip()
wait_for_input()
pygame.mixer.music.fadeout(2000)
def create_floatables():
"Create the Turtle and Log instances"
floatables = pygame.sprite.Group()
ys = [128, 160, 208, 248, 280]
x = 0
for _ in range(4):
turtle = Turtle(x, ys[4], "data/turtle_3_full.png", 1)
floatables.add(turtle)
x += 128
x = 20
for _ in range(3):
log = Log(x, ys[3], "data/log_small.png")
floatables.add(log)
x += 192
x = 40
for _ in range(2):
log = Log(x, ys[2], "data/log_big.png")
floatables.add(log)
x += 256
x = 60
for _ in range(4):
turtle = Turtle(x, ys[1], "data/turtle_2_full.png", 1)
floatables.add(turtle)
x += 112
x = 80
for _ in range(3):
log = Log(x, ys[0], "data/log_medium.png")
floatables.add(log)
x += 176
return floatables
def create_hostiles():
"Create the obstacles that trigger death on collision"
hostiles = pygame.sprite.Group()
ys = [520, 480, 440, 400, 360]
x = randrange(200)
for _ in range(3):
car = Car(x, ys[0], "data/car_1.png", 1)
hostiles.add(car)
x += 144
x = randrange(200)
for _ in range(3):
car = Car(x, ys[1], "data/car_2.png")
hostiles.add(car)
x += 128
x = randrange(200)
for _ in range(3):
car = Car(x, ys[2], "data/car_3.png", 1)
hostiles.add(car)
x += 128
x = randrange(200)
for _ in range(2):
car = Car(x, ys[3], "data/car_4.png")
hostiles.add(car)
x += 128
x = randrange(200)
for _ in range(2):
car = Car(x, ys[4], "data/car_5.png", 1)
hostiles.add(car)
x += 176
return hostiles
def create_deathzones():
deathzones = pygame.sprite.Group()
topground = TopGround()
deathzones.add(topground)
river = River()
deathzones.add(river)
return deathzones
def main():
start_screen()
# Basic setup.
level = 0
clock = pygame.time.Clock()
background = pygame.image.load("data/background.png")
# Sprite groups
frog = Frog()
hostiles = create_hostiles()
floatables = create_floatables()
deathzones = create_deathzones()
while True:
#======================================================================
# Polling
#======================================================================
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
pause()
if event.key == pygame.K_SPACE:
# level += 1
print frog.rect.x, frog.rect.y
if event.key == pygame.K_LEFT or event.key == pygame.K_a:
frog.left()
if event.key == pygame.K_RIGHT or event.key == pygame.K_d:
frog.right()
if event.key == pygame.K_UP or event.key == pygame.K_w:
frog.forward()
if event.key == pygame.K_DOWN or event.key == pygame.K_s:
frog.back()
#======================================================================
# # Updating + Drawing
#======================================================================
window.blit(background, (0, 0))
# First, draw the floating obstacles.
for i in floatables:
i.draw()
# Draw the frog so that he appears on top of river objects but beneath
# hostiles.
frog.draw()
for i in hostiles:
i.draw()
# for i in deathzones:
# i.draw()
#======================================================================
# Collision
#======================================================================
for i in hostiles:
offset_x = frog.rect.left - i.rect.left
offset_y = frog.rect.top - i.rect.top
# TODO: Fix car_5 (trucks). Somehow their collision box is off.
# Same goes for the TopGround... Formula to calculate the overlap
# might be off.
if frog.mask.overlap(i.mask, (offset_x, offset_y)):
frog.death()
# if pygame.sprite.spritecollide(frog, deathzones, False):
# frog.death()
# The floatable images have transparency everywhere. The mask collision
# detection is not very reliable here. Thus, we use sprites.
for i in pygame.sprite.spritecollide(frog, floatables, False):
if i.go_left:
frog.rect.x -= i.speed
else:
frog.rect.x += i.speed
# If we're out of lives, invoke the game over screen.
if not frog.lives:
game_over()
# Set the FPS to 30. To implement a rudimentary difficulty system, we
# increment the FPS by 10 per level to speed up the game.
clock.tick(30 + (level * 10))
# Everything is drawn. Now we refresh the display to reflect the
# changes.
pygame.display.update()
if __name__ == '__main__':
# Initialize Pygame, the screen/window and some globals.
pygame.init()
window = pygame.display.set_mode((480, 600), 0, 32)
main()
| mikar/60-days-of-python | games/frogger/frogger.py | Python | mit | 12,845 |
from hamutils.adif import ADXReader
f=open('./test.adx', 'r')
adx = ADXReader(f)
for qso in adx:
print(qso)
| sq8kfh/hamutils | examples/adx_read.py | Python | mit | 114 |
import json
import sys
import mcazurerm
# Load Azure app defaults
try:
with open('mcazurermconfig.json') as configFile:
configData = json.load(configFile)
except FileNotFoundError:
print("Error: Expecting vmssConfig.json in current folder")
sys.exit()
tenant_id = configData['tenantId']
app_id = configData['appId']
app_secret = configData['appSecret']
access_token = mcazurerm.get_access_token(
tenant_id,
app_id,
app_secret
)
# list subscriptions
subscriptions = mcazurerm.list_subscriptions(access_token)
for sub in subscriptions["value"]:
print(sub["displayName"] + ': ' + sub["subscriptionId"])
# print(type(subscriptions)) # dict
# print(type(subscriptions["value"])) # list
# print(type(subscriptions["value"][0])) # dict
# use the first subscription
subscription_id = subscriptions["value"][0]["subscriptionId"]
# create a resource group
print('Enter Resource group name to create.')
rgname = input()
location = 'southeastasia'
rgreturn = mcazurerm.create_resource_group(access_token, subscription_id, rgname, location)
print(rgreturn)
# list resource groups
# resource_groups = mcazurerm.list_resource_groups(access_token, subscription_id)
# for rg in resource_groups["value"]:
# print(rg["name"] + ', ' + rg["location"] + ', ' + rg["properties"]["provisioningState"])
# delete a resource groups
# location = 'southeastasia'
# rgreturn = mcazurerm.delete_resource_group(access_token, subscription_id, rgname)
# print(rgreturn)
# list resource groups
resource_groups = mcazurerm.list_resource_groups(access_token, subscription_id)
for rg in resource_groups["value"]:
print(rg["name"] + ', ' + rg["location"] + ', ' + rg["properties"]["provisioningState"])
# scale_sets = mcazurerm.list_vm_scale_sets(access_token, subscription_id, 'auto116')
# print(scale_sets)
| pjshi23/mcazurerm | examples/resourcegroups.py | Python | mit | 1,830 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pyperator documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 22 11:41:59 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pyperator'
copyright = '2017, Simone Baffelli'
author = 'Simone Baffelli'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1a'
# The full version, including alpha/beta/rc tags.
release = '0.1a'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyperatordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyperator.tex', 'pyperator Documentation',
'Simone Baffelli', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyperator', 'pyperator Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyperator', 'pyperator Documentation',
author, 'pyperator', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| baffelli/pyperator | doc/source/conf.py | Python | mit | 5,037 |
#!/usr/bin/env python2
import os.path, sys, sqlite3, re
# == Terms ==
# tag ie. Sweden
# tagpath ie. Earth:Europe:Sweden
# tagpaths ie. Earth:Europe:Sweden,Building:Residentual
# subtag ie. in Earth:Europe, Europe is a subtag of Earth
# leaftag ie. in Earth:Europe:Sweden, Sweden is the leaftag
# obj ie. /home/user/documents/a.txt
# or http://www.example.com/url/file.txt
# objs ie. [ obj, ... ]
class TagNotFoundError( Exception ):
pass
class DBNotFoundError( Exception ):
pass
class TagmDB( object ):
def __init__( self, dbfile = None ):
self.dbpath = os.path.split( dbfile )[0]
self.db = sqlite3.connect( dbfile )
self.db.row_factory = sqlite3.Row
self.db.text_factory = str
# Check if the tags table exists
if not self.db.execute( "select name from sqlite_master WHERE type='table' AND name='tags'" ).fetchone():
# tags Table does not exist, assume all table are missing, so create them
# Objs ( rowid, path )
self.db.execute( 'create table objs ( path )' )
self.db.execute( 'create unique index obj_paths on objs (path)' )
# Tags ( rowid, tag, parent )
self.db.execute( 'create table tags ( tag, parent )' )
self.db.execute( 'create unique index tag_tags on tags (tag,parent)' )
# ObjTags ( rowid, tag_id, obj_id )
self.db.execute( 'create table objtags ( tag_id, obj_id )' )
self.db.execute( 'create index objtag_tags on objtags (tag_id)' )
self.db.execute( 'create index objtag_objs on objtags (obj_id)' )
self.db.commit()
# Private util methods
def _get_tag_ids( self, parsed_tagpaths, create = False ):
'''Takes a list of tagpaths and returns the tag id of the leaf nodes'''
tag_ids = []
for tagpath in parsed_tagpaths:
pid = 0
for tag in tagpath:
row = self.db.execute( "select rowid from tags where tag = ? and parent = ?", ( tag, pid ) ).fetchone()
if not row:
if create:
pid = self.db.execute( "insert into tags ( tag, parent ) values ( ?, ? )", ( tag, pid ) ).lastrowid
else:
raise TagNotFoundError
else:
pid = row['rowid']
tag_ids.append( pid )
return tag_ids
def _get_subtag_ids( self, tag_id ):
'''Gets the subtags for the specified tag_id. Will search recursively'''
subtags = []
query = 'select rowid from tags where parent = ?'
for tag in self.db.execute( query, [ tag_id ] ):
subtags.append( tag['rowid'] )
subtags += self._get_subtag_ids( tag['rowid'] )
return subtags
def _get_tagpath( self, tag_id ):
'''Gets the tagpath for the specifed tag_id'''
row = self.db.execute( 'select parent, tag from tags where rowid = ?', [tag_id] ).fetchone()
tagnames = []
if not row:
raise TagNotFoundError
if row['parent']:
tagnames += self._get_tagpath( row['parent'] )
tagnames.append( row['tag'] )
return tagnames
def _get_obj_ids( self, objs ):
# TODO: Should raise exception on nonexisting objects like _get_tag_ids
# Will currently cause tags to be returned for objects which dont have
# any of the tags presented. Could cause unexpected behavior.
query = "select rowid from objs where path in ( '" + "','".join( objs ) + "' )"
return [ row['rowid'] for row in self.db.execute( query ) ]
# Public methods
def add( self, tags, objs = None, find = None ):
'''
Adds tags to the specified objects
'''
tags = self._get_tag_ids( tags, True )
# If no objs, find can be used to search internaly for objs
if not objs:
objs = self.get( find )
elif isinstance( objs, basestring ):
objs = [ objs ]
for obj in objs:
row = self.db.execute( 'select rowid from objs where path = ?', [obj] ).fetchone()
if not row:
curs = self.db.execute( 'insert into objs ( path ) values ( ? )', ( obj, ) )
obj_id = curs.lastrowid
else:
obj_id = row['rowid']
for i, tag_id in enumerate( tags ):
self.db.execute( 'insert into objtags ( tag_id, obj_id ) values ( ?, ? )', ( tag_id, obj_id ) )
self.db.commit()
def set( self, tags, objs = None, find = None ):
tags = self._get_tag_ids( tags, True )
if not objs:
objs = self.get( find )
elif isinstance( objs, basestring ):
objs = [ objs ]
for obj in objs:
row = self.db.execute( 'select rowid from objs where path = ?', [obj] ).fetchone()
if not row:
curs = self.db.execute( 'insert into objs ( path ) values ( ? )', ( obj, ) )
obj_id = curs.lastrowid
else:
obj_id = row['rowid']
# Remove any existing tags
self.db.execute( 'delete from objtags where obj_id = ?', ( obj_id, ) )
# Add the new tags
for i, tag_id in enumerate( tags ):
self.db.execute( 'insert into objtags ( tag_id, obj_id ) values ( ?, ? )', ( tag_id, obj_id ) )
self.db.commit()
def get( self, tags, obj_tags = False, subtags = False ):
'''
Looks up the objects tagged by the leaftags (or the leaftags' subtags if subtags is True)
and returns the objects themselves, or if obj_tags is True, returns any further tags the
objects are tagged with that are not part of the queried tags.
Example:
If you have objects 1, 2 and 3, and the tags a, b and c.
And object 1 is tagged with a, object 2 with a and b, and
object 3 with a, b and c. Then querying for tags a and b
will return objects 2 and 3, and if obj_tags is True, the tag
c will be returned instead, giving the caller a listing of
what tags are available to further constrain its queries.
'''
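        # Sketch of the docstring example above (hypothetical objects/tags, parsed tagpaths):
        #   self.get([['a'], ['b']])                 -> paths of objects 2 and 3
        #   self.get([['a'], ['b']], obj_tags=True)  -> [['c']]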
try:
# Lookup the leaftag ids
tagids = self._get_tag_ids( tags )
# If required, recursively include all of the subtags of the leaftags
# TODO: subtags as recursion depth _get_subtag_ids( tid, subtags )
if subtags:
tagids = [ [ tid ] + self._get_subtag_ids( tid ) for tid in tagids ]
else:
tagids = [ [ tid ] for tid in tagids ]
except TagNotFoundError:
# One of the tags provided does not exist, thus no query is needed as nothing will be found.
return []
# Start constructing the query
where = []
query_tags = []
query = ''
for i, tagid in enumerate( tagids ):
if i > 0:
query += " left join objtags as t%s on ( t0.obj_id = t%s.obj_id )" % ( i, i )
if len( tagid ) > 1:
# subtags is True, obj can have any of the listed tags
query_tags += tagid
where.append( 't%s.tag_id in ( %s )' % ( i, ', '.join( [ '?' ] * len( tagid ) ) ) )
else:
query_tags.append( tagid[0] )
where.append( 't%s.tag_id = ?' % ( i ) )
# TODO: Rearrange?
if not obj_tags:
query = "select distinct o.path from objtags as t0" + query
query += ' left join objs as o on ( t0.obj_id = o.rowid )'
else:
query = "select distinct tt.tag_id from objtags as t0" + query
query += ' left join objtags as tt on ( tt.obj_id = t0.obj_id and tt.tag_id not in ( %s ) )' % ','.join( [ str( tagid[0] ) for tagid in tagids ] )
where.append( 'tt.tag_id not null' )
if where:
query += ' where ' + ' and '.join( where )
curs = self.db.execute( query, query_tags )
if not obj_tags:
return [ obj['path'] for obj in curs ]
else:
return [ self._get_tagpath( row[0] ) for row in curs ]
def get_obj_tags( self, objs ):
query = "select distinct o0.tag_id from objtags as o0"
where = []
objs = self._get_obj_ids( objs )
if not objs:
return []
for i, obj in enumerate( objs ):
if i > 0:
query += " left join objtags as o%s on ( o0.tag_id = o%s.tag_id )" % ( i, i )
where.append( 'o%s.obj_id = ?' % ( i ) )
if where:
query += ' where ' + ' and '.join( where )
objtags = []
for row in self.db.execute( query, objs ):
objtags.append( self._get_tagpath( row['tag_id'] ) )
return objtags
TAGPATH_SEP = ':'
TAGPATH_SEP_RE = re.compile( r'(?<!\\)%s' % TAGPATH_SEP )
def parse_tagpaths( tagpaths ):
return [ [ tag.strip().replace( '\\:', ':' ) for tag in TAGPATH_SEP_RE.split( tagpath ) ] for tagpath in tagpaths ]
def join_tagpaths( tagpaths ):
return [ TAGPATH_SEP.join( [ tag.replace( TAGPATH_SEP, '\\' + TAGPATH_SEP ) for tag in tags ] ) for tags in tagpaths ]
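# Example round trip of the two helpers above:
#   parse_tagpaths(['Earth:Europe:Sweden']) -> [['Earth', 'Europe', 'Sweden']]
#   join_tagpaths([['Earth', 'Europe', 'Sweden']]) -> ['Earth:Europe:Sweden']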
def process_paths( dbpath, paths, recursive = False, follow = True ):
import fnmatch
def list_recursive():
for root, dirs, files in os.walk( '.' ):
for name in files:
yield os.path.relpath( os.path.join( root, name ) )
# Ensure that paths exist and are relative to db path
for path in paths:
objs_found = False
if not os.path.exists( path ):
# Does not exist, might be a glob path tho
for f in os.listdir( '.' ) if not recursive else list_recursive():
if fnmatch.fnmatch( f, path ):
objs_found = True
yield os.path.relpath( os.path.realpath( f ) if follow else f , dbpath )
if not objs_found:
raise IOError, 'File not found: %s' % path
else:
yield os.path.relpath( os.path.realpath( path ) if follow else path, dbpath )
def setup_parser():
import argparse, sys
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='subcommands')
# Init command: Initializes new tagm db file
def do_init( db, dbpath, ns ):
db = TagmDB( '.tagm.db' )
print 'Initiated tagm database in .tagm.db'
init_help = 'Will initialzie a tagm database in a file called .tagm.db located in the current directory'
init_parser = subparsers.add_parser( 'init', help = init_help, description = init_help )
init_parser.set_defaults( func = do_init )
# Add command: Adds tags to objects
def do_add( db, dbpath, ns ):
tags = parse_tagpaths( ns.tags != '' and ns.tags.split(',') or [] )
for f in process_paths( dbpath, ns.objs, ns.recursive, ns.follow ):
db.add( tags, f )
print 'Added', f, 'with tags', ns.tags
add_help = 'Will add the specified tags to the specified objects'
add_parser = subparsers.add_parser( 'add', help = add_help, description = add_help )
add_parser.add_argument( 'tags', help = 'List of tagpaths separated by comma' )
add_parser.add_argument( '-r', '--recursive', action = 'store_true', help = 'the list of objects is actually a list of recursive glob paths' )
add_parser.add_argument( '-f', '--no-follow', dest = 'follow', action = 'store_false',
help = 'do not follow any symlinks')
add_parser.add_argument( 'objs', nargs = '+', help = 'List of objects to be tagged' )
add_parser.set_defaults( func = do_add )
# Set command: directly sets the tags of objects
def do_set( db, dbpath, ns ):
tags = parse_tagpaths( ns.tags != '' and ns.tags.split(',') or [] )
if ns.objs_is_tags:
objs = db.get( parse_tagpaths( ns.objs ) )
else:
objs = process_paths( dbpath, ns.objs, ns.recursive, ns.follow )
for f in objs:
db.set( tags, f )
print 'Set tags to', ns.tags, 'on', f
set_help = 'Will set the specified objects\' tags to the specified tags'
set_parser = subparsers.add_parser( 'set', help = set_help, description = set_help )
set_parser.add_argument( 'tags', help = 'List of tagpaths separated by comma' )
set_parser.add_argument( '-r', '--recursive', action = 'store_true', help = 'the list of objects is actually a list of recursive glob paths' )
set_parser.add_argument( '-f', '--no-follow', dest = 'follow', action = 'store_false',
help = 'do not follow any symlinks')
set_parser.add_argument( '-t', '--tags', dest = 'objs_is_tags', action = 'store_true',
help = 'the list of objects is actually a list of tagspaths used to lookup the actual objects to tag' )
set_parser.add_argument( 'objs', nargs = '+', help = 'List of objects to be tagged' )
set_parser.set_defaults( func = do_set )
# Get command: gets objects tagged with tags
def do_get( db, dbpath, ns ):
if not isinstance( ns.tags, list ):
tags = [ ns.tags ]
elif ns.tags == '':
tags = []
else:
tags = ns.tags
tags = sum( [ t.split(',') for t in tags ], [] )
if not ns.obj_tags:
tags = parse_tagpaths( tags )
objs = db.get( tags, obj_tags = ns.tag_tags, subtags = ns.subtags )
else:
objs = db.get_obj_tags( process_paths( dbpath, tags ) )
if ns.tag_tags or ns.obj_tags:
for tag in sorted( join_tagpaths( objs ) ):
print tag
else:
for obj in objs:
print os.path.relpath( os.path.join( dbpath, obj ) )
get_help = 'Will list all the objects that are taged with all of the specified tags.'
get_parser = subparsers.add_parser( 'get', help = get_help, description = get_help )
get_parser.add_argument( 'tags', nargs = '*', default = [],
help = 'list of tagpaths (or objects incase --obj-tags is used) separated by comma' )
get_parser.add_argument( '--tags', action = 'store_true', dest = 'tag_tags',
help = 'output the tags of the found objects instead of the objects themselves')
get_parser.add_argument( '--subtags', action = 'store_true',
help = 'include subtags of the specified tags in the query')
get_parser.add_argument( '--obj-tags', action = 'store_true',
help = 'lookup the tags of the specified objects instead of the other way around')
get_parser.set_defaults( func = do_get )
return parser
def main():
args = setup_parser().parse_args()
if args.func.__name__ != 'do_init':
# Try and find a .tagr.db file in current dir, if not there continue going up the filetree
# if nothing found, error will be raised.
curpath = os.path.realpath( '.' )
while 1:
if os.path.exists( os.path.join( curpath, '.tagm.db' ) ):
break
elif curpath == '/':
print 'Unable to find tagm database!'
print 'Please create one by running:'
print '%s init' % sys.argv[0]
sys.exit(1)
else:
curpath = os.path.realpath( curpath + '/..' )
dbpath = curpath
db = TagmDB( os.path.join( dbpath, '.tagm.db' ) )
else:
db = dbpath = None
args.func( db, dbpath, args )
if __name__ == '__main__':
main()
| Nimdraug/tagm | tagm.py | Python | mit | 16,286 |
import json
import requests
from metadata import wiley
from metadata import worldscientific
from metadata import jpharmsci
from metadata import hindawi
from metadata import elsevier
from metadata import springer
from metadata import nature
from metadata import ieee
from metadata import iucr
from metadata import bioaging
from metadata import nmd
from metadata import wkhealth
from metadata import crossref
def extract(doi, type=None):
metadata = {}
url = ''
if not type:
try:
return crossref.map(doi)
except:
pass
if type == 'url':
doc_url = doi.rstrip()
else:
url = 'http://doi.org/api/handles/' + doi.rstrip()
if not type:
try:
r = requests.get(url)
except:
return {}
content = json.loads(r.content.decode())
r.close()
if 'values' in content.keys():
for element in content['values']:
if element['type'] == 'URL':
doc_url = element['data']['value']
else:
return metadata
if 'wiley' in doc_url:
metadata = wiley.map(doc_url)
elif 'worldscientific' in doc_url:
metadata = worldscientific.map(doc_url)
elif 'jpharmsci' in doc_url:
metadata = jpharmsci.map(doc_url)
elif 'hindawi' in doc_url:
metadata = hindawi.map(doc_url)
elif 'elsevier' in doc_url:
metadata = elsevier.map(doc_url)
elif 'sciencedirect' in doc_url:
metadata = elsevier.map(doc_url)
elif 'springer' in doc_url:
metadata = springer.map(doc_url)
elif 'nature' in doc_url:
metadata = nature.map(doc_url)
elif 'ieee' in doc_url:
metadata = ieee.map(doc_url)
elif 'iucr' in doc_url:
metadata = iucr.map(doc_url)
elif 'neurobiologyofaging' in doc_url:
metadata = bioaging.map(doc_url)
elif 'nmd-journal' in doc_url:
metadata = nmd.map(doc_url)
elif 'wkhealth' in doc_url:
metadata = wkhealth.map(doi.strip())
return metadata
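# Illustrative call (the DOI is hypothetical): extract('10.1000/example') resolves
# the handle via doi.org, dispatches on the publisher URL it points to, and returns
# the mapped metadata dict, or {} when the handle cannot be resolved.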
| sjennewein/MetaDataDistiller | metadata/data.py | Python | mit | 2,067 |
from Controller.GameState import GameState
class GameController:
def __init__(self, repository):
"""
Main game controller. Takes care of creating the states and communicating with deeper functions safely
Input: Repository - The repository to be used with the game
"""
self.repository = repository
def newGame(self):
"""
Creates a new instance of the game
"""
sentence = self.repository.getRandom()[0]
return GameState(sentence)
def addSentence(self, sentence: str):
"""
Forwards the sentence form the user to the repository to be validated and added
Input: sentence in string format to be added
"""
self.repository.addSentence(sentence)
def forceSave(self):
"""
ONLY FOR DEBUGGING!!!
"""
        self.repository.saveRepo()
| Zephyrrus/ubb | YEAR 1/SEM1/FP/LAB/Examen/Controller/GameController.py | Python | mit | 911 |
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import subprocess
'''
Original Source: https://github.com/scipy/scipy/blob/master/setup.py
'''
if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 2):
raise RuntimeError("Python version 2.6, 2.7 (TODO: >= 3.2) required.")
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
MAJOR = 0
MINOR = 1
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
with open('./requirements.txt') as f:
required = f.read().splitlines()
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
# This is a bit hackish: we are setting a global variable so that the main
# pyfunt __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet. While ugly, it's
# a lot more robust than what was previously being used.
builtins.__PUFUNT_SETUP__ = True
def get_version_info():
# Adding the git rev number needs to be done inside
# write_version_py(), otherwise the import of pyfunt.version messes
# up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('pyfunt/version.py'):
# must be a source distribution, use existing version file
# load it as a separate module to not load pyfunt/__init__.py
import imp
version = imp.load_source('pyfunt.version', 'pyfunt/version.py')
GIT_REVISION = version.git_revision
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
def write_version_py(filename='pyfunt/version.py'):
cnt = """\
# THIS FILE IS GENERATED FROM PYFUNT SETUP.PY\
short_version = '%(version)s'\
version = '%(version)s'\
full_version = '%(full_version)s'\
git_revision = '%(git_revision)s'\
release = %(isrelease)s\
if not release:\
version = full_version\
"""
FULLVERSION, GIT_REVISION = get_version_info()
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'pyfunt'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('pyfunt')
config.add_data_files(('pyfunt', '*.txt'))
config.get_version('pyfunt/version.py')
return config
def setup_package():
# Rewrite the version file every time
write_version_py()
cmdclass = {}
# Figure out whether to add ``*_requires = ['numpy']``.
# We don't want to do that unconditionally, because we risk updating
# an installed numpy which fails too often. Just if it's not installed, we
# may give it a try. See gh-3379.
build_requires = []
try:
import numpy
if (len(sys.argv) >= 2 and sys.argv[1] == 'bdist_wheel' and
sys.platform == 'darwin'):
# We're ony building wheels for platforms where we know there's
# also a Numpy wheel, so do this unconditionally. See gh-5184.
build_requires = ['numpy>=1.7.1']
except:
build_requires = ['numpy>=1.7.1']
metadata = dict(
name="pyfunt",
author="Daniele Ettore Ciriello",
author_email="[email protected]",
version="1.1.0",
license="MIT",
url="https://github.com/dnlcrl/PyFunt",
download_url="https://github.com/dnlcrl/PyFunt",
description="Pythonic Deep Learning Framework",
packages=['pyfunt', 'pyfunt/examples', 'pyfunt/utils', 'pyfunt/examples/residual_networks', ],
cmdclass=cmdclass, # {'build_ext': build_ext},
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
setup_requires=build_requires,
install_requires=required,
# ext_modules=extensions,
keywords='pyfunt deep learning artificial neural network convolution',
)
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands', 'egg_info', '--version',
'clean')):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scipy when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
else:
if (len(sys.argv) >= 2 and sys.argv[1] in ('bdist_wheel', 'bdist_egg')) or (
'develop' in sys.argv):
# bdist_wheel/bdist_egg needs setuptools
import setuptools
from numpy.distutils.core import setup
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
metadata['configuration'] = configuration
print 'setup complete'
setup(**metadata)
if __name__ == '__main__':
setup_package()
| dnlcrl/PyFunt | setup.py | Python | mit | 6,892 |
from sys import argv
script, q = argv
def reversefac(num):
reversefacH(num, 1, 1)
def reversefacH(num, prev, iterator):
"""Helper function that actually calculates the reverse factorial"""
f = prev * iterator
if f == num:
print "%d!" % iterator
elif f > num:
print "NONE"
else:
reversefacH(num, f, iterator + 1)
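# Worked example: 120 = 1*2*3*4*5, so reversefac(120) prints "5!";
# reversefac(100) prints "NONE" because no factorial equals 100.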
if __name__ == '__main__':
    reversefac(int(q))
| lukasios12/dailyprogrammer | c286e/reverseFactorial.py | Python | mit | 392 |
#!/usr/bin/python
###################################################
### THE VALUES BELOW CAN BE EDITED AS NEEDED ######
###################################################
writeClassesFile = True # TRUE: Writes mark classes to external file. FALSE: Writes mark classes as part of mark.fea file.
genMkmkFeature = True # TRUE: Writes mkmk.fea file. FALSE: Ignores mark-to-mark placement.
indianScriptsFormat = True # TRUE: Writes abvm.fea and blwm.fea files. FALSE: Writes simple mark.fea file.
trimCasingTags = True # TRUE: Trims casing tags so that all marks can be applied to UC/LC. FALSE: Leaves casing tags as is.
# ----------------------------------------------
libraryNotFound = False
import sys, os, time
try:
from defcon import Font
except:
print "ERROR: This script requires defcon. It can be downloaded from https://github.com/typesupply/defcon"
libraryNotFound = True
try:
import WriteFeaturesMarkFDK
except:
print "ERROR: This script requires WriteFeaturesMarkFDK.py. It can be downloaded from https://github.com/adobe-type-tools/python-modules"
libraryNotFound = True
if libraryNotFound:
sys.exit()
fontsList = []
def getFontPaths(path, startpath):
# print "Searching in path...", path
files = os.listdir(path)
for file in files:
if file[-4:].lower() in [".ufo"]:
fontsList.append(os.path.join(path, file)) #[len(startpath)+1:])
else:
if os.path.isdir(os.path.join(path, file)):
getFontPaths(os.path.join(path, file), startpath)
def doTask(fonts):
totalFonts = len(fonts)
print "%d fonts found\n" % totalFonts
i = 0
for font in fonts:
i += 1
folderPath, fontFileName = os.path.split(os.path.realpath(font)) # path to the folder where the font is contained and the font's file name
styleName = os.path.basename(folderPath) # name of the folder where the font is contained
# Change current directory to the folder where the font is contained
os.chdir(folderPath)
print '*******************************'
print 'Exporting mark files for %s...(%d/%d)' % (styleName, i, totalFonts)
ufoFont = Font(fontFileName)
WriteFeaturesMarkFDK.MarkDataClass(ufoFont, folderPath, trimCasingTags, genMkmkFeature, writeClassesFile, indianScriptsFormat)
def run():
# if a path is provided
if len(sys.argv[1:]):
baseFolderPath = sys.argv[1]
if baseFolderPath[-1] == '/': # remove last slash if present
baseFolderPath = baseFolderPath[:-1]
# make sure the path is valid
if not os.path.isdir(baseFolderPath):
print 'Invalid directory.'
return
# if a path is not provided, use the current directory
else:
baseFolderPath = os.getcwd()
t1 = time.time()
getFontPaths(baseFolderPath, baseFolderPath)
if len(fontsList):
doTask(fontsList)
else:
print "No fonts found"
return
t2 = time.time()
elapsedSeconds = t2-t1
if (elapsedSeconds/60) < 1:
print 'Completed in %.1f seconds.' % elapsedSeconds
else:
print 'Completed in %.1f minutes.' % (elapsedSeconds/60)
if __name__=='__main__':
run()
| moyogo/python-scripts | FDK Extras/generateAllMarkFiles.py | Python | mit | 3,003 |
import json
import logging
import webapp2
from datetime import datetime
from google.appengine.ext import ndb
from controllers.api.api_base_controller import ApiBaseController
from helpers.award_helper import AwardHelper
from helpers.district_helper import DistrictHelper
from helpers.model_to_dict import ModelToDict
from models.event import Event
class ApiEventController(ApiBaseController):
CACHE_KEY_FORMAT = "apiv2_event_controller_{}" # (event_key)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 60 * 60
def __init__(self, *args, **kw):
super(ApiEventController, self).__init__(*args, **kw)
self.event_key = self.request.route_kwargs["event_key"]
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
@property
def _validators(self):
return [("event_id_validator", self.event_key)]
def _set_event(self, event_key):
self.event = Event.get_by_id(event_key)
if self.event is None:
self._errors = json.dumps({"404": "%s event not found" % self.event_key})
self.abort(404)
def _track_call(self, event_key):
self._track_call_defer('event', event_key)
def _render(self, event_key):
self._set_event(event_key)
event_dict = ModelToDict.eventConverter(self.event)
return json.dumps(event_dict, ensure_ascii=True)
class ApiEventTeamsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_teams_controller_{}" # (event_key)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 60 * 60
def __init__(self, *args, **kw):
super(ApiEventTeamsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/teams', event_key)
def _render(self, event_key):
self._set_event(event_key)
teams = filter(None, self.event.teams)
team_dicts = [ModelToDict.teamConverter(team) for team in teams]
return json.dumps(team_dicts, ensure_ascii=True)
class ApiEventMatchesController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_matches_controller_{}" # (event_key)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventMatchesController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/matches', event_key)
def _render(self, event_key):
self._set_event(event_key)
matches = self.event.matches
match_dicts = [ModelToDict.matchConverter(match) for match in matches]
return json.dumps(match_dicts, ensure_ascii=True)
class ApiEventStatsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_stats_controller_{}" # (event_key)
CACHE_VERSION = 0
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventStatsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/stats', event_key)
def _render(self, event_key):
self._set_event(event_key)
return json.dumps(Event.get_by_id(event_key).matchstats)
class ApiEventRankingsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_rankings_controller_{}" # (event_key)
CACHE_VERSION = 0
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventRankingsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/rankings', event_key)
def _render(self, event_key):
self._set_event(event_key)
ranks = json.dumps(Event.get_by_id(event_key).rankings)
if ranks is None or ranks == 'null':
return '[]'
else:
return ranks
class ApiEventAwardsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_awards_controller_{}" # (event_key)
CACHE_VERSION = 3
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventAwardsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/awards', event_key)
def _render(self,event_key):
self._set_event(event_key)
award_dicts = [ModelToDict.awardConverter(award) for award in AwardHelper.organizeAwards(self.event.awards)]
return json.dumps(award_dicts, ensure_ascii=True)
class ApiEventDistrictPointsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_district_points_controller_{}" # (event_key)
CACHE_VERSION = 0
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventDistrictPointsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/district_points', event_key)
def _render(self, event_key):
self._set_event(event_key)
points = DistrictHelper.calculate_event_points(self.event)
return json.dumps(points, ensure_ascii=True)
class ApiEventListController(ApiBaseController):
CACHE_KEY_FORMAT = "apiv2_event_list_controller_{}" # (year)
CACHE_VERSION = 2
CACHE_HEADER_LENGTH = 60 * 60 * 24
def __init__(self, *args, **kw):
super(ApiEventListController, self).__init__(*args, **kw)
self.year = int(self.request.route_kwargs.get("year") or datetime.now().year)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.year)
@property
def _validators(self):
return []
def _track_call(self, *args, **kw):
self._track_call_defer('event/list', self.year)
def _render(self, year=None):
if self.year < 1992 or self.year > datetime.now().year + 1:
self._errors = json.dumps({"404": "No events found for %s" % self.year})
self.abort(404)
keys = Event.query(Event.year == self.year).fetch(1000, keys_only=True)
events = ndb.get_multi(keys)
event_list = [ModelToDict.eventConverter(event) for event in events]
return json.dumps(event_list, ensure_ascii=True)
| 1fish2/the-blue-alliance | controllers/api/api_event_controller.py | Python | mit | 6,553 |
def classproperty(func):
"""Method decorator to turn a method into a class property.
Only getting the value is possible"""
class _classproperty(property):
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
return _classproperty(classmethod(func))
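# Illustrative usage sketch (the class and attribute below are hypothetical,
# not part of this module):
#
#   class Thumbnail(object):
#       @classproperty
#       def formats(cls):
#           return ('jpeg', 'png')
#
#   Thumbnail.formats   # -> ('jpeg', 'png'), computed on the class, no instance needed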
| amirouche/django-images | django_images/helpers.py | Python | mit | 307 |
from io import BytesIO
from os import SEEK_SET
from random import choice
from string import ascii_uppercase, digits
import fastavro
def test_str_py3():
letters = ascii_uppercase + digits
id_size = 100
def gen_id():
return "".join(choice(letters) for _ in range(id_size))
keys = ["first", "second", "third", "fourth"]
testdata = [{key: gen_id() for key in keys} for _ in range(50)]
schema = {
"fields": [{"name": key, "type": "string"} for key in keys],
"namespace": "namespace",
"name": "zerobyte",
"type": "record",
}
buf = BytesIO()
fastavro.writer(buf, schema, testdata)
buf.seek(0, SEEK_SET)
for i, rec in enumerate(fastavro.reader(buf), 1):
pass
size = len(testdata)
assert i == size, "bad number of records"
assert rec == testdata[-1], "bad last record"
def test_py3_union_string_and_bytes():
schema = {
"fields": [{"name": "field", "type": ["string", "bytes"]}],
"namespace": "namespace",
"name": "union_string_bytes",
"type": "record",
}
records = [{"field": "string"}, {"field": b"bytes"}]
buf = BytesIO()
fastavro.writer(buf, schema, records)
| fastavro/fastavro | tests/test_str_py3.py | Python | mit | 1,224 |
import serial
inp = raw_input("Enter serial port: ")
ser = serial.Serial(inp)
print("")
while True:
    params = raw_input()
    ser.write(params + "\n")
    print("")
| tarquasso/softroboticfish6 | logging/write_serial.py | Python | mit | 170 |
#!/usr/bin/env python
from setuptools import setup
setup(
name="hashcache",
version="0.0.1",
description="Wrapper for the django cache api to gracefully handle long or non-ascii keys",
license="MIT",
author="Yola, Inc.",
author_email="[email protected]",
url="http://github.com/yola/hashcache",
packages = ("hashcache",),
keywords= "django cache library",
zip_safe = False,
)
| yola/hashcache | setup.py | Python | mit | 419 |
# Generated by Django 2.1.7 on 2019-04-02 08:01
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.documents.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [("club", "0007_auto_20190402_0856")]
operations = [
migrations.AlterField(
model_name="articlepage",
name="body",
field=wagtail.core.fields.StreamField(
[
("heading", wagtail.core.blocks.CharBlock(classname="full title")),
("paragraph", wagtail.core.blocks.RichTextBlock()),
("image", wagtail.images.blocks.ImageChooserBlock(icon="image")),
("document", wagtail.documents.blocks.DocumentChooserBlock()),
(
"two_columns",
wagtail.core.blocks.StructBlock(
[
(
"background",
wagtail.core.blocks.ChoiceBlock(
choices=[("bg-white", "White"), ("bg-light", "Light"), ("bg-dark", "Dark")]
),
),
(
"left_column",
wagtail.core.blocks.StreamBlock(
[
("heading", wagtail.core.blocks.CharBlock(classname="full title")),
("paragraph", wagtail.core.blocks.RichTextBlock()),
("image", wagtail.images.blocks.ImageChooserBlock()),
],
icon="arrow-left",
label="Left column content",
),
),
(
"right_column",
wagtail.core.blocks.StreamBlock(
[
("heading", wagtail.core.blocks.CharBlock(classname="full title")),
("paragraph", wagtail.core.blocks.RichTextBlock()),
("image", wagtail.images.blocks.ImageChooserBlock()),
],
icon="arrow-right",
label="Right column content",
),
),
]
),
),
(
"three_columns",
wagtail.core.blocks.StructBlock(
[
(
"background",
wagtail.core.blocks.ChoiceBlock(
choices=[("bg-white", "White"), ("bg-light", "Light"), ("bg-dark", "Dark")]
),
),
(
"left_column",
wagtail.core.blocks.StreamBlock(
[
("heading", wagtail.core.blocks.CharBlock(classname="full title")),
("paragraph", wagtail.core.blocks.RichTextBlock()),
("image", wagtail.images.blocks.ImageChooserBlock()),
],
icon="arrow-left",
label="Left column content",
),
),
(
"middle_column",
wagtail.core.blocks.StreamBlock(
[
("heading", wagtail.core.blocks.CharBlock(classname="full title")),
("paragraph", wagtail.core.blocks.RichTextBlock()),
("image", wagtail.images.blocks.ImageChooserBlock()),
],
icon="horizontalrule",
label="Middle column content",
),
),
(
"right_column",
wagtail.core.blocks.StreamBlock(
[
("heading", wagtail.core.blocks.CharBlock(classname="full title")),
("paragraph", wagtail.core.blocks.RichTextBlock()),
("image", wagtail.images.blocks.ImageChooserBlock()),
],
icon="arrow-right",
label="Right column content",
),
),
]
),
),
(
"button_group",
wagtail.core.blocks.StructBlock(
[
(
"position",
wagtail.core.blocks.ChoiceBlock(
choices=[("left", "Left"), ("center", "Centre"), ("right", "Right")]
),
),
(
"buttons",
wagtail.core.blocks.StreamBlock(
[
(
"button_url",
wagtail.core.blocks.StructBlock(
[
("button_text", wagtail.core.blocks.CharBlock(required=True)),
(
"css_class",
wagtail.core.blocks.CharBlock(
default="btn btn-primary mr-2", required=True
),
),
("url_link", wagtail.core.blocks.URLBlock(required=True)),
]
),
),
(
"button_int",
wagtail.core.blocks.StructBlock(
[
("button_text", wagtail.core.blocks.CharBlock(required=True)),
(
"css_class",
wagtail.core.blocks.CharBlock(
default="btn btn-primary mr-2", required=True
),
),
(
"internal_link",
wagtail.core.blocks.CharBlock(required=True),
),
]
),
),
]
),
),
]
),
),
]
),
)
]
| ianastewart/cwltc-admin | club/migrations/0008_auto_20190402_0901.py | Python | mit | 8,972 |
# Word Problems
# Demonstrates numbers and math
print("If a 2000 pound pregnant hippo gives birth to a 100 pound calf,")
print("but then eats 50 pounds of food, how much does she weigh?")
input("Press the enter key to find out.")
print("2000 - 100 + 50 =", 2000 - 100 + 50)
print("\nIf an adventurer returns from a successful quest and buys each of")
print("6 companions 3 bottles of ale, how many bottles are purchased?")
input("Press the enter key to find out.")
print("6 * 3 =", 6 * 3)
print("\nIf a restaurant check comes to 19 dollars with tip, and you and")
print("your friends split it evenly 4 ways, how much do you each throw in?")
input("Press the enter key to find out.")
print("19 / 4 =", 19 / 4)
print("\nIf a group of 4 pirates finds a chest full of 107 gold coins, and")
print("they divide the booty evenly, how many whole coins does each get?")
input("Press the enter key to find out.")
print("107 // 4 =", 107 // 4)
print("\nIf that same group of 4 pirates evenly divides the chest full")
print("of 107 gold coins, how many coins are left over?")
input("Press the enter key to find out.")
print("107 % 4 =", 107 % 4)
input("\n\nPress the enter key to exit.")
| bohdan-shramko/learning-python | source/chapter02/word_problems.py | Python | mit | 1,182 |
#!/usr/bin/python
""" Tool to setup AWS CLI.
"""
import os
import sys
from subprocess import check_output
def setup_s3(s3_access_key, s3_secret_key):
"""Create S3 configuration file."""
home = os.path.expanduser("~")
aws_dir = os.path.join(home, '.aws')
if not os.path.exists(aws_dir):
os.makedirs(aws_dir)
# Write config file
with open(os.path.join(aws_dir, 'config'), 'w') as f:
f.write('[default]\n')
# Write to disk S3cmd config file
with open(os.path.join(aws_dir, 'credentials'), 'w') as f:
credentials = '[default]\naws_access_key_id = %s\naws_secret_access_key = %s\n' % (s3_access_key, s3_secret_key)
f.write(credentials)
def execute(command):
""" Execute external host command and print it's output."""
output = check_output(command)
print output.rstrip()
def print_usage():
print "Usage: docker run -e S3_ACCESS_KEY=[PUT KEY HERE] -e S3_SECRET_KEY=[PUT KEY HERE] cloudwalk/aws [PUT COMMAND HERE]"
if __name__ == '__main__':
# Get expected environment variables
access_key = os.getenv('S3_ACCESS_KEY')
secret_key = os.getenv('S3_SECRET_KEY')
if access_key is None or secret_key is None:
print_usage()
sys.exit(1)
# Create AWS config file
setup_s3(access_key, secret_key)
# Execute aws command appended by whatever arguments is passed to this script
command = ['aws'] + sys.argv[1:]
execute(command)
| cloudwalkio/docker-aws-cli | aws_cli.py | Python | mit | 1,450 |
from package.visualization import visualize
visualize.plot_radial_chart()
| Iceman121/radial_bar_chart | src/main.py | Python | mit | 75 |
"""
WSGI config for venus project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "venus.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| davogler/venus | venus/wsgi.py | Python | mit | 1,132 |
#!/usr/bin/env python3
import csv, os, sys
from collections import Counter
# import utils
currentdir = os.path.dirname(__file__)
libpath = os.path.join(currentdir, '../../lib')
sys.path.append(libpath)
import SonicScrewdriver as utils
import FileCabinet as filecab
# start by loading the hard seeds
stanford = set()
with open('../lexicons/stanford.csv', encoding = 'utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
if row['class'] == 'hard':
stanford.add(row['word'])
sourcedir = '../sourcefiles/'
pairedpaths = filecab.get_pairedpaths(sourcedir, '.tsv')
docids = [x[0] for x in pairedpaths]
wordcounts = filecab.get_wordcounts(sourcedir, '.tsv', docids)
metapath = '../metadata/allgenremeta.csv'
genredict = dict()
datedict = dict()
with open(metapath, encoding = 'utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
date = int(row['firstpub'])
genre = row['genretags']
docid = row['docid']
if date not in datedict:
datedict[date] = []
datedict[date].append(docid)
genredict[docid] = genre
possible_genres = {'poe', 'fic', 'bio'}
allcounts = dict()
hardseedcounts = dict()
for genre in possible_genres:
allcounts[genre] = Counter()
hardseedcounts[genre] = Counter()
for i in range(1700,2000):
if i in datedict:
candidates = datedict[i]
for anid in candidates:
genre = genredict[anid]
if anid not in wordcounts:
print('error')
continue
else:
for word, count in wordcounts[anid].items():
allcounts[genre][i] += count
if word in stanford:
hardseedcounts[genre][i] += count
with open('plotdata/hardaverages.csv', mode = 'w', encoding = 'utf-8') as f:
f.write('genre,year,hardpct\n')
for genre in possible_genres:
for i in range(1700,2000):
if i in allcounts[genre]:
pct = hardseedcounts[genre][i] / (allcounts[genre][i] + 1)
f.write(genre + ',' + str(i) + ',' + str(pct) + '\n')
| tedunderwood/horizon | chapter1/code/createhardaverages.py | Python | mit | 2,146 |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import functools
from urllib.parse import urlparse, parse_qsl
from azure.core.async_paging import AsyncList, AsyncItemPaged
from .._response_handlers import healthcare_result, get_iter_items
async def healthcare_extract_page_data_async(
doc_id_order, obj, response_headers, health_job_state
): # pylint: disable=unused-argument
return (
health_job_state.next_link,
healthcare_result(
doc_id_order, health_job_state.results, response_headers, lro=True
),
)
async def lro_get_next_page_async(
lro_status_callback, first_page, continuation_token, show_stats=False
):
if continuation_token is None:
return first_page
try:
continuation_token = continuation_token.decode("utf-8")
except AttributeError:
pass
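# The continuation token is the service's next-link URL: the job id is taken
# from the last path segment, and the remaining query parameters (with any
# "$" characters stripped) are forwarded to the status callback below.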
parsed_url = urlparse(continuation_token)
job_id = parsed_url.path.split("/")[-1]
query_params = dict(parse_qsl(parsed_url.query.replace("$", "")))
if "showStats" in query_params:
query_params.pop("showStats")
query_params["show_stats"] = show_stats
return await lro_status_callback(job_id, **query_params)
def healthcare_paged_result(
doc_id_order,
health_status_callback,
response,
obj,
response_headers,
show_stats=False,
): # pylint: disable=unused-argument
return AsyncItemPaged(
functools.partial(
lro_get_next_page_async, health_status_callback, obj, show_stats=show_stats
),
functools.partial(
healthcare_extract_page_data_async, doc_id_order, obj, response_headers
),
)
async def analyze_extract_page_data_async(
doc_id_order, task_order, response_headers, analyze_job_state
):
iter_items = get_iter_items(
doc_id_order, task_order, response_headers, analyze_job_state
)
return analyze_job_state.next_link, AsyncList(iter_items)
def analyze_paged_result(
doc_id_order,
task_order,
analyze_status_callback,
response, # pylint: disable=unused-argument
obj,
response_headers,
show_stats=False, # pylint: disable=unused-argument
):
return AsyncItemPaged(
functools.partial(lro_get_next_page_async, analyze_status_callback, obj),
functools.partial(
analyze_extract_page_data_async, doc_id_order, task_order, response_headers
),
)
| Azure/azure-sdk-for-python | sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_response_handlers_async.py | Python | mit | 2,512 |
# -*- coding: utf-8 -*-
# We'll extend the basic TestCase.
from django.test import TestCase
# We'll need to look up some URLs.
from django.core.urlresolvers import reverse
# We'll test the headers with this tool:
from apps.tests.headers import HeaderTests
class TestHelloWorld(TestCase):
"""
Test the hello world page.
"""
def test_headers(self):
"""
Make sure the right headers are present.
"""
# Instantiate the header tests tool.
header_tests = HeaderTests()
# Load the helloworld page.
url = reverse('helloworld')
response = self.client.get(url)
# Test the content type header.
header_tests.check_content_type_header(response)
# Test the security headers.
header_tests.check_security_headers(response)
def test_response_code(self):
"""
Make sure the response code is 200.
"""
# Load the helloworld page.
url = reverse('helloworld')
response = self.client.get(url)
# Does it 200?
self.assertEqual(response.status_code, 200)
def test_response_copy(self):
"""
Make sure the copy is correct.
"""
# Load the helloworld page.
url = reverse('helloworld')
response = self.client.get(url)
# Is the copy correct?
expected = 'Hello World'
self.assertContains(response, expected)
| jtpaasch/skeleton | apps/helloworld/tests/test_helloworld.py | Python | mit | 1,445 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-20 07:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='User',
new_name='Number',
),
]
| rafaelmv/smsuela | home/migrations/0002_auto_20170620_0750.py | Python | mit | 379 |
import os
from setuptools import setup
setup(
name = "jsh",
version = "1.0",
author = "jinsub ahn",
author_email = "[email protected]",
description = ("this module allows user to run shell command easily."),
license = "MIT",
keywords = "shell, util",
url = "https://github.com/jinniahn/jsh",
packages=['jsh'],
install_requires=[
'pexpect',
],
classifiers=[
"Topic :: Utilities"
]
)
| jinniahn/jsh | setup.py | Python | mit | 456 |
''' Module for Karma Level Provider Class '''
import karmaserver.utils.print as print_
from karmaserver.data.content_resolver import content_resolver
from karmaserver.data.models.policy import Policy, PolicyNotExistsException, PolicyExistsException
from karmaserver.constants import DEFAULT_FORMULA, MAX_KARMA_LEVEL
class KarmaLevelProviderAbstract: # pragma: no cover
''' Karma Level Provider Abstract, has the methods to calculate the karma
levels and to classify user for its points '''
def print_info(self, default_created):
''' Prints the Provider Configuration '''
raise NotImplementedError('Abstract class, this method should have been implemented')
def get_level(self, policy_id, points):
''' Returns the information for the karma level for the points passed '''
raise NotImplementedError('Abstract class, this method should have been implemented')
def get_levels(self, policy_id):
''' Returns all Karma Data '''
raise NotImplementedError('Abstract class, this method should have been implemented')
def create_policy(self, policy_id, formula, max_level):
''' Creates new policy '''
raise NotImplementedError('Abstract class, this method should have been implemented')
def update_policy(self, policy_id, formula=None, max_level=None):
''' Updates an existing policy '''
raise NotImplementedError('Abstract class, this method should have been implemented')
def delete_policy(self, policy_id):
''' Deletes an existing policy '''
raise NotImplementedError('Abstract class, this method should have been implemented')
class KarmaLevelProvider(KarmaLevelProviderAbstract):
''' Implementation of Karma Level Provider '''
def __init__(self):
pass
# default_created = _create_default_policy_if_not()
# self.print_info(default_created)
def print_info(self, default_created):
print_.initialize_info(self.__class__.__name__, default_created)
if default_created:
print_.info_list('default policy created')
def get_level(self, policy_id, points):
policy = self.__get_policy_or_raise(policy_id)
return policy.get_level(points)
def get_levels(self, policy_id):
policy = self.__get_policy_or_raise(policy_id)
return policy.get_levels()
def create_policy(self, policy_id, formula, max_level):
_raise_if_exists(policy_id)
policy = Policy(policy_id, formula, max_level)
content_resolver.update(policy)
def update_policy(self, policy_id, formula=None, max_level=None):
policy = self.__get_policy_or_raise(policy_id)
if formula:
policy.set_formula(formula)
if max_level:
policy.max_level = max_level
content_resolver.update(policy)
def delete_policy(self, policy_id):
policy = self.__get_policy_or_raise(policy_id)
content_resolver.delete(policy)
@staticmethod
def __get_policy_or_raise(policy_id):
policy = content_resolver.get(Policy, _id=policy_id)
if not policy:
raise PolicyNotExistsException(f'Policy = {policy_id}')
return policy[0]
def _create_default_policy_if_not():
policy = content_resolver.get(Policy, _id='default')
if not policy:
policy = Policy('default', DEFAULT_FORMULA, MAX_KARMA_LEVEL)
content_resolver.update(policy)
return True
def _raise_if_exists(policy_id):
policy = content_resolver.get(Policy, _id=policy_id)
if policy:
raise PolicyExistsException
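# Illustrative (assumed) usage sketch -- the policy id and point value are
# hypothetical; only the methods defined above are real:
#
#   provider = KarmaLevelProvider()
#   provider.create_policy('forum', DEFAULT_FORMULA, MAX_KARMA_LEVEL)
#   provider.get_level('forum', 1200)   # -> karma level info for 1200 points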
| mnunezdm/cazasteroides | karmaserver/modules/level/provider.py | Python | mit | 3,624 |
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic import View
from ..models.news import News
from ..models.news import Author
from django.contrib.auth.models import User
class NewsBaseView(View):
"""
Base view for view of news
"""
model = News
def get_context_data(self, **kwargs):
context = super(NewsBaseView, self).get_context_data(**kwargs)
context['authors'] = Author.objects.all()
return context
class NewsListView(NewsBaseView, ListView):
model = News
paginate_by = 5
def get_context_data(self, **kwargs):
context = super(NewsListView, self).get_context_data(**kwargs)
return context | PaulWebbster/django-newspaper | newspaper/views/news.py | Python | mit | 732 |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM8_then1_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMM8_then1_CompleteLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMM8_then1_CompleteLHS, self).__init__(name='HMM8_then1_CompleteLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HMM8_then1_CompleteLHS')
self["equations"] = []
# Set the node attributes
# apply class ListenBranch(0.25.a.0ListenBranch) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__ListenBranch"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.25.a.0ListenBranch')
# apply class Pattern(0.25.a.1Pattern) node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """return True"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__Pattern"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.25.a.1Pattern')
# apply association ListenBranch--match-->Patternnode
self.add_node()
self.vs[2]["MT_pre__attr1"] = """return attr_value == "match" """
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["mm__"] = """MT_pre__directLink_T"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.25.a.0ListenBranchassoc20.25.a.1Pattern')
self["equations"].append(((0,'pivot'),('constant','ListenBranch999e149eListenBranch')))
# Add the edges
self.add_edges([
(0,2), # apply class ListenBranch(0.25.a.0ListenBranch) -> association match
(2,1), # association Pattern -> apply class Pattern(0.25.a.1Pattern)
])
# define evaluation methods for each match class.
# define evaluation methods for each apply class.
def eval_attr11(self, attr_value, this):
return True
def eval_attr12(self, attr_value, this):
return True
# define evaluation methods for each match association.
# define evaluation methods for each apply association.
def eval_attr13(self, attr_value, this):
return attr_value == "match"
def constraint(self, PreNode, graph):
return True
| levilucio/SyVOLT | UMLRT2Kiltera_MM/Properties/from_MPS/HMM8_then1_CompleteLHS.py | Python | mit | 2,393 |
from interpreter import expression_gen
if __name__ == '__main__':
    print('Welcome to the Calculator')
    state = expression_gen()
    input_value = None
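    # Illustrative note: the interpreter is driven as a coroutine. Each send()
    # hands back the previous input and yields the next prompt text, and the
    # final result arrives via StopIteration.value once the expression is
    # complete (expression_gen itself lives in the interpreter module, which
    # is not shown here).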
    while True:
        try:
            input_value = input('enter ' + state.send(input_value) + ': ')
        except StopIteration as e:
            print('result:', e.value)
            break
| nwilbert/async-examples | calculator/cmd.py | Python | mit | 355 |
__all__ = [
'Task',
]
import inspect
import logging
import sys
from g1.bases import classes
from g1.bases.assertions import ASSERT
from . import errors
from . import traps
LOG = logging.getLogger(__name__)
# Python 3.4 implements PEP 442 for safe ``__del__``.
ASSERT.greater_or_equal(sys.version_info, (3, 4))
class Task:
"""Task object.
A ``Task`` object wraps a coroutine object, and is the basic unit
of scheduling. It is modelled after the ``Future`` object, which is
commonly used for wrapping a ``Thread`` object. There are a few
notable differences between ``Task`` and ``Future``:
* ``Task`` is cancellable due to its cooperative nature, but
``Future`` is not because threads in general are not cancellable.
* ``get_result`` and ``get_exception`` do not take a ``timeout``
argument. While it is possible to add a ``timeout`` argument, as
a convention we would prefer not to.
NOTE: Although task is cancellable, this should be the last resort
because a cancel only takes effect on the task's next blocking trap,
and this may take much longer than desired; for example, if a task
is sending through a socket and the socket's buffer is somehow never
full, this task may never be blocked and stay running forever.
"""
@staticmethod
def is_coroutine(coro):
# ``types.coroutine`` returns a generator function.
return inspect.iscoroutine(coro) or inspect.isgenerator(coro)
def __init__(self, kernel, coroutine):
# In case ``__init__`` raises.
self._coroutine = None
self._kernel = kernel
self._coroutine = ASSERT.predicate(coroutine, self.is_coroutine)
self._num_ticks = 0
self._completed = False
self._result = None
self._exception = None
self._callbacks = []
self._joined = False
def __del__(self):
# You have to check whether ``__init__`` raises.
if self._coroutine is None:
return
if not self._joined:
# Call ``repr`` to force formatting ``self`` here to avoid
# resurrecting ``self``.
LOG.warning(
'task is garbage-collected but never joined: %s', repr(self)
)
__repr__ = classes.make_repr(
'{self._coroutine!r} ticks={self._num_ticks} '
'{state} {self._result!r} {self._exception!r}',
state=lambda self: 'completed' if self._completed else 'uncompleted',
)
def is_completed(self):
return self._completed
def cancel(self):
# Add ``Task.cancel`` for convenience.
self._kernel.cancel(self)
async def join(self):
self._joined = True
await traps.join(self)
async def get_result(self):
await self.join()
return self.get_result_nonblocking()
async def get_exception(self):
await self.join()
return self.get_exception_nonblocking()
def get_result_nonblocking(self):
ASSERT.true(self.is_completed())
self._joined = True
if self._exception:
raise self._exception
return self._result
def get_exception_nonblocking(self):
ASSERT.true(self.is_completed())
self._joined = True
return self._exception
#
# Package-private interface.
#
def tick(self, trap_result, trap_exception):
"""Run coroutine through the next trap point.
NOTE: ``tick`` catches ``BaseException`` raised from the
coroutine. As a result, ``SystemExit`` does not bubble up to
the kernel event loop. I believe this behavior is similar to
the Python threading library and thus more expected (``SystemExit``
raised in a non-main thread does not cause the CPython process to
exit). If you want raising ``SystemExit`` in a task to be
effective, you have to call ``Task.get_result_nonblocking`` in
the main thread (or implicitly through ``Kernel.run``).
"""
ASSERT.false(self._completed)
if trap_exception:
trap = self._tick(self._coroutine.throw, trap_exception)
else:
trap = self._tick(self._coroutine.send, trap_result)
if trap is not None:
return trap
ASSERT.true(self._completed)
self._call_callbacks()
return None
def abort(self):
"""Close the running coroutine.
This is the last resort for releasing resources acquired by the
coroutine, not a part of normal task cleanup. One good place to
call ``abort`` is when kernel is closing.
"""
if self._completed:
return
LOG.warning('abort task: %r', self)
# ``close`` returns None on success, and raises RuntimeError
# when the coroutine cannot be aborted.
ASSERT.none(self._tick(self._coroutine.close))
if self._completed:
if (
isinstance(self._exception, RuntimeError)
and str(self._exception) == 'coroutine ignored GeneratorExit'
):
LOG.warning('task cannot be aborted: %r', self)
self._completed = False
self._exception = None
else:
self._call_callbacks()
else:
self._completed = True
self._exception = errors.Cancelled('task abort')
self._call_callbacks()
def _tick(self, func, *args):
try:
self._num_ticks += 1
return func(*args)
except errors.TaskCancellation as exc:
self._completed = True
self._exception = errors.Cancelled()
self._exception.__cause__ = exc
except StopIteration as exc:
self._completed = True
self._result = exc.value
except BaseException as exc:
self._completed = True
self._exception = exc
return None
def _call_callbacks(self):
ASSERT.true(self._completed)
callbacks, self._callbacks = self._callbacks, None
for callback in callbacks:
self._call_callback(callback)
def add_callback(self, callback):
if self._completed:
self._call_callback(callback)
else:
self._callbacks.append(callback)
def _call_callback(self, callback):
try:
callback(self)
except Exception:
LOG.exception('callback err: %r, %r', self, callback)
| clchiou/garage | py/g1/asyncs/kernels/g1/asyncs/kernels/tasks.py | Python | mit | 6,511 |
#!/usr/bin/env python
'''
Sentry integration module
'''
from raven import Client
client = Client(
    # dsn url
    dsn='https://3dc3f3d21a554b4795c06b5f8a21ac02:[email protected]/147629',
    # version
    release='0.4.0')
| PhantomGhosts/BigBrother | lib/sentry/sentry.py | Python | mit | 259 |
"""
The Pastebin Channel v1.2
Scrolls the contents of Pastebin's recent public pastes along a simple Pygame
window, allowing for kiosk style display of Pastebin's content.
"""
import urllib2
import pygame
import time
import webbrowser
from pygame.locals import *
from HTMLParser import HTMLParser
# Various editable options
print_help = True
win_width = 800
win_height = 600
fps = 30
scroll_rate = 1
font_face = ["DejaVu Sans Mono", 14]
font_color = (255, 255, 255)
bg_color = (0, 0, 0)
max_linewidth = 250
# Do not edit below this line
quit = False
paused = False
move_frame = False
raw_url = "http://www.pastebin.com/raw.php?i="
archive_url = "http://pastebin.com/archive"
text_lines = []
parser = None
class Line:
"""
A single line to be displayed in the window, represented by a single pygame
surface. Metadata stored so events that necessitate re-creation of the
surface (resize, mostly) can be executed quickly.
"""
destroy = False
content = None
line = None
text = None
color = None
x = 5
y = 5
def __init__(self, content, color=font_color, link=None):
# Truncate the line, since a surface of sufficient width can crash pygame
if len(content) > max_linewidth:
content = content[0:max_linewidth]
self.content = text_font.render(content, 1, color, bg_color)
self.color = color
self.text = content
self.link = link
# Place the line at the bottom of the view, directly under the last
if not len(text_lines):
self.y = win_height
else:
self.y = text_lines[-1].y + font_face[1] + 2
def update(self):
"""
Move the line the appropriate number of pixels for a single refresh,
then re-blit it.
"""
self.y -= scroll_rate
if self.y < -win_height:
self.destroy = True
return
elif self.y <= win_height:
screen.blit(self.content, (self.x, self.y))
def check_click(self, pos):
"""
Check if a given position is within this line and, if so, launch the
user's default web browser to open the paste as is on pastebin.com
"""
if pos[1] >= self.y and pos[1] < self.y + font_face[1]:
if self.link is not None:
webbrowser.open(self.link)
class ArchiveParser(HTMLParser):
"""
Parses the Pastebin Archive at http://www.pastebin.com/archive and returns
a dict mapping paste_id to (format, title).
Since the Pastebin API lacks functionality to do what we want, we have to
page scum (I know, bad!).
"""
results = {} # PasteID => (Format, Title)
this_result = "" # Temporary storage for the Paste ID of a result
this_title = "" # Temporary storage for the Title of a result
parsing_table = False
parsing_title = False
def handle_starttag(self, tag, attrs):
# If parsing_table is true, we're in the list of Pastes
if self.parsing_table:
if tag == "a":
# Find all hrefs and build a dict of pastes
for attr in attrs:
if attr[0] == "href":
# hrefs starting with /archive indicate the end of paste data, and also the paste format
if attr[1].startswith("/archive"):
self.results[self.this_result] = (attr[1].split("/")[2], self.this_title)
# Otherwise the href is the paste ID, the data of this <a> is the title
else:
self.this_result = attr[1][1:]
self.parsing_title = True
elif tag == "table" and ('class', 'maintable') in attrs:
self.parsing_table = True
def handle_endtag(self, tag):
# If we find the end of the table of Pastes, we can stop parsing
if self.parsing_table and tag == "table":
self.parsing_table = False
def handle_data(self, data):
# Grab the Title of a Paste and then stop looking for Titles
if self.parsing_title:
self.this_title = data
self.parsing_title = False
def get_paste(paste_id):
"""
Grabs the raw data of the paste with ID paste_id and returns it.
"""
paste_url = raw_url + paste_id
req = urllib2.Request(paste_url)
response = urllib2.urlopen(req)
text = response.read()
try:
# Try to encode in unicode, using the content-type header
encoding = response.headers['content-type'].split('charset=')[-1]
text = unicode(text, encoding)
except:
# If it fails we're not horribly concerned...
pass
return text
def redraw_lines():
"""
In the event of a text resize, we need to redraw all lines. This dumps line
metadata to a temporary list, then redraws lines one by one as surfaces.
"""
global text_lines
text_cache = [(line.text, line.color) for line in text_lines if not line.destroy]
text_lines = []
for line in text_cache:
text_lines.append(Line(line[0], color=line[1]))
scroll_all(-win_height)
def scroll_all(distance):
"""
Loops through the list of lines and moves them 'distance' pixels up.
"""
for line in text_lines:
line.y += distance
global move_frame
move_frame = True
def generate_output(paste_id):
"""
Calls the functions to grab a paste from Pastebin, then parse and append it
to the list of lines to be drawn.
"""
try:
for line in get_paste(paste_id).split('\n'):
line = line.replace("\t", " ")
line = line.replace("\r", "")
line = line.replace("\n", "")
text_lines.append(Line(line, link="http://www.pastebin.com/%s" % paste_id))
except:
pass
# Setup stuff
pygame.init()
screen = pygame.display.set_mode((win_width, win_height), RESIZABLE)
pygame.display.set_caption("Pastebin Channel")
text_font = pygame.font.SysFont(*font_face)
clock = pygame.time.Clock()
# Print help at the top. Probably should be an external file
if print_help:
text_lines.append(Line("PASTEBIN ROULETTE v1.2", color=(255, 255, 0)))
text_lines.append(Line("Now with Unicode support (If your font supports it) and click-to-open support!", color=(255, 255, 0)))
text_lines.append(Line("", color=(255, 255, 0)))
text_lines.append(Line("UPARROW: Scroll Up", color=(255, 255, 0)))
text_lines.append(Line("DOWNARROW: Scroll Down", color=(255, 255, 0)))
text_lines.append(Line("KEYPAD +: Increase Font Size", color=(255, 255, 0)))
text_lines.append(Line("KEYPAD -: Decrease Font Size", color=(255, 255, 0)))
text_lines.append(Line("KEYPAD *: Increase Scroll Speed", color=(255, 255, 0)))
text_lines.append(Line("KEYPAD /: Decrease Scroll Speed", color=(255, 255, 0)))
text_lines.append(Line("SPACE: Pause / Resume Scrolling", color=(255, 255, 0)))
text_lines.append(Line("CLICK: Open clicked Paste in default web browser", color=(255, 255, 0)))
text_lines.append(Line("Escape: Quit", color=(255, 255, 0)))
while not quit:
for event in pygame.event.get():
# Window resized, resize the canvas to match
if event.type == VIDEORESIZE:
win_width = event.w
win_height = event.h
screen = pygame.display.set_mode((win_width, win_height), RESIZABLE)
elif event.type == QUIT:
quit = True
elif event.type == KEYDOWN:
# Space = Pause / Resume
if event.key == K_SPACE:
paused = False if paused else True
# Escape = Quit
elif event.key == K_ESCAPE:
quit = True
# Minus = Shrink text
elif event.key == K_KP_MINUS:
font_face[1] = font_face[1] - 2
text_font = pygame.font.SysFont(*font_face)
redraw_lines()
# Plus = Enlargen text
elif event.key == K_KP_PLUS:
font_face[1] = font_face[1] + 2
text_font = pygame.font.SysFont(*font_face)
redraw_lines()
# Asterisk = Increase scroll speed
elif event.key == K_KP_MULTIPLY:
scroll_rate += 1
# Slash = Decrease scroll speed
elif event.key == K_KP_DIVIDE:
scroll_rate -= 1
# Up = Scroll up
elif event.key == K_UP:
scroll_all(font_face[1] * 10)
# Down = Scroll down
elif event.key == K_DOWN:
scroll_all(-font_face[1] * 10)
# End = Scroll to end of loaded content
elif event.key == K_END:
if len(text_lines):
scroll_all(win_height - text_lines[-1].y)
elif event.type == MOUSEBUTTONDOWN:
if event.button == 1:
for line in text_lines:
line.check_click(event.pos)
clock.tick(fps)
# !pause or move_frame means we need to redraw stuff
if not paused or move_frame:
move_frame = False
screen.fill(bg_color)
# Move and redraw each line
for line in text_lines:
line.update()
# If our buffer of lines is empty, grab another Paste
if len(text_lines) == 0 or text_lines[-1].y < win_height:
# If we're out of Pastes, grab the Archive page again
if parser is None or len(parser.results.keys()) == 0:
try:
req = urllib2.Request(archive_url)
response = urllib2.urlopen(req)
output = response.read()
try:
encoding = response.headers['content-type'].split('charset=')[-1]
output = unicode(output, encoding)
except:
pass
parser = ArchiveParser()
parser.feed(output)
except Exception:
time.sleep(10)
# Grab a (Kind of random) key from the result dict
next_result = parser.results.keys()[0]
this_url = "http://www.pastebin.com/%s" % next_result
text_lines.append(Line(""))
text_lines.append(Line("###############################", color=(0, 255, 0), link=this_url))
text_lines.append(Line("TITLE: %s" % parser.results[next_result][1], color=(0, 255, 0), link=this_url))
text_lines.append(Line("FORMAT: %s" % parser.results[next_result][0], color=(0, 255, 0), link=this_url))
text_lines.append(Line(this_url, color=(0, 255, 0), link=this_url))
text_lines.append(Line("###############################", color=(0, 255, 0), link=this_url))
text_lines.append(Line("", link=this_url))
# Generate lines of text from the selected Paste then delete it from the result list
generate_output(next_result)
del(parser.results[next_result])
# Remove any lines from memory that are scrolled past a certain threshold
text_lines = [line for line in text_lines if not line.destroy]
pygame.display.flip()
| trysdyn/pastebin-channel | pastebin.py | Python | mit | 11,275 |
import itertools
l = [str(x) for x in range(10)]
s = 0
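# Project Euler 43 (sub-string divisibility): for a 0-9 pandigital number
# d1..d10, require d2d3d4 % 2 == 0, d3d4d5 % 3 == 0, d4d5d6 % 5 == 0,
# d5d6d7 % 7 == 0, d6d7d8 % 11 == 0, d7d8d9 % 13 == 0 and d8d9d10 % 17 == 0.
# The nested checks below mirror those conditions (sv[5] in '05' is the
# divisible-by-5 test, sv[3] even is the divisible-by-2 test) and the sum of
# all qualifying permutations is accumulated in s.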
for p in itertools.permutations(l):
    sv = ''.join(p)
    if sv[5]=='0' or sv[5]=='5':
        if int(sv[3])%2==0:
            if int(sv[2:5])%3==0:  # i.e. sum(int(x) for x in sv[2:5]) % 3 == 0
                if int(sv[4:7])%7==0:
                    if int(sv[5:8])%11==0:
                        if int(sv[6:9])%13==0:
                            if int(sv[7:10])%17==0:
                                #print sv
                                s += int(sv)
print s
| shashankp/projecteuler | 43.py | Python | mit | 546 |
import numpy as np
import pandas as pd
from keras.models import Model
from keras.layers import Embedding, Dense, Input
from keras.layers.recurrent import LSTM
from keras.preprocessing import sequence
from os.path import join
class KerasLSTM_1L_1i_emb_lex:
""" Represents concatenation of vectors from embedding with lexicon features
"""
def __init__(self, w2v_models, max_sequence_length):
"""
w2v_models : list
list of word2vec models
"""
self.W2V_MODELS = w2v_models
self.TERMS_SEQUENCE_LENGTH = max_sequence_length
self.lexicons = self.__create_lexicons()
self.LEXICONS_COUNT = len(self.lexicons)
def message_vectorizer(self, labeled_message, term_voc, doc_voc):
"""
Vector builder
labeled_message : dict
dictionary with the following fields: {score, id, terms, features}
term_voc : core.TermVocabulary
vocabulary of terms
doc_voc : core.DocVocabulary
vocabulary of documents
returns : []
concatenation of embedding and feature vectors
"""
# terms
terms = labeled_message['terms']
terms_vector = np.zeros(self.TERMS_SEQUENCE_LENGTH)
i = 0
offset = 0
for model in self.W2V_MODELS:
for term in terms:
if i >= self.TERMS_SEQUENCE_LENGTH:
break
if (term in model.vocab):
terms_vector[i] = model.vocab.get(term).index + offset
i += 1
offset += len(model.vocab)
return {'terms': terms_vector}
def fit(self, train_problem, epochs, batch_size):
"""
Train model using 'train_problem'
"""
# initialize model
self.model = self.__build(
self.W2V_MODELS,
self.TERMS_SEQUENCE_LENGTH + self.LEXICONS_COUNT)
# fit
x_train, y_train = self.__problem_vectorizer(train_problem, 'train')
self.model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size)
def predict(self, test_problem, batch_size):
"""
returns : (nd.array, nd.array)
network output and according message ids
"""
x_test, ids = self.__problem_vectorizer(test_problem, 'test')
y_test = self.model.predict(x_test, batch_size=batch_size)
return (y_test, ids)
@staticmethod
def __problem_vectorizer(problem, collection_type):
"""
problem: list [{label, vector}, ... ]
List of vectorized messages. Each message presented as list where
first element is a 'score' or 'id' (depending on the 'train' or
'score' dataset accordingly) and the secont (latter) is a vector --
embedded sentence (obtained by vectorizer)
collection_type: str
'test' or 'train'
returns: (terms, lables)
"""
terms = []
labels = []
for message in problem:
terms.append(message[1])
if (collection_type == 'train'):
y = np.zeros(3)
y[message[0] + 1] = 1
labels.append(y) # class as a label
if (collection_type == 'test'):
labels.append(message[0]) # message ID
return np.vstack(terms), np.vstack(labels)
@staticmethod
def __create_embedding_matrix(w2v_models, lexicons):
"""
creates matrix (words_count, embedding_size) based on list of word2vec
models
w2v_model : list
list of gensim.models.word2vec.Word2Vec models
returns: np.ndarray
shape (words_count, embedding_size)
"""
vector_size = max(m.vector_size for m in w2v_models)
words_count = sum(len(m.vocab) for m in w2v_models)
width = vector_size + len(lexicons)
matrix = np.zeros((words_count, width))
offset = 0
for w2v_model in w2v_models:
for word, info in w2v_model.vocab.items():
index = info.index
w = sequence.pad_sequences(
[w2v_model.syn0[index]], width, padding='post')
lw = KerasLSTM_1L_1i_emb_lex.__lexicon_weights(word, lexicons)
for i, e in enumerate(lw):
w[0][w.shape[1] - len(lw) + i] = e
matrix[offset + index] = w
offset += len(w2v_model.vocab)
return matrix
@staticmethod
def __lexicon_weights(term, lexicons):
term = term.encode('utf-8')
v = np.zeros(len(lexicons))
for i, l in enumerate(lexicons):
s = l[term == l['term']]
if (len(s) > 0):
v[i] = s['tone'].values[0]
return v
def __build(self, w2v_models, input_length):
"""
w2v_models : list
list of gensim.models.word2vec.Word2Vec models
"""
input_1 = Input(shape=(input_length,),
dtype='int32',
name='terms_input')
weights = self.__create_embedding_matrix(w2v_models, self.lexicons)
embedding_layer = Embedding(
weights.shape[0],
weights.shape[1],
weights=[weights],
input_length=input_length,
trainable=False)(input_1)
lstm_layer = LSTM(200)(embedding_layer)
network_output = Dense(3, activation='softmax')(lstm_layer)
model = Model(input_1, network_output)
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print model.summary()
return model
def __create_lexicons(self):
root = "../data/lexicons/"
names = ["experts_lexicon.csv",
"feb_june_16_lexicon.csv",
"rubtsova_lexicon.csv",
"mtd_rus_lexicon.csv"]
lexicons = []
for name in names:
fp = join(root, name)
print "reading lexicon: {}".format(name)
lexicons.append(pd.read_csv(fp, sep=','))
return lexicons
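# Illustrative (assumed) usage sketch -- the variable names and sizes are
# hypothetical; only the methods defined above are real:
#
#   clf = KerasLSTM_1L_1i_emb_lex(w2v_models=[model_a, model_b],
#                                 max_sequence_length=40)
#   clf.fit(train_problem, epochs=5, batch_size=32)
#   scores, ids = clf.predict(test_problem, batch_size=32)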
| nicolay-r/tone-classifier | models/networks/keras/lstm_1l_1i_emb_lex.py | Python | mit | 6,210 |
import pytest
from mock import mock
from bomb_defusal.modules.morse_code import MorseCode
class TestMorseCode(object):
@staticmethod
def _assert_disarm_success(subject, frequency):
subject.on_wrong_try = mock.MagicMock()
TestMorseCode._find_frequency(subject, frequency)
subject.transmit()
assert subject.disarmed
subject.on_wrong_try.assert_not_called()
@staticmethod
def _assert_disarm_failure(subject, frequency):
subject.on_wrong_try = mock.MagicMock()
TestMorseCode._find_frequency(subject, frequency)
subject.transmit()
assert not subject.disarmed
subject.on_wrong_try.assert_called()
@staticmethod
def _find_frequency(subject, frequency):
for _ in range(len(subject.frequencies)):
if subject.frequency == frequency:
break
subject.previous()
else:
for _ in range(len(subject.frequencies)):
if subject.frequency == frequency:
break
subject.next()
def test_word(self):
assert 'brick' == MorseCode(None, False, 'brick').word
def test_code(self):
subject = MorseCode(None, False, 'strobe')
assert ['...', '−', '.−.', '−−−', '−...', '.'] == subject.code
@pytest.mark.parametrize('word,frequency', [
('slick', '3.522 MHz'),
('leaks', '3.542 MHz'),
('flick', '3.555 MHz')
])
def test_correct_frequency(self, word, frequency):
subject = MorseCode(None, False, word)
self._assert_disarm_success(subject, frequency)
@pytest.mark.parametrize('word,frequency', [
('break', '3.592 MHz'),
('sting', '3.600 MHz'),
('beats', '3.572 MHz')
])
def test_wrong_frequency(self, word, frequency):
subject = MorseCode(None, False, word)
self._assert_disarm_failure(subject, frequency)
def test_invalid_word(self):
with pytest.raises(ValueError):
MorseCode(None, False, 'Invalid')
def test_index(self):
subject = MorseCode(None, False, 'break')
start_index = subject.index
subject.next()
assert start_index + 1 == subject.index
subject.previous()
subject.previous()
assert start_index - 1 == subject.index
def test_disarmed_no_action(self):
subject = MorseCode(None, True, 'break')
start_index = subject.index
subject.next()
assert start_index == subject.index
subject.previous()
assert start_index == subject.index
| leupibr/BombDefusal | tests/modules/test_morse_code.py | Python | mit | 2,619 |
from django_mongo_rest.models import BaseModel
from django_mongo_rest.utils import Enum
from django_mongoengine.mongo_auth.managers import get_user_document
from mongoengine import DynamicDocument, StringField, ReferenceField, ObjectIdField
class ACTIONS(Enum):
CREATE = 'C'
UPDATE = 'U'
DELETE = 'D'
class Audit(BaseModel, DynamicDocument):
meta = {
'indexes': ['doc_id']
}
user = ReferenceField(get_user_document())
model = StringField()
doc_id = ObjectIdField()
action = StringField(choices=ACTIONS.choices_dict().items())
def create(request, action, model_class, doc, extra_data):
audit_doc = {
'user': request.user.id,
'model': model_class.get_collection_name(),
'action': action.value,
'doc_id': doc['_id'] if isinstance(doc, dict) else doc,
}
audit_doc.update(extra_data)
Audit.insert_one(audit_doc)
def update(request, model_class, doc, updates):
for k, v in updates.iteritems():
doc[k] = v
model_class.update_by_id(doc['_id'], **updates)
create(request, ACTIONS.UPDATE, model_class, doc, updates)
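# Illustrative (assumed) usage from application code -- the model class and
# field values are hypothetical:
#
#   update(request, Order, order_doc, {'status': 'shipped'})
#   # applies the changes via Order.update_by_id and records an Audit entry
#   # with action 'U' (ACTIONS.UPDATE)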
| TrueSkills/django-mongo-rest | django_mongo_rest/audit.py | Python | mit | 1,126 |
# manage.py
import os
import unittest
import coverage
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from project import app, db
from project.models import User
app.config.from_object(os.environ['APP_SETTINGS'])
migrate = Migrate(app, db)
manager = Manager(app)
# migrations
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Runs the unit tests without coverage."""
tests = unittest.TestLoader().discover('tests')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
else:
return 1
@manager.command
def cov():
"""Runs the unit tests with coverage."""
cov = coverage.coverage(branch=True, include='project/*')
cov.start()
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
cov.stop()
cov.save()
print('Coverage Summary:')
cov.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
cov.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
cov.erase()
@manager.command
def create_db():
"""Creates the db tables."""
db.create_all()
@manager.command
def drop_db():
"""Drops the db tables."""
db.drop_all()
@manager.command
def create_admin():
"""Creates the admin user."""
db.session.add(User("[email protected]", "admin"))
db.session.commit()
if __name__ == '__main__':
manager.run()
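# Example invocations (assuming this file is saved as manage.py):
#   python manage.py create_db     # create the database tables
#   python manage.py create_admin  # seed the default admin user
#   python manage.py cov           # run the test suite with a coverage report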
| mjhea0/flask-basic-registration | manage.py | Python | mit | 1,546 |
# -*- coding: utf-8 -*-
"""
news_data.service.stats_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Module that provides statistics about data in the DB. Useful to
see what's loaded in the DB, and to monitor + debug the processing
pipeline.
:license: MIT, see LICENSE for more details.
"""
from datetime import datetime
from datetime import timedelta
from db import mongo
db_raw_articles = None
db_parsed_articles = None
db_analyzed_articles = None
db_metric_data_daily = None
db_metric_data_monthly = None
def init_db():
global db_raw_articles, db_parsed_articles, db_analyzed_articles,\
db_metric_data_daily, db_metric_data_monthly
# Init DB
db_raw_articles = mongo.get_raw_articles()
db_parsed_articles = mongo.get_parsed_articles()
db_analyzed_articles = mongo.get_analyzed_articles()
db_metric_data_daily = mongo.get_metric_data_daily()
db_metric_data_monthly = mongo.get_metric_data_monthly()
def get_collection_counts(time_start, time_end, verbose=False):
""" Get counts of each collection.
"""
start_time = datetime.now()
time_bound_query = create_time_bound_query(time_start, time_end)
response = {
"total_parsed_articles" :
db_parsed_articles.find(time_bound_query).count(),
"total_analyzed_articles" :
db_analyzed_articles.find(time_bound_query).count(),
"total_metric_data_daily" :
db_metric_data_daily.count(), #TODO: Limit to time range
"total_metric_data_monthly" :
db_metric_data_monthly.count() #TODO: Limit to time range
}
# Raw articles are a special case, requires an aggregation
response["total_raw_articles"] = db_raw_articles.aggregate([
{"$match" : time_bound_query},
{"$project" : {"count" : "$count"}},
{"$group" : {"_id" : 1, "total_count" : {"$sum" : "$count"}}}
]).get("result")[0].get("total_count")
if verbose:
print " - %sms for get_collection_counts()" % \
(datetime.now() - start_time)
return response
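# Illustrative (assumed) usage -- init_db() must run once before querying, and
# the date range below is hypothetical:
#
#   init_db()
#   counts = get_collection_counts(datetime(2014, 1, 1), datetime(2014, 7, 1))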
def get_raw_articles_stats(time_start, time_end, verbose=False):
""" Get stats about raw_articles from mongoDB
"""
start_time = datetime.now()
time_bound_query = create_time_bound_query(time_start, time_end)
response = {"count" : []}
for count in db_raw_articles.find(time_bound_query).sort("published"):
response["count"].append(count["count"])
if verbose:
print " - %sms for get_raw_articles_stats()" % \
(datetime.now() - start_time)
return response
def get_parsed_articles_stats(time_start, time_end, verbose=False):
""" Get stats about parsed_articles from mongoDB
"""
start_time = datetime.now()
time_bound_query = create_time_bound_query(time_start, time_end)
parsed_articles_stats = db_parsed_articles.aggregate([
{"$match" : time_bound_query},
{"$project" : {
"date" : {
"y" : { "$year" : "$published" },
"m" : { "$month" : "$published" }},
"size_raw" : "$size_raw",
"size_parsed" : "$size_parsed",
"size_ratio" : "$size_ratio"
}},
{"$group" : {
"_id" : "$date",
"count" : {"$sum" : 1},
"size_raw_sum" : {"$sum" : "$size_raw"},
"size_raw_avg" : {"$avg" : "$size_raw"},
"size_raw_min" : {"$min" : "$size_raw"},
"size_raw_max" : {"$max" : "$size_raw"},
"size_parsed_sum" : {"$sum" : "$size_parsed"},
"size_parsed_avg" : {"$avg" : "$size_parsed"},
"size_parsed_min" : {"$min" : "$size_parsed"},
"size_parsed_max" : {"$max" : "$size_parsed"},
"size_ratio_avg" : {"$avg" : "$size_ratio"},
"size_ratio_min" : {"$min" : "$size_ratio"},
"size_ratio_max" : {"$max" : "$size_ratio"}
}},
{"$sort" : {"_id" : 1}}
])
# Init the response object, then add each data point
response = {
"count" : [],
"size_raw_sum" : [],
"size_raw_avg" : [],
"size_raw_min" : [],
"size_raw_max" : [],
"size_parsed_sum" : [],
"size_parsed_avg" : [],
"size_parsed_min" : [],
"size_parsed_max" : [],
"size_ratio_avg" : [],
"size_ratio_min" : [],
"size_ratio_max" : []
}
for dp in parsed_articles_stats["result"]:
response["count"].append(dp["count"])
response["size_raw_sum"].append(dp["size_raw_sum"])
response["size_raw_avg"].append(dp["size_raw_avg"])
response["size_raw_min"].append(dp["size_raw_min"])
response["size_raw_max"].append(dp["size_raw_max"])
response["size_parsed_sum"].append(dp["size_parsed_sum"])
response["size_parsed_avg"].append(dp["size_parsed_avg"])
response["size_parsed_min"].append(dp["size_parsed_min"])
response["size_parsed_max"].append(dp["size_parsed_max"])
response["size_ratio_avg"].append(dp["size_ratio_avg"])
response["size_ratio_min"].append(dp["size_ratio_min"])
response["size_ratio_max"].append(dp["size_ratio_max"])
# To work around RaphaelJS Graph bug, show ratio
# as 0-100 instead # of 0.0 to 1.0
for i in range (len(response["size_ratio_avg"])):
response["size_ratio_avg"][i] = int(response["size_ratio_avg"][i]*100)
response["size_ratio_min"][i] = int(response["size_ratio_min"][i]*100)
response["size_ratio_max"][i] = int(response["size_ratio_max"][i]*100)
if verbose:
print " - %sms for get_parsed_articles_stats()" % \
(datetime.now() - start_time)
return response
def get_analyzed_articles_stats(time_start, time_end, verbose=False):
""" Get stats about analyzed_articles from mongoDB
"""
start_time = datetime.now()
time_bound_query = create_time_bound_query(time_start, time_end)
analyzed_articles_stats = db_analyzed_articles.aggregate([
{"$match" : time_bound_query},
{"$project" : {
"date" : {
"y" : { "$year" : "$published" },
"m" : { "$month" : "$published" }},
"unique_terms_count" : "$unique_terms_count",
"total_terms_count" : "$total_terms_count"
}},
{"$group" : {
"_id" : "$date",
"count" : {"$sum" : 1},
"unique_terms_sum" : {"$sum" : "$unique_terms_count"},
"unique_terms_avg" : {"$avg" : "$unique_terms_count"},
"unique_terms_min" : {"$min" : "$unique_terms_count"},
"unique_terms_max" : {"$max" : "$unique_terms_count"},
"total_terms_sum" : {"$sum" : "$total_terms_count"},
"total_terms_avg" : {"$avg" : "$total_terms_count"},
"total_terms_min" : {"$min" : "$total_terms_count"},
"total_terms_max" : {"$max" : "$total_terms_count"}
}},
{"$sort" : {"_id" : 1}}
])
# Init the response object, then add each data point
response = {
"count" : [],
"unique_terms_sum" : [],
"unique_terms_avg" : [],
"unique_terms_min" : [],
"unique_terms_max" : [],
"total_terms_sum" : [],
"total_terms_avg" : [],
"total_terms_min" : [],
"total_terms_max" : []
}
for dp in analyzed_articles_stats["result"]:
response["count"].append(dp["count"])
response["unique_terms_sum"].append(dp["unique_terms_sum"])
response["unique_terms_avg"].append(dp["unique_terms_avg"])
response["unique_terms_min"].append(dp["unique_terms_min"])
response["unique_terms_max"].append(dp["unique_terms_max"])
response["total_terms_sum"].append(dp["total_terms_sum"])
response["total_terms_avg"].append(dp["total_terms_avg"])
response["total_terms_min"].append(dp["total_terms_min"])
response["total_terms_max"].append(dp["total_terms_max"])
if verbose:
print " - %sms for get_analyzed_articles_stats()" % \
(datetime.now() - start_time)
return response
def get_metric_data_daily_stats(time_start, time_end, verbose=False):
start_time = datetime.now()
if verbose:
print " - %sms for get_metric_data_daily_stats()" % \
(datetime.now() - start_time)
return {}
def get_metric_data_monthly_stats(time_start, time_end, verbose=False):
start_time = datetime.now()
if verbose:
print " - %sms for get_metric_data_monthly_stats()" % \
(datetime.now() - start_time)
return {}
def create_time_bound_query(time_start, time_end):
    # Both bounds must live under a single "published" key: a dict literal that
    # repeats the key would silently keep only the last bound.
    return {
        "published" : {"$gte" : time_start, "$lte" : time_end}
    }
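# Illustrative shape of the query built above (placeholder dates only):
#     create_time_bound_query(datetime(2014, 1, 1), datetime(2014, 2, 1))
#     => {"published": {"$gte": datetime(2014, 1, 1), "$lte": datetime(2014, 2, 1)}}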
# Initialize connection to DB when loading module
init_db() | lbracken/news_data | service/stats_service.py | Python | mit | 8,922 |
from django.conf.urls import patterns, include, url
from django.conf import settings
#from django.conf.urls.static import static
#from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
def bad(request):
""" Simulates a server error """
1 / 0
urlpatterns = patterns('',
# Examples:
# url(r'^$', TemplateView.as_view(template_name='base.html')),
# url(r'^$', '{{ project_name }}.views.home', name='home'),
# url(r'^{{ project_name }}/', include('{{ project_name }}.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^grappelli/', include('grappelli.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^bad/$', bad),
url(r'', include('base.urls')),
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns += patterns('',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
| mmarksnippety/django-project-template | project_name/project_name/urls.py | Python | mit | 1,236 |
import json
from nowallet.exchange_rate import CURRENCIES
def settings_json(coin="BTC"):
return json.dumps(
[
{
"type": "bool",
"title": "RBF",
"desc": "Use opt in replace by fee?",
"section": "nowallet",
"key": "rbf"
}, {
"type": "options",
"title": "Coin Units",
"desc": "Preferred Bitcoin denomination",
"section": "nowallet",
"key": "units",
"options": [coin, "m{}".format(coin), "u{}".format(coin)]
}, {
"type": "options",
"title": "Currency",
"desc": "Fiat currency for exchange rates",
"section": "nowallet",
"key": "currency",
"options": CURRENCIES
}, {
"type": "options",
"title": "Block Explorer",
"desc": "Preferred block explorer",
"section": "nowallet",
"key": "explorer",
"options": ["blockcypher", "smartbit"]
}, {
"type": "options",
"title": "Price Provider",
"desc": "Preferred price provider",
"section": "nowallet",
"key": "price_api",
"options": ["BitcoinAverage", "CryptoCompare"]
}
]
)
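# Minimal usage sketch (not part of the original module): the returned JSON list
# is in the format Kivy's Settings.add_json_panel() expects; "LTC" below is an
# illustrative coin code, not something the module guarantees to support.
#
#     panel_json = settings_json(coin="LTC")
#     # settings.add_json_panel("nowallet", config, data=panel_json)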
| metamarcdw/nowallet | settings_json.py | Python | mit | 1,469 |
import boto
from boto.mturk.connection import MTurkConnection, MTurkRequestError
from boto.mturk.question import ExternalQuestion
from connection import connect
import urllib
import argparse
import ConfigParser
import sys, os
import time
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('answers_file', nargs=1, type=argparse.FileType('r'), default=sys.stdin, help="File or stdin containing documents paths")
parser.add_argument('config_file', type=str, help="Config file containing parameters to spin the batch")
args = parser.parse_args()
config = ConfigParser.ConfigParser()
config.read(args.config_file)
mtc = connect(config.get('default', 'target'))
answers_file = pd.read_csv(args.answers_file[0], sep='\t')
batchname = args.answers_file[0].name.split('/')[-1]
approved = []
for assignmentId in answers_file['assignmentId']:
try:
x = mtc.approve_assignment(assignmentId)
print "Approved", assignmentId
approved.append({'assignmentId': assignmentId, 'response': x})
except MTurkRequestError as err:
print err
approved_dir = './approved/'
if not os.path.exists(approved_dir):
os.makedirs(approved_dir)
approved_filepath = approved_dir+batchname
pd.DataFrame(approved).to_csv(path_or_buf = approved_filepath, sep = '\t', index=False,
columns=['assignmentId', 'response'],
encoding='utf-8')
| arunchaganty/kbp-online | turkApi/approve_assignments.py | Python | mit | 1,387 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import geoposition.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0013_location_user'),
]
operations = [
migrations.AddField(
model_name='location',
name='position',
field=geoposition.fields.GeopositionField(max_length=42, null=True, blank=True),
),
]
| georgebcservices/coffeedapp | core/migrations/0014_location_position.py | Python | mit | 464 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from friday import friday
import click
click.disable_unicode_literals_warning = True
@click.command()
def main(args=None):
"""Console script for friday"""
assistant = friday.Friday()
while assistant.is_active:
request = assistant.listen()
doable = assistant.think(request)
if doable:
successful = assistant.perform(request)
if successful:
# Decide which response the assistant should return to the user.
# assistant.choose_response()
# Display a response to the user.
assistant.respond()
else:
assistant.apologize()
else:
assistant.refuse()
if __name__ == "__main__":
main()
| Zenohm/Friday | friday/cli.py | Python | mit | 865 |
#!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#
NSEEDS=600
import re
import sys
from subprocess import check_output
def main():
lines = sys.stdin.readlines()
ips = []
pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):33002")
for line in lines:
m = pattern.match(line)
if m is None:
continue
ip = 0
for i in range(0,4):
ip = ip + (int(m.group(i+1)) << (8*(i)))
if ip == 0:
continue
ips.append(ip)
for row in range(0, min(NSEEDS,len(ips)), 8):
print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+8] ]) + ","
if __name__ == '__main__':
main()
| woodedlawn/woodcoin | contrib/seeds/makeseeds.py | Python | mit | 709 |
#!/usr/bin/env python3
"""Launch Polybar in multiple monitors."""
import os
import subprocess
import sys
from time import sleep
def _sh_no_block(cmd, *args, **kwargs):
if isinstance(cmd, str):
cmd = cmd.split()
return subprocess.Popen(cmd, *args, **kwargs)
def _sh(cmd, *args, **kwargs):
res, _ = _sh_no_block(cmd, *args, stdout=subprocess.PIPE, **kwargs).communicate()
return res
def launch_polybar(monitors):
"""Launch polybar taking into account monitor setup."""
# Terminate already running bar instances
_sh('killall -q polybar')
# Wait until the processes have been shut down
while _sh(f'pgrep -u {os.getuid()} -x polybar'):
sleep(0.2)
# Launch the main bar in each monitor but try to set the systray always in
# primary one (overrides polybar's first come first serve rule. See:
# https://github.com/jaagr/polybar/issues/1070)
active_monitors = [
line.decode('ascii').split()
for line in _sh('xrandr --listactivemonitors').splitlines()
]
nr_monitors = int(active_monitors[0][-1])
prim_w = int(active_monitors[1][2].split('/')[0])
all_hidpi = prim_w > HD_WIDTH
if nr_monitors > 1:
sec_w = int(active_monitors[2][2].split('/')[0])
all_hidpi = all_hidpi and sec_w > HD_WIDTH
if nr_monitors > 2:
third_w = int(active_monitors[3][2].split('/')[0])
all_hidpi = all_hidpi and third_w > HD_WIDTH
xrandr = [line.decode('ascii').split() for line in _sh('xrandr').splitlines()]
for line in xrandr:
if 'connected' in line:
if monitors == 'mirror' and 'primary' not in line:
# When mirroring it's enough to show the bar on the primary monitor
continue
monitor = line[0]
width_index = 3 if 'primary' in line else 2
try:
width = int(line[width_index].split('x')[0])
except ValueError:
# If there is no resolution info then the monitor is connected but inactive
continue
env = os.environ.copy()
env['MONITOR'] = monitor
env['POLYHEIGHT'] = '55' if (width > HD_WIDTH) else '28'
env['TRAY_SIZE'] = '32' if (width > HD_WIDTH) else '20'
# If we have a mix of hd and hidpi monitors then we need to scale
fontmap_index = 1
if width > HD_WIDTH and not all_hidpi:
fontmap_index = 2
for i in range(7):
env[f'POLYFONT{i}'] = FONT_MAP[i][0].format(*FONT_MAP[i][fontmap_index])
env['TRAY_POS'] = 'right' if 'primary' in line else ''
_sh_no_block('polybar --reload main', env=env)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--monitors', '-m', nargs='?', default='xrandr')
parse_args = parser.parse_args()
HD_WIDTH = 1920
FONT_MAP = {
0: ('Noto Sans:size={};3', ['11'], ['21']),
1: ('Noto Sans:size={}:weight=bold;2', ['11'], ['21']),
2: ('Noto Sans Mono:size={}:weight=bold;2', ['10'], ['20']),
3: ('Symbols Nerd Font:size={};4', ['13'], ['26']),
4: ('Symbols Nerd Font:size={};4', ['14'], ['28']),
5: ('Symbols Nerd Font:size={};4', ['12'], ['24']),
6: ('Noto Sans:size={}:weight=bold;{}', ['7', '-5'], ['14', '-10']),
}
launch_polybar(parse_args.monitors)
sys.exit(0)
| petobens/dotfiles | arch/config/polybar/launch.py | Python | mit | 3,471 |
# -*- coding: utf-8 -*-
import collections
from . import topics
from clare import common
class Logging(object):
def __init__(self, sender, logger):
"""
Parameters
----------
sender : clare.common.messaging.producer.senders.Sender
logger : logging.Logger
"""
self._sender = sender
self._logger = logger
def push(self, record, timeout):
"""
Parameters
----------
record : clare.common.messaging.records.Record
timeout : float
"""
self._sender.push(record=record, timeout=timeout)
arguments = collections.OrderedDict()
arguments['path'] = record.value
event = common.logging.Event(topic=topics.Topic.ROOM_FOUND,
arguments=arguments)
message = event.to_json()
self._logger.info(msg=message)
def __repr__(self):
repr_ = '{}(sender={}, logger={})'
return repr_.format(self.__class__.__name__,
self._sender,
self._logger)
| dnguyen0304/clare | clare/clare/application/room_list_watcher/senders.py | Python | mit | 1,108 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Base class for loading dataset for the CTC model.
In this class, all data will be loaded at each step.
You can use the multi-GPU version.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import basename
import random
import numpy as np
from utils.dataset.base import Base
from utils.io.inputs.frame_stacking import stack_frame
from utils.io.inputs.splicing import do_splice
class DatasetBase(Base):
def __init__(self, *args, **kwargs):
super(DatasetBase, self).__init__(*args, **kwargs)
def __getitem__(self, index):
input_i = np.array(self.input_paths[index])
label_i = np.array(self.label_paths[index])
return (input_i, label_i)
def __next__(self, batch_size=None):
"""Generate each mini-batch.
Args:
batch_size (int, optional): the size of mini-batch
Returns:
A tuple of `(inputs, labels, inputs_seq_len, input_names)`
inputs: list of input data of size
`[num_gpu, B, T_in, input_size]`
labels: list of target labels of size
`[num_gpu, B, T_out]`
inputs_seq_len: list of length of inputs of size
`[num_gpu, B]`
input_names: list of file name of input data of size
`[num_gpu, B]`
is_new_epoch (bool): If true, 1 epoch is finished
"""
if self.max_epoch is not None and self.epoch >= self.max_epoch:
raise StopIteration
# NOTE: max_epoch = None means infinite loop
if batch_size is None:
batch_size = self.batch_size
# reset
if self.is_new_epoch:
self.is_new_epoch = False
if not self.is_test:
self.padded_value = -1
else:
self.padded_value = None
# TODO(hirofumi): move this
if self.sort_utt:
            # Sort all utterances by length
if len(self.rest) > batch_size:
data_indices = sorted(list(self.rest))[:batch_size]
self.rest -= set(data_indices)
                # NOTE: rest is in utterance length order
else:
# Last mini-batch
data_indices = list(self.rest)
self.reset()
self.is_new_epoch = True
self.epoch += 1
if self.epoch == self.sort_stop_epoch:
self.sort_utt = False
self.shuffle = True
# Shuffle data in the mini-batch
random.shuffle(data_indices)
elif self.shuffle:
            # Randomly sample utterances
if len(self.rest) > batch_size:
data_indices = random.sample(list(self.rest), batch_size)
self.rest -= set(data_indices)
else:
# Last mini-batch
data_indices = list(self.rest)
self.reset()
self.is_new_epoch = True
self.epoch += 1
# Shuffle selected mini-batch
random.shuffle(data_indices)
else:
if len(self.rest) > batch_size:
data_indices = sorted(list(self.rest))[:batch_size]
self.rest -= set(data_indices)
# NOTE: rest is in name order
else:
# Last mini-batch
data_indices = list(self.rest)
self.reset()
self.is_new_epoch = True
self.epoch += 1
# Load dataset in mini-batch
input_list = np.array(list(
map(lambda path: np.load(path),
np.take(self.input_paths, data_indices, axis=0))))
label_list = np.array(list(
map(lambda path: np.load(path),
np.take(self.label_paths, data_indices, axis=0))))
if not hasattr(self, 'input_size'):
self.input_size = input_list[0].shape[1]
if self.num_stack is not None and self.num_skip is not None:
self.input_size *= self.num_stack
# Frame stacking
input_list = stack_frame(input_list,
self.num_stack,
self.num_skip,
progressbar=False)
# Compute max frame num in mini-batch
max_frame_num = max(map(lambda x: x.shape[0], input_list))
# Compute max target label length in mini-batch
max_seq_len = max(map(len, label_list))
# Initialization
inputs = np.zeros(
(len(data_indices), max_frame_num, self.input_size * self.splice),
dtype=np.float32)
labels = np.array(
[[self.padded_value] * max_seq_len] * len(data_indices))
inputs_seq_len = np.zeros((len(data_indices),), dtype=np.int32)
input_names = list(
map(lambda path: basename(path).split('.')[0],
np.take(self.input_paths, data_indices, axis=0)))
# Set values of each data in mini-batch
for i_batch in range(len(data_indices)):
data_i = input_list[i_batch]
frame_num, input_size = data_i.shape
# Splicing
data_i = data_i.reshape(1, frame_num, input_size)
data_i = do_splice(data_i,
splice=self.splice,
batch_size=1,
num_stack=self.num_stack)
data_i = data_i.reshape(frame_num, -1)
inputs[i_batch, :frame_num, :] = data_i
if self.is_test:
labels[i_batch, 0] = label_list[i_batch]
else:
labels[i_batch, :len(label_list[i_batch])
] = label_list[i_batch]
inputs_seq_len[i_batch] = frame_num
###############
# Multi-GPUs
###############
if self.num_gpu > 1:
# Now we split the mini-batch data by num_gpu
inputs = np.array_split(inputs, self.num_gpu, axis=0)
labels = np.array_split(labels, self.num_gpu, axis=0)
inputs_seq_len = np.array_split(
inputs_seq_len, self.num_gpu, axis=0)
input_names = np.array_split(input_names, self.num_gpu, axis=0)
else:
inputs = inputs[np.newaxis, :, :, :]
labels = labels[np.newaxis, :, :]
inputs_seq_len = inputs_seq_len[np.newaxis, :]
input_names = np.array(input_names)[np.newaxis, :]
self.iteration += len(data_indices)
# Clean up
del input_list
del label_list
return (inputs, labels, inputs_seq_len, input_names), self.is_new_epoch
| hirofumi0810/tensorflow_end2end_speech_recognition | utils/dataset/ctc.py | Python | mit | 6,845 |
import sys
import json
from simfile import *
from pprint import pprint
filename = sys.argv[1]
simfile = {}
simfile['song'] = {}
try:
sim = Simfile(filename)
except:
print "ERROR"
#print "ERROR"
exit()
try:
simfile['song']['title'] = sim['TITLE']
except:
simfile['song']['title'] = ""
try:
simfile['song']['artist'] = sim['ARTIST']
except:
simfile['song']['artist'] = ""
try:
simfile['song']['banner'] = sim['BANNER']
except:
simfile['song']['banner'] = ""
try:
simfile['song']['background'] = sim['BACKGROUND']
except:
simfile['song']['background'] = ""
try:
simfile['song']['credit'] = sim['CREDIT']
except:
simfile['song']['credit'] = ""
try:
simfile['song']['subtitle'] = sim['SUBTITLE']
except:
simfile['song']['subtitle'] = ""
try:
simfile['song']['artisttranslit'] = sim['ARTISTTRANSLIT']
except:
simfile['song']['artisttranslit'] = ""
try:
simfile['song']['titletranslit'] = sim['TITLETRANSLIT']
except:
simfile['song']['titletranslit'] = ""
try:
simfile['song']['subtitletranslit'] = sim['SUBTITLETRANSLIT']
except:
simfile['song']['subtitletranslit'] = ""
try:
simfile['song']['bgchanges'] = sim['BGCHANGES']
except:
simfile['song']['bgchanges'] = ""
try:
simfile['song']['fgchanges'] = sim['FGCHANGES']
except:
simfile['song']['fgchanges'] = ""
simfile['song']['charts'] = {}
for chart in sim.charts:
type = str(chart.stepstype)
if not simfile['song']['charts'].get(type):
simfile['song']['charts'][type] = {}
simfile['song']['charts'][type][chart.difficulty] = {}
simfile['song']['charts'][type][chart.difficulty]['meter'] = chart.meter
simfile['song']['charts'][type][chart.difficulty]['description'] = chart.description
    if chart.stepstype == 'dance-single':  # exact match; 'in' would also match substrings such as 'dance'
simfile['song']['charts'][type][chart.difficulty]['notes'] = {}
taps = 0
holds = 0
jumps = 0
mines = 0
rolls = 0
for notes in chart.notes:
if notes[1] in ("1001","0110","1100","0011", "1010", "0101"):
jumps+=1
taps+=2
elif notes[1] in ("2002","0220","2200","0022", "2020", "0202"):
jumps+=1
holds+=2
taps+=2
elif notes[1] in ("1002","0120","1200","0012", "1020", "0102", "2001","0210","2100","0021", "2010", "0201"):
jumps+=1
holds+=1
taps+=2
else:
for l in list(notes[1]):
if l in ("M", "m"):
mines+=1
elif int(l) == 1:
taps+=1
elif int(l) == 2:
holds+=1
taps+=1
elif int(l) == 4:
rolls+=1
taps+=1
#else:
simfile['song']['charts'][type][chart.difficulty]['notes']['taps'] = taps
simfile['song']['charts'][type][chart.difficulty]['notes']['holds'] = holds
simfile['song']['charts'][type][chart.difficulty]['notes']['jumps'] = jumps
simfile['song']['charts'][type][chart.difficulty]['notes']['mines'] = mines
simfile['song']['charts'][type][chart.difficulty]['notes']['rolls'] = rolls
print json.dumps(simfile, sort_keys=True, indent=4)
| concubidated/stepmania-song-search | scripts/sm_parse.py | Python | mit | 3,006 |
from django.apps import AppConfig
class GuiConfig(AppConfig):
name = 'gui'
| Jasper-Koops/THESIS_LIFEBOAT | DJANGO_GUI/django_gui/gui/apps.py | Python | mit | 81 |
"""
csvfile.py
---------------
This module contains classes for handling CSV files.
.. autoclass:: fileio.csvfile.CSVFile
"""
import time
import codecs
import pathlib
import calendar
from datetime import datetime
import h5py
import pandas
import numpy as np
class CSVFile(object):
def __init__(self, csvfile, header_info='ohlc', bitask_fmt='bitask',
daytime_format='%Y%m%d %H%M%S', delimiter=';', header=None,
unit='minute'):
self.csvfile = pathlib.Path(csvfile).expanduser().resolve()
self.data = None
self.delimiter = delimiter
self.daytime_format = daytime_format
self.header_info = header_info
self.header = header
self.unit = unit
self._read()
def _read(self):
'''
day-time, Open(BID), High(BID), Low(BID), Close(BID),
'''
self.data = pandas.read_csv(str(self.csvfile),
sep=self.delimiter,
header=self.header
)
def convert_daytime_to_seconds(self, daytime) -> int:
        # Parse the formatted timestamp and convert it to UTC epoch seconds.
        s = datetime.strptime(daytime, self.daytime_format)
return calendar.timegm(s.utctimetuple())
def convert_seconds_to_daytime(self, seconds):
        # Convert UTC epoch seconds back into a formatted timestamp string.
        return datetime.utcfromtimestamp(seconds).strftime(self.daytime_format)
def show(self):
'''
create an image.
'''
pass
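# Hedged usage sketch (not part of the original module); the file name and
# column layout below are illustrative only:
#
#     csv = CSVFile('~/data/eurusd_minute.csv', delimiter=';')
#     print(csv.data.head())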
| 0h-n0/forex_py | frxpy/data/csvfile.py | Python | mit | 1,436 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2010 Andrew Brown <[email protected], [email protected]>
# Copyright (c) 2015 Stephen Larroque <[email protected]>
# See LICENSE.txt for license terms
# For fast software computation in Finite Fields, see the excellent paper: Huang, Cheng, and Lihao Xu. "Fast software implementation of finite field operations." Washington University in St. Louis, Tech. Rep (2003).
# to understand the basic mathematical notions behind finite fields, see the excellent tutorial: http://research.swtch.com/field
from ._compat import _range
import array
# Galois Field's characteristic, by default, it's GF(2^8) == GF(256)
# Note that it's -1 (thus for GF(2^8) it's really 255 and not 256) because this is historically tied to the definition of Reed-Solomon codes: since the 0 and 256 values are impossible, we effectively have only 255 possible values. But later were defined (singly) extended Reed-Solomon codes, which include the 0 and thus 256 values, and then doubly extended Reed-Solomon codes which include the 0 and 256 == infinity.
GF2_charac = int(2**8 - 1)
GF2_c_exp = 8
# Exponent table for generator=3 and prim=0x11b in GF(2^8)
GF2int_exptable = array.array("i", [1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19,
53, 95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34,
102, 170, 229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112,
144, 171, 230, 49, 83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104,
184, 211, 110, 178, 205, 76, 212, 103, 169, 224, 59, 77, 215, 98,
166, 241, 8, 24, 40, 120, 136, 131, 158, 185, 208, 107, 189, 220,
127, 129, 152, 179, 206, 73, 219, 118, 154, 181, 196, 87, 249, 16,
48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163, 254, 25, 43, 125,
135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160, 251, 22,
58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65, 195,
94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218,
117, 159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223,
122, 142, 137, 128, 155, 182, 193, 88, 232, 35, 101, 175, 234, 37,
111, 177, 200, 67, 197, 84, 252, 31, 33, 99, 165, 244, 7, 9, 27,
45, 119, 153, 176, 203, 70, 202, 69, 207, 74, 222, 121, 139, 134,
145, 168, 227, 62, 66, 198, 81, 243, 14, 18, 54, 90, 238, 41, 123,
141, 140, 143, 138, 133, 148, 167, 242, 13, 23, 57, 75, 221, 124,
132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1])
# Logarithm table for the same GF parameters
GF2int_logtable = array.array("i", [-1, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, # log(0) is undefined, it should be none, but for performance we use an array, thus we need to set an integer, here we replace None by -1
3, 100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105,
28, 193, 125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114,
154, 201, 9, 120, 101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130,
69, 53, 147, 218, 142, 150, 143, 219, 189, 54, 208, 206, 148, 19,
92, 210, 241, 64, 70, 131, 56, 102, 221, 253, 48, 191, 6, 139, 98,
179, 37, 226, 152, 34, 136, 145, 16, 126, 110, 72, 195, 163, 182,
30, 66, 58, 107, 40, 84, 250, 133, 61, 186, 43, 121, 10, 21, 155,
159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87, 175, 88, 168,
80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232, 44,
215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81,
160, 127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164,
118, 123, 183, 204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161,
108, 170, 85, 41, 157, 151, 178, 135, 144, 97, 190, 220, 252, 188,
149, 207, 205, 55, 63, 91, 209, 83, 57, 132, 60, 65, 162, 109, 71,
20, 42, 158, 93, 86, 242, 211, 171, 68, 17, 146, 217, 35, 32, 46,
137, 180, 124, 184, 38, 119, 153, 227, 165, 103, 74, 237, 222, 197,
49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7])
def rwh_primes1(n):
# http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
''' Returns a list of primes < n '''
sieve = [True] * (n//2)
for i in _range(3,int(n**0.5)+1,2):
if sieve[i//2]:
sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)
return [2] + [2*i+1 for i in _range(1,n//2) if sieve[i]]
def find_prime_polynomials(generator=2, c_exp=8, fast_primes=False, single=False):
'''Compute the list of prime polynomials for the given generator and galois field characteristic exponent.'''
# fast_primes will output less results but will be significantly faster.
# single will output the first prime polynomial found, so if all you want is to just find one prime polynomial to generate the LUT for Reed-Solomon to work, then just use that.
# A prime polynomial (necessarily irreducible) is necessary to reduce the multiplications in the Galois Field, so as to avoid overflows.
# Why do we need a "prime polynomial"? Can't we just reduce modulo 255 (for GF(2^8) for example)? Because we need the values to be unique.
# For example: if the generator (alpha) = 2 and c_exp = 8 (GF(2^8) == GF(256)), then the generated Galois Field (0, 1, α, α^1, α^2, ..., α^(p-1)) will be galois field it becomes 0, 1, 2, 4, 8, 16, etc. However, upon reaching 128, the next value will be doubled (ie, next power of 2), which will give 256. Then we must reduce, because we have overflowed above the maximum value of 255. But, if we modulo 255, this will generate 256 == 1. Then 2, 4, 8, 16, etc. giving us a repeating pattern of numbers. This is very bad, as it's then not anymore a bijection (ie, a non-zero value doesn't have a unique index). That's why we can't just modulo 255, but we need another number above 255, which is called the prime polynomial.
# Why so much hassle? Because we are using precomputed look-up tables for multiplication: instead of multiplying a*b, we precompute alpha^a, alpha^b and alpha^(a+b), so that we can just use our lookup table at alpha^(a+b) and get our result. But just like in our original field we had 0,1,2,...,p-1 distinct unique values, in our "LUT" field using alpha we must have unique distinct values (we don't care that they are different from the original field as long as they are unique and distinct). That's why we need to avoid duplicated values, and to avoid duplicated values we need to use a prime irreducible polynomial.
# Here is implemented a bruteforce approach to find all these prime polynomials, by generating every possible prime polynomials (ie, every integers between field_charac+1 and field_charac*2), and then we build the whole Galois Field, and we reject the candidate prime polynomial if it duplicates even one value or if it generates a value above field_charac (ie, cause an overflow).
# Note that this algorithm is slow if the field is too big (above 12), because it's an exhaustive search algorithm. There are probabilistic approaches, and almost surely prime approaches, but there is no determistic polynomial time algorithm to find irreducible monic polynomials. More info can be found at: http://people.mpi-inf.mpg.de/~csaha/lectures/lec9.pdf
# Another faster algorithm may be found at Adleman, Leonard M., and Hendrik W. Lenstra. "Finding irreducible polynomials over finite fields." Proceedings of the eighteenth annual ACM symposium on Theory of computing. ACM, 1986.
# Prepare the finite field characteristic (2^p - 1), this also represent the maximum possible value in this field
root_charac = 2 # we're in GF(2)
field_charac = int(root_charac**c_exp - 1)
field_charac_next = int(root_charac**(c_exp+1) - 1)
prim_candidates = []
if fast_primes:
prim_candidates = rwh_primes1(field_charac_next) # generate maybe prime polynomials and check later if they really are irreducible
prim_candidates = [x for x in prim_candidates if x > field_charac] # filter out too small primes
else:
prim_candidates = _range(field_charac+2, field_charac_next, root_charac) # try each possible prime polynomial, but skip even numbers (because divisible by 2 so necessarily not irreducible)
# Start of the main loop
correct_primes = []
for prim in prim_candidates: # try potential candidates primitive irreducible polys
seen = bytearray(field_charac+1) # memory variable to indicate if a value was already generated in the field (value at index x is set to 1) or not (set to 0 by default)
conflict = False # flag to know if there was at least one conflict
# Second loop, build the whole Galois Field
x = GF2int(1)
for i in _range(field_charac):
# Compute the next value in the field (ie, the next power of alpha/generator)
x = x.multiply(generator, prim, field_charac+1)
# Rejection criterion: if the value overflowed (above field_charac) or is a duplicate of a previously generated power of alpha, then we reject this polynomial (not prime)
if x > field_charac or seen[x] == 1:
conflict = True
break
# Else we flag this value as seen (to maybe detect future duplicates), and we continue onto the next power of alpha
else:
seen[x] = 1
# End of the second loop: if there's no conflict (no overflow nor duplicated value), this is a prime polynomial!
if not conflict:
correct_primes.append(prim)
if single: return prim
# Return the list of all prime polynomials
return correct_primes # you can use the following to print the hexadecimal representation of each prime polynomial: print [hex(i) for i in correct_primes]
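# Illustrative use of the search above (a sketch, not part of the original
# module): pick the first irreducible polynomial for a given generator and
# rebuild the look-up tables with it, e.g.
#
#     prim = find_prime_polynomials(generator=2, c_exp=8, single=True)
#     init_lut(generator=2, prim=prim, c_exp=8)
#
# init_lut() is defined just below; any (generator, prim) pair accepted by the
# search yields a consistent GF(2^8) multiplication.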
def init_lut(generator=3, prim=0x11b, c_exp=8):
'''Precompute the logarithm and anti-log (look-up) tables for faster computation later, using the provided primitive polynomial.
These tables are used for multiplication/division since addition/substraction are simple XOR operations inside GF of characteristic 2.
The basic idea is quite simple: since b**(log_b(x), log_b(y)) == x * y given any number b (the base or generator of the logarithm), then we can use any number b to precompute logarithm and anti-log (exponentiation) tables to use for multiplying two numbers x and y.
That's why when we use a different base/generator number, the log and anti-log tables are drastically different, but the resulting computations are the same given any such tables.
For more infos, see https://en.wikipedia.org/wiki/Finite_field_arithmetic#Implementation_tricks
'''
# generator is the generator number (the "increment" that will be used to walk through the field by multiplication, this must be a prime number). This is basically the base of the logarithm/anti-log tables. Also often noted "alpha" in academic books.
# prim is the primitive/prime (binary) polynomial and must be irreducible (it can't represented as the product of two smaller polynomials). It's a polynomial in the binary sense: each bit is a coefficient, but in fact it's an integer between 0 and 255, and not a list of gf values. For more infos: http://research.swtch.com/field
# note that the choice of generator or prime polynomial doesn't matter very much: any two finite fields of size p^n have identical structure, even if they give the individual elements different names (ie, the coefficients of the codeword will be different, but the final result will be the same: you can always correct as many errors/erasures with any choice for those parameters). That's why it makes sense to refer to all the finite fields, and all decoders based on Reed-Solomon, of size p^n as one concept: GF(p^n). It can however impact sensibly the speed (because some parameters will generate sparser tables).
global GF2int_exptable, GF2int_logtable, GF2_charac, GF2_c_exp
GF2_charac = int(2**c_exp - 1)
GF2_c_exp = int(c_exp)
exptable = [-1] * (GF2_charac+1) # anti-log (exponential) table. The first two elements will always be [GF2int(1), generator]
logtable = [-1] * (GF2_charac+1) # log table, log[0] is impossible and thus unused
# Construct the anti-log table
# It's basically the cumulative product of 1 by the generator number, on and on and on until you have walked through the whole field.
# That's why exptable is always dense (all entries are filled), but logtable may be sparse (lots of empty values, because multiple logtable's entries point to the same exptable's entry).
g = GF2int(1)
for i in range(GF2_charac+1): # note that the last item of exptable will always be equal to the first item in the table, because g^p==g^0 because of the modulo p (we're in a finite field!).
exptable[i] = g # compute anti-log for this value and store it in a table
#logtable[g] = i # compute logtable at the same time as exptable (but log[1] will always be equal to g^255, which may be weird when compared to lists of logtables online but this is equivalent)
g = g.multiply(generator, prim, GF2_charac+1) # equivalent to: g = generator**(i+1)
# Construct the log table
# Ignore the last element of the field because fields wrap back around.
# The log of 1 can have two values: either g^0 (the exact value change depending on parameters) or it could be 255 (g^255=1) because of the wraparound
# Note that either way, this does not change anything any output later (ie, the ecc symbols will be the same either way).
for i, x in enumerate(exptable[:-1]):
logtable[x] = i
# Optimization: convert to integer arrays
GF2int_exptable = array.array('i', exptable)
GF2int_logtable = array.array('i', logtable)
return GF2int_exptable, GF2int_logtable
class GF2int(int):
'''Instances of this object are elements of the field GF(2^p)
Instances are integers in the range 0 to p-1
This field is defined using the irreducable polynomial
x^8 + x^4 + x^3 + x + 1
and using 3 as the generator for the exponent table and log table.
'''
__slots__ = [] # define all properties to save memory (can't add new properties at runtime) and it speeds up a lot. Here there's no property at all since it's only a type extending integers.
# Maps integers to GF2int instances
#cache = {}
# def __new__(cls, value): # Note: works but commented out because on computers, we'd rather use less CPU than use less memory.
# # Check cache
# # Caching sacrifices a bit of speed for less memory usage. This way,
# # there are only a max of 256 instances of this class at any time.
# try:
# return GF2int.cache[value]
# except KeyError:
# if value > GF2_charac or value < 0:
# raise ValueError("Field elements of GF(2^p) are between 0 and %i. Cannot be %s" % (GF2_charac, value))
# newval = int.__new__(cls, value)
# GF2int.cache[int(value)] = newval
# return newval
def __add__(a, b):
'''Addition in GF(2^8) is the xor of the two'''
# Technical notes on why it works: In practice only one table is needed. That would be for the GP(256) multiply. Note that all arithmetic is carry-less, meaning that there is no carry-propagation.
# Addition and subtraction without carry is equivalent to an xor.
# So in GF(256), a + b and a - b are both equivalent to a xor b.
# For more infos, see the great post at http://stackoverflow.com/questions/8440654/addition-and-multiplication-in-a-galois-field
return GF2int(a ^ b)
__sub__ = __add__
__radd__ = __add__
__rsub__ = __add__
def __neg__(self):
return self
def __mul__(a, b):
'''Multiplication in GF(2^8)'''
# GF(256) multiplication is also carry-less, and can be done using carry-less multiplication in a similar way with carry-less addition/subtraction. This can be done efficiently with hardware support via say Intel's CLMUL instruction set.
# a * b is really the same as exp(log(a) + log(b)). And because GF256 has only 256 elements, there are only GF2_charac unique powers of "x", and same for log. So these are easy to put in a lookup table.
if a == 0 or b == 0: # not an optimization, it's necessary because log(0) is undefined
return GF2int(0)
x = GF2int_logtable[a]
y = GF2int_logtable[b]
#z = (x + y) % GF2_charac # in logarithms, addition = multiplication after exponentiation
# Faster implementation of finite field multiplication: z = (log[a]+log[b] & GF2_charac) + (log[a]+log[b] >> GF2_c_exp), you can replace GF2_charac by (2^m)-1 and GF2_c_exp by m (eg: for GF(2^16), you'd get 65535 and 16). This optimization was shown in paper: "Fast software implementation of finite field operations", Cheng Huang and Lihao Xu, Washington University in St. Louis, Tech. Rep (2003).
z = (x + y)
z = (z & GF2_charac) + (z >> GF2_c_exp)
return GF2int(GF2int_exptable[z])
__rmul__ = __mul__
def __pow__(self, power, modulo=None):
# TODO: maybe try to implement the fast exponentiation here (implement binary exponentiation in Galois Fields that uses Montgomery Multiplication and using normal basis): http://stackoverflow.com/a/11640271/1121352 Algorithms for exponentiation in finite fields, by Shuhong Gao, Joachim Von Zur Gathen, Daniel Panario and Victor Shoup
if isinstance(power, GF2int):
raise TypeError("Raising a Field element to another Field element is not defined. power must be a regular integer")
x = GF2int_logtable[self]
z = (x * power) % GF2_charac
return GF2int(GF2int_exptable[z])
def inverse(self):
e = GF2int_logtable[self]
return GF2int(GF2int_exptable[GF2_charac - e])
def __div__(self, other):
#return self * GF2int(other).inverse() # self / other = self * inv(other) . This is equivalent to what is below, but 2x slower.
if self == 0 or other == 0:
return GF2int(0)
x = GF2int_logtable[self]
y = GF2int_logtable[other]
z = (x - y) % GF2_charac # in logarithms, substraction = division after exponentiation
return GF2int(GF2int_exptable[z])
__floordiv__ = __div__
__truediv__ = __div__
def __rdiv__(self, other):
return self.inverse() * other
__rfloordiv__ = __rdiv__
__rtruediv__ = __rdiv__
def __repr__(self):
n = self.__class__.__name__
return "%s(%r)" % (n, int(self))
def _to_binpoly(x):
'''Convert a Galois Field's number into a nice polynomial'''
if x <= 0: return "0"
b = 1 # init to 2^0 = 1
c = [] # stores the degrees of each term of the polynomials
i = 0 # counter for b = 2^i
while x > 0:
b = (1 << i) # generate a number power of 2: 2^0, 2^1, 2^2, ..., 2^i. Equivalent to b = 2^i
if x & b : # then check if x is divisible by the power of 2. Equivalent to x % 2^i == 0
# If yes, then...
c.append(i) # append this power (i, the exponent, gives us the coefficient)
x ^= b # and compute the remainder of x / b
i = i+1 # increment to compute the next power of 2
return " + ".join(["x^%i" % y for y in c[::-1]]) # print a nice binary polynomial
def multiply(a, b, prim=0x11b, field_charac_full=256, carryless=True):
'''A slow multiply method. This method gives the same results as the
other __mul__ method but without needing precomputed tables,
thus it can be used to generate those tables.
If prim is set to 0 and carryless=False, the function produces the result of a standard multiplication of integers (outside of a finite field, ie, no modular reduction and no carry-less operations).
This procedure is called Russian Peasant Multiplication algorithm, which is just a general algorithm to multiply two integers together.
The only two differences that you need to account for when doing multiplication in a finite field (as opposed to just integers) are:
1- carry-less addition and substraction (XOR in GF(2^p))
2- modular reduction (to avoid duplicate values in the field) using a prime polynomial
'''
r = 0
a = int(a)
b = int(b)
while b: # while b is not 0
if b & 1: r = r ^ a if carryless else r + a # b is odd, then add the corresponding a to r (the sum of all a's corresponding to odd b's will give the final product). Note that since we're in GF(2), the addition is in fact an XOR (very important because in GF(2) the multiplication and additions are carry-less, thus it changes the result!).
b = b >> 1 # equivalent to b // 2
a = a << 1 # equivalent to a*2
if prim > 0 and a & field_charac_full: a = a ^ prim # GF modulo: if a >= 256 then apply modular reduction using the primitive polynomial (we just substract, but since the primitive number can be above 256 then we directly XOR).
return GF2int(r)
def multiply_slow(x, y, prim=0x11b):
'''Another equivalent (but even slower) way to compute multiplication in Galois Fields without using a precomputed look-up table.
This is the form you will most often see in academic literature, by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.'''
### Define bitwise carry-less operations as inner functions ###
def cl_mult(x,y):
'''Bitwise carry-less multiplication on integers'''
z = 0
i = 0
while (y>>i) > 0:
if y & (1<<i):
z ^= x<<i
i += 1
return z
def bit_length(n):
'''Compute the position of the most significant bit (1) of an integer. Equivalent to int.bit_length()'''
bits = 0
while n >> bits: bits += 1
return bits
def cl_div(dividend, divisor=None):
'''Bitwise carry-less long division on integers and returns the remainder'''
# Compute the position of the most significant bit for each integers
dl1 = bit_length(dividend)
dl2 = bit_length(divisor)
# If the dividend is smaller than the divisor, just exit
if dl1 < dl2:
return dividend
# Else, align the most significant 1 of the divisor to the most significant 1 of the dividend (by shifting the divisor)
for i in _range(dl1-dl2,-1,-1):
# Check that the dividend is divisible (useless for the first iteration but important for the next ones)
if dividend & (1 << i+dl2-1):
# If divisible, then shift the divisor to align the most significant bits and XOR (carry-less substraction)
dividend ^= divisor << i
return dividend
### Main GF multiplication routine ###
# Multiply the gf numbers
result = cl_mult(x,y)
# Then do a modular reduction (ie, remainder from the division) with an irreducible primitive polynomial so that it stays inside GF bounds
if prim > 0:
result = cl_div(result, prim)
return result
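# --- Hedged self-check (added sketch, not part of the original module) --------
# The block below exercises the pieces above with the default generator=3 /
# prim=0x11b parameters: the table-based GF2int multiplication must agree with
# the table-free multiply() and multiply_slow() routines, and inversion and
# division must be consistent with multiplication. Run it with
# `python -m unireedsolomon.ff` so the relative import above resolves.
if __name__ == '__main__':
    init_lut(generator=3, prim=0x11b, c_exp=8)
    a, b = GF2int(0x57), GF2int(0x83)
    # All three multiplication routes compute the same product in GF(2^8)/0x11b.
    assert a * b == multiply(a, b, prim=0x11b) == multiply_slow(a, b, prim=0x11b)
    # Division undoes multiplication, and every non-zero element has an inverse.
    assert (a * b) / b == a
    assert a * a.inverse() == GF2int(1)
    print('GF(2^8) sanity checks passed')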
| lrq3000/unireedsolomon | unireedsolomon/ff.py | Python | mit | 23,474 |
import SimpleParser
import sys
from itertools import chain, combinations
def powerset(iterable):
xs = list(iterable)
    # return a list of all subsets (as tuples), from the empty set up to the full set
return list(chain.from_iterable( combinations(xs,n) for n in range(len(xs)+1) ))
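# Illustrative behavior of powerset() (example values only):
#     powerset(['1', '2']) -> [(), ('1',), ('2',), ('1', '2')]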
def main():
parser = SimpleParser.SimpleParser("data/transaction2.txt")
parser.parse()
items = ["1","2","3","4","5","6","7","8","9"]
#t = ["1","2","3","4"]
#print list(combinations(t,2))
#items = [["1"],["2"],["3"],["4"],["5"],["6"],["7"],["8"],["9"]]
ps = powerset(items)
for p in ps:
sup = parser.support( list(p) )
if sup > 0:
print str(list(p)) + ":"+ str(sup )
if __name__ == '__main__':
main()
| coreyauger/digdug | main.py | Python | mit | 682 |
i = 4
d = 4.0
s = 'HackerRank '
# Declare second integer, double, and String variables.
i2, d2, s2 = None, None, None
# Read and save an integer, double, and String to your variables.
i2 = int(input())
d2 = float(input())
s2 = input()
# Print the sum of both integer variables on a new line.
print(i+i2)
# Print the sum of the double variables on a new line.
print(d+d2)
# Concatenate and print the String variables on a new line
# The 's' variable above should be printed first.
print(s+s2)
| vasadhananjay/HackerRank_30-Day-of-Code | Day01.py | Python | mit | 498 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from sys import argv
import sys
import MySQLdb
#Get our data from the title
#Format: ShowName Episode Number - Title
filename = argv[1]
filename = filename[:-4]
show = filename.split("Episode")[0].strip()
db = MySQLdb.connect("localhost", "root", "", "tynime")
cursor = db.cursor()
sql = "SELECT seriesId FROM series WHERE seriesName = \"%s\";" % show
cursor.execute(sql)
if cursor.rowcount == 1:
sys.stdout.write(str(cursor.fetchone()[0]))
else:
sql = "INSERT INTO series(seriesName) VALUES (\"%s\");" % show
cursor.execute(sql)
sys.stdout.write(str(cursor.lastrowid))
db.commit() | ty-a/tynime | scripts/getSeriesId.py | Python | mit | 631 |
from geo import db
from geoalchemy2 import Geometry
from geo import app
import shapely.wkb
from shapely.geometry import mapping
from shapely.geometry import shape
from geoalchemy2.compat import buffer, bytes
import geojson
class Title(db.Model):
__tablename__ = 'titles'
id = db.Column(db.Integer, primary_key=True)
title_number = db.Column(db.String(64))
extent = db.Column(Geometry('MULTIPOLYGON', srid=app.config['SPATIAL_REFERENCE_SYSTEM_IDENTIFIER']))
def __init__(self, title_number=None, extent=None):
self.title_number = title_number
self.extent = extent
def __repr__(self):
return "Title id: %d title number: %s extent: %s" % (self.id, self.title_number, self.extent)
def set_extent_from_geojson(self, geojson_extent):
"""
Accepts geojson containing either MultiPolygon or Polygon.
If it is a Polygon, it is converted to a MultiPolygon
"""
extent = geojson.loads(geojson_extent)
if extent['geometry']['type'] == 'Polygon':
coordinates = []
coordinates.append(extent['geometry']['coordinates'])
extent['geometry']['coordinates'] = coordinates
extent['geometry']['type'] = 'MultiPolygon'
crs = extent['crs']['properties']['name'].split(':')
self.extent = 'SRID=%s;%s' % (crs[len(crs) -1] , shape(extent['geometry']).wkt)
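    # Illustrative GeoJSON input for set_extent_from_geojson (hypothetical
    # coordinates; the CRS name only needs to end in the EPSG code):
    #     {"type": "Feature",
    #      "properties": {},
    #      "crs": {"type": "name", "properties": {"name": "urn:ogc:def:crs:EPSG:27700"}},
    #      "geometry": {"type": "Polygon",
    #                   "coordinates": [[[0, 0], [0, 10], [10, 10], [10, 0], [0, 0]]]}}
    # A Polygon is wrapped into a one-element MultiPolygon before being stored.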
def to_dict(self):
"""
Returns title number and geojson representing the extent.
Everything is stored as a multi-polygon, but we convert to a polygon if the
multi-polygon contains only 1 polygon
"""
shape = mapping(shapely.wkb.loads(bytes(self.extent.data)))
extent = {}
extent['crs'] = {'type': 'name', 'properties':{'name': 'urn:ogc:def:crs:EPSG:%s' % self.extent.srid}}
extent['type'] = 'Feature'
extent['properties'] = {}
if len(shape['coordinates']) == 1:
extent['geometry'] = {'coordinates': shape['coordinates'][0], 'type': 'Polygon'}
else:
extent['geometry'] = {'coordinates': shape['coordinates'], 'type': 'MultiPolygon'}
return {'title_number': self.title_number, 'extent': extent} | LandRegistry/geo | geo/models.py | Python | mit | 2,236 |
###########################################################################################
# Universidade Federal de Pernambuco -- UFPE (http://www.ufpe.br)
# Centro de Informatica -- CIn (http://www.cin.ufpe.br)
# Bacharelado em Sistemas de Informacao
# IF968 -- Programacao 1
#
# Authors: Eric Araujo
#          Pedro Vinicius
#
# Email: [email protected]
#        [email protected]
#
# Date: 2016-06-10
#
# Description: This is a template file to be used for the implementation
#              of the practical project of the Programming 1 course.
#              The project description is available on the course website and is
#              an adaptation of the project available at
#              http://nifty.stanford.edu/2016/manley-urness-movie-review-sentiment/
#              The goal of this project is to implement a sentiment analysis
#              system for movie reviews posted on the Rotten Tomatoes website.
#
# License: The MIT License (MIT)
#          Copyright(c) 2016 Eric Araujo, Pedro Vinicius.
#
###########################################################################################
import sys
import re
def clean_up(s):
    ''' Returns a version of string 's' in which all letters are
        converted to lowercase and punctuation characters are removed
        from both ends. Punctuation inside the string
        is kept intact.
'''
punctuation = ''''!"`/\',;:.-?)([]<>*#\n\t\r'''
result = s.lower().strip(punctuation)
return result
def split_on_separators(original, separators):
    ''' Returns a list of non-empty strings obtained by splitting the
        original string on any of the characters contained in 'separators'.
        'separators' is a string of the individual characters to be used
        as separators. For example, '^$' is a valid string, indicating that
        the original string will be split on '^' and '$'.
'''
return filter(lambda x: x != '',re.split('[{0}]'.format(separators),original))
def stopWords(fname):
    '''Reads the words from a stop-words file and stores them in a list.'''
lista = []
f = open(fname, 'r')
for termo in f.readlines():
limpa = clean_up(termo)
lista.append(limpa)
f.close()
return lista
def readTrainingSet(fname):
    ''' Receives the path of the training-set file as a parameter
        and returns a dictionary of triples (word, freq, score) holding the
        average score of each word across the comments.
    '''
    words = dict()
    stop = stopWords('stopWords.txt')  # list of stop words
    f = open(fname, 'r')
    for linha in f.readlines():
        separar = list(split_on_separators(linha,' '))  # split the file line into its words and return them as a list.
        score = int(separar.pop(0))  # remove the score from the list and keep it.
        # The per-word processing must happen inside the per-line loop; the
        # original code ran it after the loop and only processed the last line.
        for palavra in separar:
            limpa = clean_up(palavra)  # once the score is removed, each word is cleaned one by one.
            if limpa != '':  # clean_up returns an empty string when the word is filtered out.
                for PalStop in stop:  # check whether the cleaned, non-empty word is in the stop-words list.
                    if PalStop == limpa:  # if so, blank it out so it does not enter the condition below.
                        limpa = ''
                if limpa != '':
                    if limpa not in words:  # new word: add it to the dictionary.
                        freq = 1
                        words[limpa] = [limpa,freq,score]
                    else:
                        words[limpa][1] += 1
                        words[limpa][2] += score  # existing word: just accumulate its frequency and score.
    f.close()
    for elemento in words:
        media = words[elemento][2]//words[elemento][1]
        words[elemento] = (elemento,words[elemento][1],media)  # replace each entry with (word, overall frequency, integer mean score).
    return words
def readTestSet(fname):
    ''' This function reads the file containing the test set and
        returns a vector/list of (score, text) pairs for the
        comments present in the file.
    '''
    reviews = []
    f = open(fname,'r')
    for linha in f.readlines():  # read the lines and clean them up.
        limpa = clean_up(linha)
        palavras = []  # a local list created only to split the string and grab the score at position 1.
        for partes in limpa:  # walk the cleaned line character by character; the goal is just to grab the score.
            palavras.append(partes)
        Pont = int(palavras.pop(0))  # grab the score.
        reviews.append((Pont,limpa[1:]))  # add the score and the comment as a tuple to the reviews list.
    f.close()
    # Implemented
return reviews
def computeSentiment(review,words):
    ''' Returns the sentiment of the comment received as a parameter.
        The sentiment of a comment is the average of the scores of its
        words. If a word is not in the training-set vocabulary,
        its score is 2.
        Review is the textual part of a comment.
        Words is the dictionary of words and their average scores in the
        training set.
    '''
    score = 0.0
    count = 0  # the comment is scanned below and the mean score/count is computed.
    stop = stopWords('stopWords.txt')
    if review != '':
        separar = list(split_on_separators(review, ' '))  # split.
        for palavra in separar:
            limpa = clean_up(palavra)  # clean.
            if limpa != '':  # skip words that clean_up reduced to an empty string.
                for PalStop in stop:  # check the stop-words list so nothing unnecessary is added in the next step.
                    if PalStop == limpa:
                        limpa = ''
                if limpa in words:  # known word: add its stored score and bump the count.
                    score += words[limpa][2]
                    count += 1
                else:
                    score += 2  # unknown word: add 2, the neutral score.
                    count += 1
return score/count
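# Illustrative example (hypothetical words dictionary): if words maps
# 'great' -> ('great', 10, 4), 'movie' is not in words, and neither word is in
# stopWords.txt, then computeSentiment('great movie', words) averages the stored
# score 4 with the neutral 2 and returns 3.0.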
def computeSumSquaredErrors(reviews, words):
    ''' Computes the sum of the squared errors of the comments received
        as a parameter. The sentiment of a comment is obtained with the
        computeSentiment function.
        Reviews is a vector of (score, text) pairs.
        Words is a dictionary of words and their average scores in the
        training set.
    '''
    sse = 0
    resultado_final = 0
    for frase in reviews:  # compute the final result, the sum of the squared errors.
        sentimento = computeSentiment(frase[1], words)  # returns score/count.
        if frase[0] != sentimento:
            diferenca = frase[0] - sentimento  # difference between the comment's score and the sentiment obtained from its text.
            resultado = diferenca**2
            resultado_final += resultado
sse = resultado_final / len(reviews)
return sse
def main():
    # The files are passed to the program as command-line arguments.
    # You should look up more information about how this works (it is part of
    # the project).
    # The order of the parameters is: the first is the name of the file
    # with the training set, followed by the test-set file.
    if len(sys.argv) < 3:
        print ('Invalid number of arguments')
        print ('The program must be run as python sentiment_analysis.py <training-file> <test-file>')
        sys.exit(0)
    # Read the training set and compute the word scores
    words = readTrainingSet(sys.argv[1])
    # Read the test set
    reviews = readTestSet(sys.argv[2])
    # Infer sentiment and compute the sum of squared errors
    sse = computeSumSquaredErrors(reviews,words)
    print ('The sum of squared errors is: {0}'.format(sse))
if __name__ == '__main__':
main()
| eacj/projetop1 | sentiment_analysis.py | Python | mit | 9,379 |
"""Mixin for _send_to_jira().
This Mixin should now only be accessed via the send_to_bugtracker() Mixin
rather than directly by callers in our codebase. Any changes to this
Mixin's interface need to be in sync with the interface of bugtracker.
"""
from __future__ import absolute_import
import json
import logging
import six
from . import base
_BASE_JIRA_API_URL = 'https://khanacademy.atlassian.net/rest/api/2'
_SEVERITY_TO_JIRA_PRIORITY = {
logging.CRITICAL: '2', # P1
logging.ERROR: '3', # P2
logging.WARNING: '4', # P3
logging.INFO: '5', # P4
logging.DEBUG: '5', # P4
logging.NOTSET: '5', # Should not be used if avoidable
}
# Associates initiative names with their corresponding Jira project keys
_BUGTRACKER_TO_JIRA_PROJ = {
"Classroom": "CLASS",
"Content Platform": "CP",
"Infrastructure": "INFRA",
"Learning Platform": "LP",
"Test Prep": "TP",
"Test": "TEST"
}
# Associates a Jira project key with an issue type ID for that project's
# default issue type
_PROJECT_TO_ISSUETYPE_ID = {
"CLASS": "10201", # Support
"CP": "10201", # Support
"INFRA": "10103", # Bug
"LP": "10201", # Support
"TP": "10201", # Support
"TEST": "10201" # Support
}
def _call_jira_api(endpoint, payload_json=None):
# This is a separate function just to make it easy to mock for tests.
jira_api_key = base.secret('jira_api_key')
req = six.moves.urllib.request.Request(_BASE_JIRA_API_URL + endpoint)
req.add_header('Authorization', 'Basic %s' % jira_api_key)
req.add_header('Content-Type', 'application/json')
req.add_header('Accept', 'application/json')
if isinstance(payload_json, str):
payload_json = payload_json.encode('utf-8')
res = six.moves.urllib.request.urlopen(req, payload_json)
res_json = res.read()
# In the case that we're adding a watcher to an issue, success is
    # indicated by 204, no content in the response
if res.getcode() == 204:
return
# When looking for a username via _get_jira_usernames, Jira API returns 404
# if there is no user corresponding to that email address in their system
# This should not cause us to error out, just log that a user wasn't found.
elif res.getcode() == 404:
return
elif res.getcode() >= 300:
raise ValueError(res.read())
else:
return json.loads(res_json)
def _make_jira_api_call(endpoint, payload_json=None):
"""Make a GET or POST request to Jira API."""
if not base.secret('jira_api_key'):
logging.error("Not sending to Jira (no API key found): %s",
payload_json)
return
try:
return _call_jira_api(endpoint, payload_json)
except Exception as e:
logging.error("Failed sending %s to Jira: %s"
% (payload_json, e))
def _get_jira_project_keys():
"""Return a list of all project keys."""
res = _make_jira_api_call('/project') or []
return [proj['key'] for proj in res]
def _get_jira_usernames(watchers):
"""Return a list of Jira usernames to be added as watchers.
This takes a list of email addresses as provided from --cc and
individually queries Jira's user search for the Jira username
corresponding to that email.
"""
all_watchers = []
for watcher in watchers:
params = six.moves.urllib.parse.urlencode({'username': watcher})
req_url = '/user/search?%s' % params
res = _make_jira_api_call(req_url)
if res:
all_watchers.append(res[0]['key'])
else:
logging.warning('Unable to find a Jira user associated with '
'the email address: %s' % watcher)
return all_watchers
def _check_issue_already_exists(project, summary):
"""Check whether the new issue has already been created in Jira.
Gets any issues created with the same project + summary combination that
have not already been resolved. If the API call fails, this will not
error out but will return False and could potentially result in a dup.
"""
jql_string = ('project="%s" AND summary~"%s" AND status!="Done"'
% (project, summary))
params = six.moves.urllib.parse.urlencode({'jql': jql_string})
req_url = '/search?%s' % params
res = _make_jira_api_call(req_url)
if res is None:
logging.error('Failed testing current issue for uniqueness. '
'This issue might be created as a duplicate.')
return False
return res['total'] > 0
def _format_labels(labels):
"""Remove any spaces in a label name."""
return ['_'.join(label.split()) for label in labels]
def _add_watchers(issue_key, watchers):
"""Add a list of Jira usernames as watchers to a given issue.
Jira's /issue endpoint does not support adding a list of watchers
during issue creation, but rather requires adding the issue first,
then capturing the newly created issue key and making a separate
call to issue/<issue_key>/watchers.
Furthermore, there is a known bug in the formatting of Jira usernames
when adding them via this endpoint, which requires an extra set of
quotes. See https://jira.atlassian.com/browse/JRASERVER-29304.
TODO(jacqueline): Once this Jira Bug has been resolved, we'll need to
amend this formatting.
"""
for watcher in watchers:
watcher = "\"%s\"" % watcher
_make_jira_api_call('/issue/%s/watchers' % issue_key,
"%s" % watcher)
class Mixin(base.BaseMixin):
"""Mixin for _send_to_jira()."""
def _send_to_jira(self,
project_name=None,
labels=None,
watchers=None):
"""Send alert to Jira.
        This is not intended to be a publicly accessible function, as all
        send-to-Jira usage should originate with the bugtracker, a wrapper
        which can be modified to redirect to preferred bug tracking systems.
        See the bugtracker Mixin for more on its wrapper functionality.
Arguments:
project_name: The generic project name, which will be converted
into a Jira project key, that the alert should be posted to.
e.g. 'Infrastructure' or 'Test Prep'
labels: A list of labels to be added to the Jira issue.
e.g. ['design', 'awaiting_deploy']
watchers: A list of emails that should be converted to Jira
usernames and added as watchers on the issue.
e.g. ['[email protected]']
"""
if not self._passed_rate_limit('jira'):
return self
labels = labels or []
watchers = watchers or []
project_key = _BUGTRACKER_TO_JIRA_PROJ.get(project_name)
if project_key is None:
logging.error('Invalid Jira project name or no name provided. '
'Failed to send to Jira.')
return self
else:
all_projects = _get_jira_project_keys()
if all_projects and project_key not in all_projects:
logging.error('This is no longer a valid Jira project key. '
'The bugtracker to jira project map may need to '
'be updated.')
return self
elif not all_projects:
logging.error('Unable to verify Jira project key. This issue '
'may not be created successfully.')
issue_type = _PROJECT_TO_ISSUETYPE_ID[project_key]
priority = _SEVERITY_TO_JIRA_PRIORITY[self.severity]
issue_title = self._get_summary() or ('New Auto generated Jira task')
description = self.message
labels.append('auto_generated')
jira_labels = _format_labels(labels)
jira_watchers = _get_jira_usernames(watchers)
payload = {"fields": {
"project": {"key": project_key},
"issuetype": {"id": issue_type},
"reporter": {"name": "jirabot"},
"priority": {"id": priority},
"labels": jira_labels,
"summary": issue_title,
"description": description,
}
}
payload_json = json.dumps(payload, sort_keys=True,
ensure_ascii=False).encode('utf-8')
if self._in_test_mode():
logging.info("alertlib: would send to jira: %s"
% (payload_json))
else:
# check that the issue does not already exist
if not _check_issue_already_exists(project_key, self.summary):
r = _make_jira_api_call('/issue', payload_json)
if jira_watchers:
issue_key = r['key']
_add_watchers(issue_key, jira_watchers)
return self # so we can chain the method calls
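# Illustrative (hypothetical) caller, shown only to motivate the `return self`
# above; in this codebase the real public entry point is the bugtracker
# Mixin's send_to_bugtracker(), not this private method:
#   alert._send_to_jira(project_name='Infrastructure',
#                       labels=['deploy'],
#                       watchers=['oncall@example.com'])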
| Khan/alertlib | alertlib/jira.py | Python | mit | 9,067 |
#!/usr/bin/env python
from __future__ import absolute_import
import unittest
import algorithms
class TestInsertionSort(unittest.TestCase):
def test_insertion_sort_with_one_item(self):
iterable = [1]
algorithms.insertion_sort(iterable)
expected = [1]
self.assertEqual(iterable, expected)
def test_insertion_sort_with_two_items_1(self):
iterable = [1, 2]
algorithms.insertion_sort(iterable)
expected = [1, 2]
self.assertEqual(iterable, expected)
def test_insertion_sort_with_two_items_2(self):
iterable = [2, 1]
algorithms.insertion_sort(iterable)
expected = [1, 2]
self.assertEqual(iterable, expected)
def test_insertion_sort_with_three_items_1(self):
iterable = [1, 2, 3]
algorithms.insertion_sort(iterable)
expected = [1, 2, 3]
self.assertEqual(iterable, expected)
def test_insertion_sort_with_three_items_2(self):
iterable = [1, 3, 2]
algorithms.insertion_sort(iterable)
expected = [1, 2, 3]
self.assertEqual(iterable, expected)
def test_insertion_sort_with_three_items_3(self):
iterable = [2, 1, 3]
algorithms.insertion_sort(iterable)
expected = [1, 2, 3]
self.assertEqual(iterable, expected)
def test_insertion_sort_with_three_items_4(self):
iterable = [2, 3, 1]
algorithms.insertion_sort(iterable)
expected = [1, 2, 3]
self.assertEqual(iterable, expected)
def test_insertion_sort_with_three_items_5(self):
iterable = [3, 1, 2]
algorithms.insertion_sort(iterable)
expected = [1, 2, 3]
self.assertEqual(iterable, expected)
def test_insertion_sort_with_three_items_6(self):
iterable = [3, 2, 1]
algorithms.insertion_sort(iterable)
expected = [1, 2, 3]
self.assertEqual(iterable, expected)
def test_insertion_sort_with_ascending_items(self):
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
algorithms.insertion_sort(iterable)
expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(iterable, expected)
def test_insertion_sort_with_descending_items(self):
iterable = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
algorithms.insertion_sort(iterable)
expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(iterable, expected)
def test_insertion_sort_with_equal_items(self):
iterable = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
algorithms.insertion_sort(iterable)
expected = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
self.assertEqual(iterable, expected)
def test_insertion_sort_with_strings(self):
iterable = ['a', 's', 'd', 'f']
algorithms.insertion_sort(iterable)
expected = ['a', 'd', 'f', 's']
self.assertEqual(iterable, expected)
def test_insertion_sort_with_no_items(self):
iterable = []
algorithms.insertion_sort(iterable)
expected = []
self.assertEqual(iterable, expected)
def test_insertion_sort_with_none_iterable_raises_type_error(self):
iterable = None
with self.assertRaises(TypeError):
algorithms.insertion_sort(iterable)
def test_insertion_sort_is_stable_1(self):
iterable = [[1], [1], [1], [1]]
ids = [id(item) for item in iterable]
algorithms.insertion_sort(iterable)
expected = [id(item) for item in iterable]
self.assertEqual(ids, expected)
def test_insertion_sort_is_stable_2(self):
iterable = [[1], [2], [3], [1]]
ids = [id(item) for item in iterable if item[0] == 1]
algorithms.insertion_sort(iterable)
expected = [id(item) for item in iterable if item[0] == 1]
self.assertEqual(ids, expected)
def test_insertion_sort_is_stable_3(self):
iterable = [[2], [3], [1], [1]]
ids = [id(item) for item in iterable if item[0] == 1]
algorithms.insertion_sort(iterable)
expected = [id(item) for item in iterable if item[0] == 1]
self.assertEqual(ids, expected)
def test_insertion_sort_is_stable_4(self):
iterable = [[3], [2], [3], [1]]
ids = [id(item) for item in iterable if item[0] == 1]
algorithms.insertion_sort(iterable)
expected = [id(item) for item in iterable if item[0] == 1]
self.assertEqual(ids, expected)
if __name__ == '__main__':
unittest.main()
| michaelreneer/Algorithms | python/test/test_insertion_sort.py | Python | mit | 4,120 |
#!/usr/bin/env python
# encoding: utf-8
from time import sleep
from flask_restful import Resource
from flask import request
from . import api
from config.logger import Log
from helpers import get_job_details, get_raw_log_output, get_playbooks_list
from queue import Rds
from tasks import RunJob
from uuid import uuid4
import json
log = Log.getLogger(__name__)
@api.route('/')
class List_Playbooks(Resource):
def get(self):
playbooks_list = get_playbooks_list()
return {'playbooks': playbooks_list}
@api.route('/<playbook>')
class Playbook(Resource):
def post(self, playbook):
log.debug("Inovke run playbook task.")
job_params = request.get_json()
job_params['playbook'] = playbook
task_id = str(uuid4())
job = RunJob()
result = job.apply_async(args=[job_params], task_id = task_id, time_limit=86400)
        # the celery task is an asynchronous call, so wait briefly for redis to get the message
sleep(0.1)
job_details = get_job_details(task_id)
log.debug("Job details: %s" % job_details)
return {
'playbook': job_details['playbook'],
'task_id': job_details['task_id']
}
def get(self, playbook):
return
@api.route('/<playbook>/detail')
class Playbook_Detail(Resource):
def get(self, playbook):
raise NotImplementedError
@api.route('/task')
class List_tasks(Resource):
def get(self):
raise NotImplementedError
@api.route('/task/<task_id>')
class Task_Detail(Resource):
def get(self, task_id):
log.debug("Inovke task result inspecting.")
######
# step 1: get raw log
raw_log_output = get_raw_log_output(task_id)
######
# step 2: get result from db
job_details = get_job_details(task_id)
return {
'job_task_id': task_id,
'raw_log_output': raw_log_output,
'job_details': job_details,
}
@api.route('/callback/<task_id>')
class Callback(Resource):
def get(self, task_id):
log = Log.getLogger(__name__ + ".Callback.get")
queue = Rds('job-' + task_id)
queue_data = queue.getter()
log.debug("Task event callback called: %s %s" % (task_id, json.dumps(queue_data)))
def post(self, task_id):
log = Log.getLogger(__name__ + ".Callback.post")
data = {}
data = request.get_json()
queue = Rds('job-' + task_id)
log.debug(queue._key_name)
queue_data = queue.getter()
step_result_id = len(queue_data['step_result']) + 1
queue_data['step_result'][step_result_id] = data
queue_data['update_time'] = data['timestamp']
        if 'result' in data:
queue_data['result'] = data['result']
# if data.has_key['task_msg']:
# queue_data['last_message'] = data
        if 'task_msg' in data:
queue_data['last_message'] = data['task_msg']
queue.setter(queue_data)
log.debug("Task event updated from callback: %s %s" % (task_id, json.dumps(queue_data)))
# log.debug(data)
| Little-gg/kree | main/routes.py | Python | mit | 3,180 |
# TODO: add tests!
def test_cache():
assert True
| hzdg/django-combocache | tests/test_cache.py | Python | mit | 53 |
NODE, EDGE, ATTR = range(3)
class Node(object):
def __init__(self, name, attrs):
self.name = name
self.attrs = attrs
def __eq__(self, other):
return self.name == other.name and self.attrs == other.attrs
class Edge(object):
def __init__(self, src, dst, attrs):
self.src = src
self.dst = dst
self.attrs = attrs
def __eq__(self, other):
return (self.src == other.src and
self.dst == other.dst and
self.attrs == other.attrs)
class Graph(object):
def __init__(self, data=None):
pass
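        # Exercise stub: a full implementation would presumably walk `data`
        # (a list of (NODE, name, attrs), (EDGE, src, dst, attrs) and
        # (ATTR, key, value) tuples, per the constants above) and populate
        # the graph's nodes, edges and attributes.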
| smalley/python | exercises/dot-dsl/dot_dsl.py | Python | mit | 602 |
from src.extensions import db
from sqlalchemy.dialects.postgresql import JSON
class PDFForm(db.Model):
__tablename__ = 'pdfform'
id = db.Column(db.Integer, primary_key=True, index=True)
added_on = db.Column(db.DateTime(), server_default=db.func.now())
original_pdf = db.Column(db.LargeBinary)
original_pdf_title = db.Column(db.String(128))
fdf_mapping = db.Column(JSON)
post_count = db.Column(db.Integer, default=0)
latest_post = db.Column(db.DateTime())
def __repr__(self):
return '<PDFForm:"[{}] {}">'.format(self.id, self.original_pdf_title)
| zhoux10/pdfhook | src/pdfhook/models.py | Python | mit | 594 |
try:
    from collections.abc import Mapping, MutableSequence, Sequence
except ImportError:  # Python 2
    from collections import Mapping, MutableSequence, Sequence
from django.utils.module_loading import import_string
from six import string_types
from .base import Base
from .checker import Checker
from .settings import DottedAccessDict, Settings
from .types import LocalSetting
from .strategy import INIJSONStrategy
class Loader(Base):
def __init__(self, file_name, section=None, registry=None, strategy_type=INIJSONStrategy):
super(Loader, self).__init__(file_name, section, registry, strategy_type)
def load_and_check(self, base_settings, prompt=None):
"""Load settings and check them.
Loads the settings from ``base_settings``, then checks them.
Returns:
(merged settings, True) on success
(None, False) on failure
"""
checker = Checker(self.file_name, self.section, self.registry, self.strategy_type, prompt)
settings = self.load(base_settings)
if checker.check(settings):
return settings, True
return None, False
def load(self, base_settings):
"""Merge local settings from file with ``base_settings``.
Returns a new settings dict containing the base settings and the
loaded settings. Includes:
- base settings
- settings from extended file(s), if any
- settings from file
"""
is_valid_key = lambda k: k.isupper() and not k.startswith('_')
# Base settings, including `LocalSetting`s, loaded from the
# Django settings module.
valid_keys = (k for k in base_settings if is_valid_key(k))
base_settings = DottedAccessDict((k, base_settings[k]) for k in valid_keys)
# Settings read from the settings file; values are unprocessed.
settings_from_file = self.strategy.read_file(self.file_name, self.section)
settings_from_file.pop('extends', None)
# The fully resolved settings.
settings = Settings(base_settings)
settings_names = []
settings_not_decoded = set()
for name, value in settings_from_file.items():
for prefix in ('PREPEND.', 'APPEND.', 'SWAP.'):
if name.startswith(prefix):
name = name[len(prefix):]
name = '{prefix}({name})'.format(**locals())
break
settings_names.append(name)
# Attempt to decode raw values. Errors in decoding at this
# stage are ignored.
try:
value = self.strategy.decode_value(value)
except ValueError:
settings_not_decoded.add(name)
settings.set_dotted(name, value)
# See if this setting corresponds to a `LocalSetting`. If
# so, note that the `LocalSetting` has a value by putting it
# in the registry. This also makes it easy to retrieve the
# `LocalSetting` later so its value can be set.
current_value = base_settings.get_dotted(name, None)
if isinstance(current_value, LocalSetting):
self.registry[current_value] = name
# Interpolate values of settings read from file. When a setting
# that couldn't be decoded previously is encountered, its post-
# interpolation value will be decoded.
for name in settings_names:
value = settings.get_dotted(name)
value, _ = self._interpolate_values(value, settings)
if name in settings_not_decoded:
value = self.strategy.decode_value(value)
settings.set_dotted(name, value)
# Interpolate base settings.
self._interpolate_values(settings, settings)
self._interpolate_keys(settings, settings)
self._prepend_extras(settings, settings.pop('PREPEND', None))
self._append_extras(settings, settings.pop('APPEND', None))
self._swap_list_items(settings, settings.pop('SWAP', None))
self._import_from_string(settings, settings.pop('IMPORT_FROM_STRING', None))
for local_setting, name in self.registry.items():
local_setting.value = settings.get_dotted(name)
return settings
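    # Illustrative settings-file snippet (assumed INI/JSON-strategy contents,
    # not shipped with this package) showing the prefixes handled above:
    #
    #   [dev]
    #   DEBUG = true
    #   PREPEND.INSTALLED_APPS = ["debug_toolbar"]
    #   APPEND.MIDDLEWARE = ["debug_toolbar.middleware.DebugToolbarMiddleware"]
    #   SWAP.MIDDLEWARE = {"a.OldMiddleware": "b.NewMiddleware"}
    #
    # The PREPEND./APPEND. values are merged into the matching list settings
    # and SWAP maps old list items to replacements, via the helpers below.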
# Post-processing
def _interpolate_values(self, obj, settings):
all_interpolated = []
interpolated = []
while interpolated is not None:
all_interpolated.extend(interpolated)
obj, interpolated = self._interpolate_values_inner(obj, settings)
return obj, all_interpolated
def _interpolate_values_inner(self, obj, settings, _interpolated=None):
if _interpolated is None:
_interpolated = []
if isinstance(obj, string_types):
new_value, changed = self._inject(obj, settings)
if changed:
_interpolated.append((obj, new_value))
obj = new_value
elif isinstance(obj, Mapping):
for k, v in obj.items():
obj[k], _interpolated = self._interpolate_values_inner(v, settings, _interpolated)
elif isinstance(obj, MutableSequence):
for i, v in enumerate(obj):
obj[i], _interpolated = self._interpolate_values_inner(v, settings, _interpolated)
elif isinstance(obj, Sequence):
items = []
for v in obj:
item, _interpolated = self._interpolate_values_inner(v, settings, _interpolated)
items.append(item)
obj = obj.__class__(items)
return obj, _interpolated or None
def _interpolate_keys(self, obj, settings):
if isinstance(obj, Mapping):
replacements = {}
for k, v in obj.items():
if isinstance(k, string_types):
new_k, changed = self._inject(k, settings)
if changed:
replacements[k] = new_k
self._interpolate_keys(v, settings)
for k, new_k in replacements.items():
obj[new_k] = obj[k]
del obj[k]
elif isinstance(obj, Sequence) and not isinstance(obj, string_types):
for item in obj:
self._interpolate_keys(item, settings)
def _prepend_extras(self, settings, extras):
if not extras:
return
for name, extra_val in extras.items():
if not extra_val:
continue
current_val = settings.get_dotted(name)
if not isinstance(current_val, Sequence):
raise TypeError('PREPEND only works with list-type settings')
settings.set_dotted(name, extra_val + current_val)
def _append_extras(self, settings, extras):
if not extras:
return
for name, extra_val in extras.items():
if not extra_val:
continue
current_val = settings.get_dotted(name)
if not isinstance(current_val, Sequence):
raise TypeError('APPEND only works with list-type settings')
settings.set_dotted(name, current_val + extra_val)
def _swap_list_items(self, settings, swap):
if not swap:
return
for name, swap_map in swap.items():
if not swap_map:
continue
current_val = settings.get_dotted(name)
if not isinstance(current_val, Sequence):
raise TypeError('SWAP only works with list-type settings')
for old_item, new_item in swap_map.items():
k = current_val.index(old_item)
current_val[k] = new_item
def _import_from_string(self, settings, import_from_string):
if not import_from_string:
return
for name in import_from_string:
current_val = settings.get_dotted(name)
if isinstance(current_val, string_types):
settings.set_dotted(name, import_string(current_val))
def _inject(self, value, settings):
"""Inject ``settings`` into ``value``.
Go through ``value`` looking for ``{{NAME}}`` groups and replace
each group with the value of the named item from ``settings``.
Args:
value (str): The value to inject settings into
settings: An object that provides the dotted access interface
Returns:
(str, bool): The new value and whether the new value is
different from the original value
"""
assert isinstance(value, string_types), 'Expected str; got {0.__class__}'.format(value)
begin, end = '{{', '}}'
if begin not in value:
return value, False
new_value = value
begin_pos, end_pos = 0, None
len_begin, len_end = len(begin), len(end)
len_value = len(new_value)
while begin_pos < len_value:
# Find next {{.
begin_pos = new_value.find(begin, begin_pos)
if begin_pos == -1:
break
# Save everything before {{.
before = new_value[:begin_pos]
# Find }} after {{.
begin_pos += len_begin
end_pos = new_value.find(end, begin_pos)
if end_pos == -1:
raise ValueError('Unmatched {begin}...{end} in {value}'.format(**locals()))
# Get name between {{ and }}, ignoring leading and trailing
# whitespace.
name = new_value[begin_pos:end_pos]
name = name.strip()
if not name:
raise ValueError('Empty name in {value}'.format(**locals()))
# Save everything after }}.
after_pos = end_pos + len_end
try:
after = new_value[after_pos:]
except IndexError:
# Reached end of value.
after = ''
# Retrieve string value for named setting (the "injection
# value").
try:
injection_value = settings.get_dotted(name)
except KeyError:
raise KeyError('{name} not found in {settings}'.format(**locals()))
if not isinstance(injection_value, string_types):
injection_value = self.strategy.encode_value(injection_value)
# Combine before, inject value, and after to get the new
# value.
new_value = ''.join((before, injection_value, after))
# Continue after injected value.
begin_pos = len(before) + len(injection_value)
len_value = len(new_value)
return new_value, (new_value != value)
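    # Illustrative behaviour (assumed settings): if settings.get_dotted('PROJECT.name')
    # returns 'myproj', then _inject('/var/log/{{PROJECT.name}}.log', settings)
    # returns ('/var/log/myproj.log', True); a value containing no '{{' is
    # returned unchanged with False.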
| PSU-OIT-ARC/django-local-settings | local_settings/loader.py | Python | mit | 10,650 |
import os
import sys
import time
import hashlib
import zlib
import random
import string
import subprocess as sb
import redis
import json
from collections import Counter
digestsize = 20
class RedisDataStore:
def __init__(self, loc, db=0):
self.conn = redis.StrictRedis(loc, db=db)
def post_experiment(self, jobhash, N, params):
"""
Sets (in order) the:
jobs:githashes
params:sources
experiments:times
then adds experiments to jobs:new
N: number of repeats requested
params: JSON param string
"""
r = self.conn
self.check_githash(jobhash)
if params.strip() == "" or params is None:
params = '{}'
# cleanedparams = yaml.dump(yaml.load(params)).strip()
print(params)
cleanedparams = json.dumps(json.loads(params)).strip()
cleanedparams = zlib.compress(cleanedparams)
paramhash = self.hash(cleanedparams)
exp = jobhash + '|' + paramhash
r.hset('params:sources', paramhash, cleanedparams)
r.hset('experiments:times', exp, r.time()[0])
r.lpush('jobs:new', *([exp]*N))
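    # Illustrative call (assumed values):
    #   store = RedisDataStore('localhost')
    #   store.post_experiment(jobhash, N=5, params='{"alpha": 0.1}')
    # pushes five copies of "<jobhash>|<paramhash>" onto the jobs:new list.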
def check_githash(self, jobhash):
r = self.conn
if not os.path.exists('.git'):
return
githash = sb.check_output('git rev-parse HEAD'.split()).strip()
storedgithash = r.hget('jobs:githashes', jobhash)
if storedgithash is not None and githash != storedgithash:
print('ERROR: This jobfile has already been run ' +
'under a different version of the code.')
sys.exit(-1)
# githash = githash + ' + ' + storedgithash
r.hset('jobs:githashes', jobhash, githash)
def post_jobfile(self, source, desc):
"""
Posts job in jobs:sources
source: path to source or [partial] existing hash
desc: string description saved to jobs:descs
"""
r = self.conn
jobhash = self.get_jobhash(source)
if r.hexists('jobs:sources', jobhash):
print("WARNING: This jobfile has already been submitted.\n" +
"Modifying file and resubmitting.")
N = 12
rstr = "\n#" + ''.join(
random.choice(string.ascii_uppercase +
string.digits) for x in range(N))
if not os.path.exists(source):
print("ERROR: Cannot change source {} quiting.".format(source))
sys.exit(-1)
sb.check_call('echo "{}" >> {}'.format(rstr, source), shell=True)
jobhash = self.get_jobhash(source)
r.hset('jobs:sources', jobhash, self.get_jobfile_disk(source))
r.hset('jobs:descs', jobhash, desc)
r.hset('jobs:times', jobhash, r.time()[0])
print "Posted hash: %s" % jobhash[:8]
#if not os.path.exists('.exps'):
#os.makedirs('.exps')
#newfile = os.path.join('.exps', jobhash+'.py')
#if not os.path.exists(newfile):
#with open(newfile,'w') as fid:
#fid.write(zlib.decompress(self.get_jobfile(source)))
return jobhash
def describe_jobfile(self, source, desc):
""" Describes job in jobs:descs:<hash>
Needs r: redis object
source: path to source or [partial] existing hash
desc: short textual description.
"""
r = self.conn
jobhash = self.get_jobhash(source)
if r.hexists('jobs:descs', jobhash):
old_desc = r.hget('jobs:descs', jobhash)
if desc != old_desc:
print("Warning: This job already has description:")
cont = raw_input("Would you like to override? [y/n]: ")
if cont.upper().strip()[0] == 'Y':
print("Overwriting.")
else:
print("Exiting.")
sys.exit(0)
r.hset('jobs:descs', jobhash, desc)
def get_description(self, jobhash):
""" Gets job description in jobs:descs:<hash> """
return self.conn.hget('jobs:descs', jobhash)
def get_jobfile_disk(self, val):
""" Returns compressed source from file path"""
if os.path.exists(val):
with open(val,'r') as fid:
return zlib.compress(fid.read())
sys.exit('Could not find valid source that began with hash %s' % val)
def get_jobfile_db(self, val):
""" Returns compressed source from (partial) hash"""
r = self.conn
if len(val) == digestsize:
return r.hget('jobs:sources', val)
for h in r.hkeys('jobs:sources'):
if h.startswith(val):
return r.hget('jobs:sources', h)
sys.exit('Could not find valid source that began with hash %s' % val)
def get_jobhash(self, val):
""" Returns hash from file path or (partial) hash"""
if len(val) == digestsize and val.isalnum():
return val
if os.path.exists(val):
with open(val,'r') as fid:
return self.hash(fid.read())
r = self.conn
for h in r.hkeys('jobs:sources'):
if h.startswith(val):
return h
sys.exit('Could not find valid hash that began with hash %s' % val)
def get_params(self, phash):
""" Returns value of the parameter hash from params:sources """
return zlib.decompress(self.conn.hget('params:sources', phash))
def hash(self, data):
return hashlib.sha1(data).hexdigest()
def kill_workers(self):
r = self.conn
if r.zcard('workers:hb') == 0:
print 'No living clients to kill.'
sys.exit(0)
assert not r.exists('workers:stop')
r.set('workers:stop','ALL')
print('Waiting for all workers to stop...')
try:
num = r.zcard('workers:hb')
while num > 0:
print("...%d workers remaining." % num)
time.sleep(1)
num = r.zcard('workers:hb')
print("All workers stopped.")
except KeyboardInterrupt:
print("Stopping")
finally:
r.delete('workers:stop')
def job_status(self, argv):
r = self.conn
if len(argv) == 3:
verbose=True
else:
verbose=False
new = r.llen('jobs:new') or '0'
working = r.llen('jobs:working') or '0'
done = r.get('jobs:numdone') or '0'
failed = r.get('jobs:failed') or '0'
if not verbose:
print("\t%s jobs pending\n\t%s running\n\t%s completed\n\t%s failed"%
(new, working, done, failed))
else:
print("Pending jobs (%s):" % new)
joblist = r.lrange('jobs:new', 0, -1)
jobcounts = Counter(joblist)
for h,count in jobcounts.iteritems():
print('\t%4d: %s' % (count, h[:8]))
print("\nIn-progress jobs (%s):"% working)
joblist = r.lrange('jobs:working', 0, -1)
jobcounts = Counter(joblist)
for h,count in jobcounts.iteritems():
print('\t%4d: %s' % (count, h[:8]))
print("\nDone jobs (%s)" % done)
#keys = r.keys('jobs:done:*')
#for k in sorted(keys):
#print('\t%4s: %s' % (r.llen(k),k.split(':')[-1][:8]))
print("\nFailed jobs (%s)" % failed)
def worker_status(self, argv):
r = self.conn
clients = r.zrevrange('workers:hb', 0, -1)
num = len(clients)
if len(argv) == 3:
verbose=True
else:
verbose=False
if num == 0:
print('There are currently no clients alive.')
elif not verbose:
print("There are %d clients alive." % num)
else:
print("The %d clients alive are:" % num)
curr_time = r.time()
for x in clients:
cl = x #js.loads(zlib.decompress(x))
print '\t{0:<15} with hb {1:3.1f} seconds ago'\
.format(cl, curr_time[0] + (curr_time[1]*1e-6) - int(r.zscore('workers:hb',x)))
def select_jobfile(self, sel=None, fullhashes=False):
return self.select_jobfiles(sel, fullhashes)[0]
def select_jobfiles(self, sel=None, fullhashes=False):
r = self.conn
hashes = sorted(r.hkeys('jobs:sources'), key=lambda x: int(r.hget('jobs:times', x) or '0'))
if sel is None:
for i, d in enumerate(hashes):
desc = r.hget('jobs:descs', d) or ''
if fullhashes:
print "%4d. %s %s" % (i, d, desc)
else:
print "%4d. %s %s" % (i, d[:5], desc)
sel = raw_input("Choose a dataset or range of datasets or 'q' to exit: ")
sel = [x.strip() for x in sel.split('-')]
if len(sel) == 1:
if not sel[0].isdigit() or int(sel[0]) not in range(i+1):
sys.exit()
a = b = int(sel[0])
else:
a,b = int(sel[0]), int(sel[1])
else:
a,b = sel, sel
return [hashes[i] for i in range(a,b+1)]
def clean_jobfiles(self):
for res in self.select_jobfiles():
self.conn.hdel('jobs:descs', res)
self.conn.hdel('jobs:sources', res)
self.conn.hdel('jobs:times', res)
self.conn.hdel('jobs:githashes', res)
def gc(self):
r = self.conn
r.delete('jobs:failed')
r.delete('jobs:numdone')
clients = r.zrevrange('workers:hb', 0, -1)
num = len(clients)
if num == 0:
r.delete('jobs:working')
print("Done!")
def push_heartbeat(self, idstring):
self.conn.zadd('workers:hb', self.conn.time()[0], idstring)
def remove_heartbeat(self, idstring):
self.conn.zrem('workers:hb', idstring)
def query_stop(self, host):
cmd = self.conn.get('workers:stop')
if cmd == 'ALL' or cmd == host:
return True
else:
return False
def remove_working_job(self, exp):
self.conn.lrem('jobs:working', 1, exp)
def reload_working_job(self, exp):
self.conn.lrem('jobs:working', 1, exp)
if exp is not None:
self.conn.lpush('jobs:new', exp)
def poll_work(self):
return self.conn.rpoplpush('jobs:new', 'jobs:working')
def job_fail(self):
self.conn.incr('jobs:failed')
def job_succeed(self):
self.conn.incr('jobs:numdone')
| binarybana/jobmon | jobmon/redisbackend.py | Python | mit | 10,622 |
import fileinput
import os
import re
import sys
import shutil
import time
from conans import ConanFile, tools
class MSYS2(ConanFile):
name = "MSYS2"
version = "2016.10.25"
license = "various, http://www.msys2.org"
url = "http://www.msys2.org"
settings = {"os": ["Windows"]}
def checkMsysSetupFinished(self, logfile, token):
try:
for line in fileinput.input(logfile):
if re.search(token, line):
return True
except:
pass
return False
def countdown(self, t, message, abortCheckFunction=None):
line = ""
while t:
mins, secs = divmod(t, 60)
timeformat = "{:02d}:{:02d}".format(mins, secs)
line = message + timeformat
print(line, end="\r")
time.sleep(1)
t -= 1
if abortCheckFunction:
# additional waiting time when abort check function resolves to True
if abortCheckFunction():
t = 10
print(" " * len(line), end="\r")
def getEnvPathSuffix(self, rootDir):
suffix = ""
msysRootDirRelative = os.path.dirname(self.getMsysCmdFileRelative(rootDir))
msysRootDirAbsolute = os.path.join(rootDir, msysRootDirRelative)
suffix += ";" + msysRootDirAbsolute
msysUsrBinDirRelative = os.path.join(msysRootDirRelative, "usr", "bin")
msysUsrBinDirAbsolute = os.path.join(rootDir, msysUsrBinDirRelative)
suffix += ";" + msysUsrBinDirAbsolute
return suffix
def getIntoFolder(self, url, label, subdir=True):
result = self.prepareDownload(url, label)
tools.download(url, result["filename"], retry=3, retry_wait=10)
directory = ""
if subdir:
directory = os.path.join(os.getcwd(), result["name"])
if result["extension"] == ".xz":
tools.untargz(result["filename"], directory)
else:
tools.unzip(result["filename"], directory)
os.unlink(result["filename"])
return result
def getMsysCmdFileRelative(self, rootDir):
msysFileRelative = os.path.join(self.name, "msys2_shell.cmd")
msysFile = os.path.join(rootDir, msysFileRelative)
if not os.path.exists(msysFile):
raise Exception("failed to locate MSYS2 command file in " + msysFile)
return msysFileRelative
def getSubdirectories(self, d):
return [ f for f in os.listdir(d) if os.path.isdir(f) ]
def prepareDownload(self, url, label):
self.output.info("")
self.output.info("processing " + label + " ...")
filename = os.path.basename(url)
name = os.path.splitext(filename)[0]
extension = os.path.splitext(filename)[1]
return { "name": name, "filename": filename, "extension": extension }
def build(self):
self.output.info("")
self.output.info("---------- build ----------")
self.output.info("")
url = "http://repo.msys2.org/distrib/i686/msys2-base-i686-20161025.tar.xz"
self.getIntoFolder(url, self.name, False)
dirnames = self.getSubdirectories(".")
if len(dirnames) < 1:
raise Exception("archive does not contain any subdirectories")
os.rename(dirnames[0], self.name)
def package(self):
self.output.info("")
self.output.info("---------- package ----------")
self.output.info("")
rootDir = os.getcwd()
# copy the directory from build dir to package dir
cmdFile = self.getMsysCmdFileRelative(rootDir)
cmdDirName = os.path.dirname(cmdFile)
# self.copy does not copy empty directories which leads to problems when calling MSYS2 setup because of a missing /tmp directory
# self.copy("*", dst=cmdDirName, src=cmdDirName)
src = os.path.join(rootDir, cmdDirName)
dst = os.path.join(self.package_folder, cmdDirName)
shutil.copytree(src, dst)
rootDir = self.package_folder
# patch the MSYS2 command file in the package directory to be able to determine if the setup is finished by logging the console output to a file
cmdFileAbsolute = self.getMsysCmdFileRelative(rootDir)
cmdFileAbsolute = os.path.join(rootDir, cmdFileAbsolute)
logfile = "temp.log"
for line in fileinput.input(cmdFileAbsolute, inplace=True):
line = re.sub(r"(-i /msys2.ico)", r"\1 -l tmp/" + logfile, line)
sys.stdout.write(line)
# set path environment variable to be able to call MSYS2 and pacman
pathSuffix = self.getEnvPathSuffix(rootDir)
os.environ["path"] += ";" + pathSuffix
rootDir = os.path.join(rootDir, cmdDirName)
cmdFile = os.path.basename(cmdFile)
logfile = os.path.join(rootDir, "tmp", logfile)
with tools.chdir(rootDir):
self.run(cmdFile + " \"\"")
# check if the MSYS2 setup finished
self.countdown(120, "remaining grace period for MSYS2 setup: ", (lambda: self.checkMsysSetupFinished(logfile, "--> Installing /usr/share/info/which.info.gz ... done")))
try:
os.unlink(logfile)
except:
pass
self.run("pacman --noconfirm -S make")
self.run("pacman --noconfirm -S perl")
self.run("pacman --noconfirm -S yasm")
# create empty include directory to avoid Conan CMake error "Imported target [...] includes non-existent path [...] /include" when using this package in a CMake project
os.makedirs(os.path.join(self.package_folder, "include"))
def package_info(self):
self.output.info("")
self.output.info("---------- package_info ----------")
self.output.info("")
self.env_info.path.append(os.path.join(self.package_folder, self.name))
self.env_info.path.append(os.path.join(self.package_folder, self.name, "usr", "bin")) | ConnectedVision/connectedvision | build_env/Conan/packages/MSYS2/2016.10.25/2.3.0/conanfile.py | Python | mit | 5,291 |
def application(environment, start_response):
data = "Hello Mundo!\n"
start_response("200 OK", [
("Content-Type", "text/plain"),
("Content-Length", str(len(data)))
])
return iter([data]) | humanzilla/wsgi-listenme | tests/wsgi.py | Python | mit | 231 |
"""
Class incorporating the meta information that we know of a row or column.
"""
import dataclasses
import typing
@dataclasses.dataclass
class Line():
"""
Class incorporating the meta information that we know of a row or column.
"""
# pylint: disable=too-few-public-methods
size: int
idx: int
blocks: typing.List[typing.Any]
def __str__(self):
str_ = "{!s}, size: {!s}, blocks: [".format(self.idx, self.size)
return str_ + "; ".join((str(block) for block in self.blocks)) + "]"
class Row(Line):
"""
Class incorporating the information that we know of a row.
"""
# pylint: disable=too-few-public-methods
def __init__(self, *args):
self.is_row = True
super(Row, self).__init__(*args)
def __str__(self):
return "row: " + super(Row, self).__str__()
class Column(Line):
"""
Class incorporating the information that we know of a column.
"""
# pylint: disable=too-few-public-methods
def __init__(self, *args):
self.is_row = False
super(Column, self).__init__(*args)
def __str__(self):
return "col: " + super(Column, self).__str__()
| durante987/nonogram_solver | nonogram/raster/line.py | Python | mit | 1,185 |
"""
Filename: calc_amoc_metric.py
Author: Damien Irving, [email protected]
Description: Calculate the AMOC metric
"""
# Import general Python modules
import sys, os, pdb
import argparse
import numpy
import iris
from iris.experimental.equalise_cubes import equalise_attributes
# Import my modules
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
repo_dir = os.path.join(repo_dir, directory)
if directory == 'ocean-analysis':
break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
import general_io as gio
import convenient_universal as uconv
import timeseries
import spatial_weights
except ImportError:
raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions
history = []
basin_index = {'pacific': 1,
'atlantic': 0,
'globe': 2}
def save_history(cube, field, filename):
"""Save the history attribute when reading the data.
(This is required because the history attribute differs between input files
and is therefore deleted upon equilising attributes)
"""
history.append(cube.attributes['history'])
def main(inargs):
"""Run the program."""
region_constraint = iris.Constraint(region='atlantic_arctic_ocean') # "atlantic_arctic_ocean", "indian_pacific_ocean ", "global_ocean "
cube = iris.load(inargs.infiles, 'ocean_meridional_overturning_mass_streamfunction' & region_constraint, callback=save_history)
if not cube:
cube = iris.load(inargs.infiles, 'ocean_meridional_overturning_mass_streamfunction', callback=save_history)
equalise_attributes(cube)
cube = cube.concatenate_cube()
cube = cube[:, 0, : ,:] # index for Atlantic
else:
equalise_attributes(cube)
cube = cube.concatenate_cube()
cube.remove_coord('region')
cube = gio.check_time_units(cube)
cube = timeseries.convert_to_annual(cube)
target_lat, error = uconv.find_nearest(cube.coord('latitude').points, 30, index=False)
cube = cube.extract(iris.Constraint(latitude=target_lat))
cube.remove_coord('latitude')
assert str(cube.units) == 'kg s-1'
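    # Convert the mass transport (kg/s) to a volume transport in sverdrups:
    # divide by a reference seawater density (1023 kg/m^3) and by 1e6 m^3/s per Sv.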
cube.data = (cube.data / 1023) / 1e+6
cube.units = 'Sv'
#dim_coord_names = [coord.name() for coord in cube.dim_coords]
#vert_extents = spatial_weights.calc_vertical_weights_1D(cube.coord('depth'), dim_coord_names, cube.shape)
metric = cube.collapsed('depth', iris.analysis.MAX)
metric.remove_coord('depth')
try:
metric.attributes['history'] = gio.write_metadata(file_info={inargs.infiles[0]: cube.attributes['history']})
except KeyError:
pass
iris.save(metric, inargs.outfile)
if __name__ == '__main__':
extra_info ="""
author:
Damien Irving, [email protected]
"""
description = 'Calculate the AMOC metric (annual-mean maximum volume transport streamfunction at 30N)'
parser = argparse.ArgumentParser(description=description,
epilog=extra_info,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("infiles", type=str, nargs='*', help="Input msftmyz files")
parser.add_argument("outfile", type=str, help="Output file")
args = parser.parse_args()
main(args)
| DamienIrving/ocean-analysis | data_processing/metrics/calc_amoc_metric.py | Python | mit | 3,567 |
params = {} # default params
# :param 'colors': the number of colors to use for colorings
# :type 'colors': int
def run(graph, setup, params):
"""
    Searches the graph using backtracking
    :param graph: The graph to color
    :type graph: list[list[int]]
    :param setup: the setup for this algorithm
    :type setup: Setup
    :param params: algorithm parameters; params['colors'] is the number of
        colors to try and color with
    :type params: dict
    :rtype: dict
    :return: the colored graph
"""
num_colors = params['colors']
coloring, avail_colors, stack = init(graph, num_colors)
while True:
if len(stack) == 0 or complete(coloring, graph):
if setup:
setup.logger.debug(
'Finished, final coloring: {}'.format(coloring)
)
yield coloring
cur_node = stack[len(stack) - 1][1]
coloring[cur_node] = stack[len(stack) - 1][3]
if setup:
if setup.counter.increment():
if setup:
setup.logger.debug(
"Preempted with coloring:{}".format(coloring)
)
yield coloring
setup.logger.debug(
'Finished, final coloring: {}'.format(coloring)
)
choose_next_node(
stack, coloring, graph, avail_colors, num_colors, setup
)
def init(graph, num_colors):
"""
Initializes the backtracking algorithm
:param graph: graph to color
:type graph: list[list[int]]
:param num_colors: the number of colors to color with
:type num_colors: int
:rtype: tuple
    :return: the initial states of coloring, avail_colors, and stack
"""
avail_colors = []
for _ in xrange(len(graph)):
avail_colors.append(set(xrange(num_colors)))
coloring = dict()
stack = list()
choose_next_node(stack, coloring, graph, avail_colors, num_colors)
return (
coloring,
avail_colors,
stack
)
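# Illustrative initial state (assumed tiny instance): for a 3-node triangle
# graph [[1, 2], [0, 2], [0, 1]] and num_colors=2, init() starts every node
# with the full color set {0, 1}, then calls choose_next_node once to pick
# and push the first node (with a tentative color for it) onto the stack.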
def choose_next_node(
stack, coloring, graph, avail_colors, num_colors, setup=None):
"""
Chooses the next node and its coloring and adds it to the stack
:param setup: the setup with the counter and the logger
:type setup: Setup
:param stack: the current stack of the program
:type stack: list[dict{int:int}, int, set{int}, int}]
:param coloring: the current coloring of the program
:type coloring: dict{int:int}
:param graph: the current graph to color
:type graph: list[list[int]]
:param avail_colors: the available colors to color with
:type avail_colors: set{int}
:param num_colors: the number of colors to color with
:type num_colors: int
:rtype: bool
:return: True if it picks the next node and False if it doesn't
"""
keep_choosing = True
while keep_choosing:
keep_choosing = False
next_node = (
min_remaining_var(coloring, graph)
)
while next_node is None:
if len(stack) == 0:
if setup:
setup.logger.debug('Stack is empty')
return
if setup:
setup.logger.debug('About to backtrack..')
setup.logger.debug('Current stack is {}'
.format(stack))
stack.pop()
if setup:
setup.logger.debug('Just backtracked..')
setup.logger.debug('Current stack is {}'
.format(stack))
next_node = (
min_remaining_var(coloring, graph)
)
chosen_color = min_color_conflicts(
avail_colors, graph, next_node, num_colors
)
avail_colors[next_node] -= {chosen_color}
nodes_to_check = [(node, next_node) for node in graph[next_node]]
while len(nodes_to_check) > 0:
node, prev_node = nodes_to_check.pop(0)
if len(avail_colors[prev_node]) == 1:
coloring[prev_node] = list(avail_colors[prev_node])[0]
                avail_colors[node] -= {coloring[prev_node]}  # remove that forced color from the neighbour's choices
if setup:
setup.logger.debug('Doing the MAC case')
setup.logger.debug(
"""
Removing color : {} from node: {}'s color choices
because it is node: {}'s only color choice
""".format(coloring[prev_node], node, prev_node)
)
if setup:
setup.logger.debug('About to add to MAC queue..')
setup.logger.debug('Cur queue: {}'.format(nodes_to_check))
nodes_to_check.extend(
(temp_node, node)
for temp_node in graph[node]
if temp_node != prev_node
)
if setup:
setup.logger.debug("Added to MAC queue..")
setup.logger.debug('Cur queue: {}'.format(nodes_to_check))
            elif len(avail_colors[node]) == 0:
if setup:
setup.logger.debug(
"""
MAC found a coloring that won't work,
trying new coloring
"""
)
keep_choosing = True
break
if len(avail_colors[next_node]) > 0:
if setup:
setup.logger.debug('About to add to stack..')
setup.logger.debug('Current stack: {}'.format(stack))
stack.append(
[
coloring,
next_node,
avail_colors[next_node],
chosen_color
]
)
if setup:
setup.logger.debug('Added to stack..')
setup.logger.debug('Current stack: {}'.format(stack))
else:
if setup:
setup.logger.debug('About to backtrack..')
setup.logger.debug('Current stack: {}'.format(stack))
coloring[next_node] = chosen_color
stack.pop()
if setup:
if setup.counter.increment():
setup.logger.debug("Didn't finish, final coloring: {}"
.format(coloring))
                # TODO: yield coloring here; it breaks the function for some reason
setup.logger.debug('Just backtracked..')
setup.logger.debug('Current stack: {}'.format(stack))
def min_color_conflicts(avail_colors, graph, cur_node, num_color):
"""
Returns the color with the least number of conflicts
:param graph: the graph to color
:type graph: [(int, {int})]
    :param cur_node: index of the node you are attempting to color
:type cur_node: int
:param num_color: number of colors we are using
:type num_color: int
:rtype: int
    :return: the color that causes the least number of conflicts
"""
available_color_count = [[0, i] for i in xrange(num_color)]
all_colors = set(range(num_color))
for node in graph[cur_node]:
available_colors = (
avail_colors[node] - (all_colors - avail_colors[cur_node])
)
for color in available_colors:
available_color_count[color][0] = 1
return max(available_color_count)[1]
def complete(coloring, graph):
"""
Checks if the problem is solved
:param coloring: the coloring dictionary
:type coloring: dict{int: int}
:param graph: the graph to color
:type graph: [[(int, {int}]]
:rtype: bool
    :return: True if the problem is solved, False otherwise
"""
return len(coloring) == len(graph)
def min_remaining_var(coloring, graph):
"""
Finds the minimum remaining variable in the graph
- The node connected to the most nodes with colors
:param coloring: list of chosen colors
:type coloring: dict{int : int}
:param graph: graph to color
:type graph: [[(int, {int}]]
:rtype: int
:return: the index of the MRV
"""
num_neighbors_colored = list()
for index, adj_list in enumerate(graph):
colored_neighbors = set()
if coloring.get(index) is not None:
continue
for node in adj_list:
if coloring.get(node) is not None:
colored_neighbors.add(coloring.get(node))
num_neighbors_colored.append(
(len(colored_neighbors), len(adj_list), index)
)
return max(num_neighbors_colored)[2] if num_neighbors_colored else None
if __name__ == '__main__':
from ai_graph_color import problem_generator
from ai_graph_color import setup
generated_problem = problem_generator.generate_graph(100)
print generated_problem
print (
run(generated_problem, setup.Evaluation(), {'colors': 4}).next()
)
| sagersmith8/ai_graph_coloring | ai_graph_color/algorithms/backtracking_mac.py | Python | mit | 8,854 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@fn ble_parser.py
@author Stephen Finucane, 2013-2014
@email [email protected]
@about Version of simple-demo.py that formats the output to the
terminal.
"""
from __future__ import print_function
import collections
from pyblehci import BLEBuilder
from pyblehci import BLEParser
def pretty(hex_string, seperator=' '):
"""
Prettify a hex string.
>>> pretty("\x01\x02\x03\xff")
'01 02 03 FF'
"""
hex_string = hex_string.encode('hex')
out = ''
for i in range(len(hex_string)):
        if i and not i % 2:
out = out + seperator
out = out + hex_string[i].capitalize()
return out
def print_ordered_dict(dictionary):
result = ""
for key in dictionary:
if dictionary[key]:
#convert e.g. "data_len" -> "Data Len"
title = ' '.join(key.split("_")).title()
if isinstance(dictionary[key], list):
for idx2, _ in enumerate(dictionary[key]):
result += "{0} ({1})\n".format(title, idx2)
result += print_ordered_dict(dictionary[key][idx2])
elif isinstance(dictionary[key], type(collections.OrderedDict())):
result += '{0}\n{1}'.format(title, print_ordered_dict(
dictionary[key]))
else:
result += "{0:15}\t: {1}\n\t\t ({2})\n".format(
title, pretty(dictionary[key][0], ':'), dictionary[key][1])
else:
result += "{0:15}\t: None".format(key)
return result
def print_output((packet, dictionary)):
result = print_ordered_dict(dictionary)
result += 'Dump:\n{0}\n'.format(pretty(packet))
return result
def test_builder():
ble_builder = BLEBuilder()
print(print_output(ble_builder._build_command("fe00")))
print(print_output(ble_builder._build_command("fe31", param_id="\x15")))
print(print_output(ble_builder._build_command("fe04", mode="\x03")))
print(print_output(ble_builder._build_command(
"fe09", peer_addr="\x57\x6A\xE4\x31\x18\x00")))
print(print_output(ble_builder._build_command(
"fd8a", conn_handle="\x00\x00", handle="\x27\x00")))
print(print_output(ble_builder._build_command(
"fe0a", conn_handle="\x00\x00")))
def test_parser():
ble_parser = BLEParser()
print(print_output(ble_parser._split_response(
"\x04\xFF\x2C\x00\x06\x00\x06\x30\x85\x31\x18\x00\x1B\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x09\x09\x09\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x09\x09\x09\x00\x00")))
print(print_output(ble_parser._split_response(
"\x04\xFF\x08\x7F\x06\x00\x31\xFE\x02\xD0\x07")))
print(print_output(ble_parser._split_response(
"\x04\xFF\x0C\x01\x06\x00\x01\x00\x00\x57\x6A\xE4\x31\x18\x00")))
print(print_output(ble_parser._split_response(
"\x04\xFF\x14\x01\x06\x00\x01\x00\x00\x57\x6A\xE4\x31\x18\x00\x11\x11\x11\x11\x11\x11\x11\x11")))
print(print_output(ble_parser._split_response(
"\x04\xFF\x07\x01\x06\x00\x00")))
if __name__ == "__main__":
test_builder()
test_parser()
| stephenfin/pyblehci | examples/simple-formatted-demo.py | Python | mit | 3,178 |
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from common.views import get_session
from experiment_event.models import Event
from experiment_session.models import STATUS_IN_PROGRESS
numberToCode = {i: 'bp' + str(i) for i in range(1, 11)}
numberToCode[-1] = 'ibp'
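# Resulting mapping: 1 -> 'bp1', 2 -> 'bp2', ..., 10 -> 'bp10', plus the
# special code 'ibp' for -1.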
@api_view()
def report_begin_display(request):
try:
session = get_session(request)
current_lightset = session.get_current_lightset()
Event.objects.create(combination=current_lightset, eventtype=Event.TYPE_COMBINATION_STARTED)
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
return Response('OK')
@api_view()
def report_finish_display(request):
try:
session = get_session(request)
current_lightset = session.get_current_lightset()
Event.objects.create(combination=current_lightset, eventtype=Event.TYPE_COMBINATION_FINISHED)
current_lightset.finish()
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
session.refresh_from_db()
if session.status == STATUS_IN_PROGRESS:
return Response('OK')
else:
return Response('FIN')
@api_view()
def report_button_press(request):
try:
session = get_session(request)
current_lightset = session.get_current_lightset()
code = get_event_code_from_request(request)
Event.objects.create(combination=current_lightset, eventtype=code)
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
return Response('OK')
def get_event_code_from_request(request):
if 'number' not in request.GET:
raise Exception('Missing "number" field')
try:
return numberToCode[int(request.GET['number'])]
except ValueError:
raise Exception('"number" must be an integer')
except KeyError:
raise Exception('Unknown key number')
| piotrb5e3/1023alternative-backend | experiment_event/views.py | Python | mit | 2,000 |
"""."""
from copy import deepcopy as _dcopy
from mathphys.functions import get_namedtuple as _get_namedtuple
import numpy as np
import pyaccel
from ..orbcorr import OrbRespmat
class CouplingCorr():
"""."""
CORR_STATUS = _get_namedtuple('CorrStatus', ['Fail', 'Sucess'])
CORR_METHODS = _get_namedtuple('CorrMethods', ['Orbrespm'])
def __init__(self, model, acc, dim='4d',
skew_list=None, correction_method=None):
"""."""
self.model = model
self.acc = acc
self.dim = dim
self._corr_method = CouplingCorr.CORR_METHODS.Orbrespm
self.coup_matrix = []
self.respm = OrbRespmat(model=self.model, acc=self.acc, dim=self.dim)
self.bpm_idx = self.respm.fam_data['BPM']['index']
if skew_list is None:
self.skew_idx = self.respm.fam_data['QS']['index']
else:
self.skew_idx = skew_list
self._corr_method = correction_method
@property
def corr_method(self):
"""."""
return self._corr_method
@corr_method.setter
def corr_method(self, value):
if value is None:
return
if isinstance(value, str):
self._corr_method = int(
value not in CouplingCorr.CORR_METHODS._fields[0])
elif int(value) in CouplingCorr.CORR_METHODS:
self._corr_method = int(value)
@property
def corr_method_str(self):
"""."""
return CouplingCorr.CORR_METHODS._fields[self._corr_method]
@property
def _nbpm(self):
return len(self.bpm_idx)
@property
def _nch(self):
return len(self.respm.fam_data['CH']['index'])
@property
def _ncv(self):
return len(self.respm.fam_data['CV']['index'])
@property
def _nskew(self):
return len(self.skew_idx)
def calc_jacobian_matrix(self, model=None):
"""Coupling correction response matrix.
Calculates the variation of off-diagonal elements of orbit response
matrix and vertical dispersion given the variation of skew
quadrupoles strength.
"""
if model is None:
model = self.model
nvec = self._nbpm * (self._nch + self._ncv + 1)
self.coup_matrix = np.zeros((nvec, len(self.skew_idx)))
delta = 1e-6
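        # Each Jacobian column below is the coupling residue produced by a
        # small KsL kick (delta, split evenly over one skew quadrupole's
        # segments) divided by that kick: a finite-difference derivative,
        # assuming the nominal machine's residue is negligible.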
for idx, nmag in enumerate(self.skew_idx):
modcopy = _dcopy(model)
dlt = delta/len(nmag)
for seg in nmag:
modcopy[seg].KsL += dlt
elem = self._get_coupling_residue(modcopy) / dlt
self.coup_matrix[:, idx] = elem
return self.coup_matrix
def _get_coupling_residue(self, model, weight_dispy=False):
self.respm.model = model
orbmat = self.respm.get_respm()
twi, *_ = pyaccel.optics.calc_twiss(model)
dispy = twi.etay[self.bpm_idx]
if weight_dispy:
dispy *= (self._nch + self._ncv)*10
mxy = orbmat[:self._nbpm, self._nch:-1]
myx = orbmat[self._nbpm:, :self._nch]
res = mxy.ravel()
res = np.hstack((res, myx.ravel()))
res = np.hstack((res, dispy.ravel()))
return res
def get_ksl(self, model=None, skewidx=None):
"""Return skew quadrupoles strengths."""
if model is None:
model = self.model
if skewidx is None:
skewidx = self.skew_idx
ksl_mag = []
for mag in skewidx:
ksl_seg = []
for seg in mag:
ksl_seg.append(model[seg].KsL)
ksl_mag.append(sum(ksl_seg))
return np.array(ksl_mag)
def _set_delta_ksl(self, model=None, skewidx=None, delta_ksl=None):
"""Set skew quadrupoles strengths in the model."""
if model is None:
model = self.model
if skewidx is None:
skewidx = self.skew_idx
if delta_ksl is None:
raise Exception('Missing Delta KsL values')
for idx_mag, mag in enumerate(skewidx):
delta = delta_ksl[idx_mag]/len(mag)
for seg in mag:
model[seg].KsL += delta
@staticmethod
def get_figm(res):
"""Calculate figure of merit from residue vector."""
return np.sqrt(np.sum(res*res)/res.size)
def coupling_corr_orbrespm_dispy(self,
model,
jacobian_matrix=None,
nsv=None, nr_max=10, tol=1e-6,
res0=None):
"""Coupling correction with orbrespm.
Calculates the pseudo-inverse of coupling correction matrix via SVD
and minimizes the residue vector [Mxy, Myx, Etay].
"""
if jacobian_matrix is None:
jmat = self.calc_jacobian_matrix(model)
umat, smat, vmat = np.linalg.svd(jmat, full_matrices=False)
ismat = 1/smat
ismat[np.isnan(ismat)] = 0
ismat[np.isinf(ismat)] = 0
if nsv is not None:
ismat[nsv:] = 0
ismat = np.diag(ismat)
ijmat = -np.dot(np.dot(vmat.T, ismat), umat.T)
if res0 is None:
res = self._get_coupling_residue(model)
else:
res = res0
bestfigm = CouplingCorr.get_figm(res)
if bestfigm < tol:
return CouplingCorr.CORR_STATUS.Sucess
for _ in range(nr_max):
dksl = np.dot(ijmat, res)
self._set_delta_ksl(model=model, delta_ksl=dksl)
res = self._get_coupling_residue(model)
figm = CouplingCorr.get_figm(res)
diff_figm = np.abs(bestfigm - figm)
if figm < bestfigm:
bestfigm = figm
if diff_figm < tol:
break
else:
return CouplingCorr.CORR_STATUS.Fail
return CouplingCorr.CORR_STATUS.Sucess
def coupling_correction(self,
model,
jacobian_matrix=None,
nsv=None, nr_max=10, tol=1e-6,
res0=None):
"""Coupling correction method selection.
Methods available:
- Minimization of off-diagonal elements of orbit response matrix and
vertical dispersion.
"""
if self.corr_method == CouplingCorr.CORR_METHODS.Orbrespm:
result = self.coupling_corr_orbrespm_dispy(
model=model, jacobian_matrix=jacobian_matrix,
nsv=nsv, nr_max=nr_max, tol=tol, res0=res0)
else:
raise Exception('Chosen method is not implemented!')
return result
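# Minimal usage sketch (illustrative; the CouplingCorr constructor arguments
# shown here are hypothetical, and the CORR_STATUS/CORR_METHODS constants are
# defined earlier in this module):
#
#     corr = CouplingCorr(model, 'SI')
#     status = corr.coupling_correction(corr.model, nr_max=20, tol=1e-7)
#     ksl = corr.get_ksl()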
| lnls-fac/apsuite | apsuite/optics_analysis/coupling_correction.py | Python | mit | 6,638 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-14 22:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rii_Api', '0006_auto_20160914_1746'),
]
operations = [
migrations.AlterField(
model_name='player',
name='playerBio',
field=models.TextField(default='There is no bio yet for this player.', max_length=2000),
),
]
| SimonHerrera/rock-island-independents | api/rii_Api/migrations/0007_auto_20160914_1747.py | Python | mit | 502 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class AlertsOperations(object):
"""AlertsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version. Constant value: "2019-03-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-03-01"
self.config = config
def list_by_data_box_edge_device(
self, device_name, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the alerts for a data box edge/gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Alert
:rtype:
~azure.mgmt.edgegateway.models.AlertPaged[~azure.mgmt.edgegateway.models.Alert]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url']
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.AlertPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.AlertPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/alerts'}
def get(
self, device_name, name, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets an alert by name.
:param device_name: The device name.
:type device_name: str
:param name: The alert name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Alert or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.edgegateway.models.Alert or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Alert', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/alerts/{name}'}
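# Usage sketch (illustrative): given a configured management client whose
# `alerts` attribute is an instance of AlertsOperations, one might write:
#
#     for alert in client.alerts.list_by_data_box_edge_device(
#             device_name='edge-device-01', resource_group_name='my-rg'):
#         print(alert.name)
#     one_alert = client.alerts.get('edge-device-01', alert_name, 'my-rg')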
| Azure/azure-sdk-for-python | sdk/edgegateway/azure-mgmt-edgegateway/azure/mgmt/edgegateway/operations/alerts_operations.py | Python | mit | 7,620 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('isisdata', '0029_auto_20160628_1954'),
]
operations = [
migrations.RemoveField(
model_name='fieldrule',
name='is_accessible',
),
migrations.AddField(
model_name='fieldrule',
name='field_action',
field=models.CharField(default='VIEW', max_length=255, choices=[(b'view', b'View'), (b'update', b'Update')]),
preserve_default=False,
),
]
| upconsulting/IsisCB | isiscb/isisdata/migrations/0030_auto_20160628_2057.py | Python | mit | 630 |
"""
Delaney dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import deepchem
def load_delaney(featurizer='ECFP', split='index'):
"""Load delaney datasets."""
# Featurize Delaney dataset
print("About to featurize Delaney dataset.")
if "DEEPCHEM_DATA_DIR" in os.environ:
data_dir = os.environ["DEEPCHEM_DATA_DIR"]
else:
data_dir = "/tmp"
dataset_file = os.path.join(data_dir, "delaney-processed.csv")
if not os.path.exists(dataset_file):
os.system(
'wget -P ' + data_dir +
' http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/delaney-processed.csv'
)
delaney_tasks = ['measured log solubility in mols per litre']
if featurizer == 'ECFP':
featurizer = deepchem.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = deepchem.feat.ConvMolFeaturizer()
elif featurizer == 'Weave':
featurizer = deepchem.feat.WeaveFeaturizer()
elif featurizer == 'Raw':
featurizer = deepchem.feat.RawFeaturizer()
loader = deepchem.data.CSVLoader(
tasks=delaney_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file, shard_size=8192)
# Initialize transformers
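  # NormalizationTransformer(transform_y=True) standardizes the measured
  # log-solubility labels (zero mean, unit variance) over the whole dataset.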
transformers = [
deepchem.trans.NormalizationTransformer(
transform_y=True, dataset=dataset)
]
print("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
splitters = {
'index': deepchem.splits.IndexSplitter(),
'random': deepchem.splits.RandomSplitter(),
'scaffold': deepchem.splits.ScaffoldSplitter()
}
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dataset)
return delaney_tasks, (train, valid, test), transformers
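# Usage sketch (illustrative):
#
#     tasks, (train, valid, test), transformers = load_delaney(
#         featurizer='GraphConv', split='scaffold')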
| joegomes/deepchem | deepchem/molnet/load_function/delaney_datasets.py | Python | mit | 1,852 |