Dataset schema (column, type, observed value range):

    repo              string, length 7 to 55
    path              string, length 4 to 127
    func_name         string, length 1 to 88
    original_string   string, length 75 to 19.8k
    language          string, 1 distinct class (python)
    code              string, length 75 to 19.8k
    code_tokens       sequence
    docstring         string, length 3 to 17.3k
    docstring_tokens  sequence
    sha               string, length 40 (fixed)
    url               string, length 87 to 242
    partition         string, 1 distinct class (train)

Sample rows from the train split follow.
wistful/pympris
pympris/Player.py
Player.SetPosition
original_string / code (python):

    def SetPosition(self, track_id, position):
        """Sets the current track position in microseconds.

        :param str track_id: The currently playing track's identifier.
        :param int position: Track position in microseconds.
            This must be between 0 and <track_length>.

        If the Position argument is less than 0, do nothing.
        If the Position argument is greater than the track length, do nothing.
        If the CanSeek property is false, this has no effect.
        """
        self.iface.SetPosition(convert2dbus(track_id, 'o'),
                               convert2dbus(position, 'x'))
[ "def", "SetPosition", "(", "self", ",", "track_id", ",", "position", ")", ":", "self", ".", "iface", ".", "SetPosition", "(", "convert2dbus", "(", "track_id", ",", "'o'", ")", ",", "convert2dbus", "(", "position", ",", "'x'", ")", ")" ]
Sets the current track position in microseconds. :param str track_id: The currently playing track's identifier. :param int position: Track position in microseconds. This must be between 0 and <track_length>. If the Position argument is less than 0, do nothing. If the Position argument is greater than the track length, do nothing. If the CanSeek property is false, this has no effect.
[ "Sets", "the", "current", "track", "position", "in", "microseconds", "." ]
4bd64a1f0d151f2adfc392ab34fd9b38894786cb
https://github.com/wistful/pympris/blob/4bd64a1f0d151f2adfc392ab34fd9b38894786cb/pympris/Player.py#L79-L91
train
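The docstring above pins down the MPRIS SetPosition contract: out-of-range positions and a false CanSeek property are silently ignored rather than raised. A minimal, self-contained sketch of that guard logic; `set_position_guarded`, `track_length`, and `can_seek` are hypothetical stand-ins, not part of pympris:

    def set_position_guarded(position, track_length, can_seek):
        # MPRIS semantics from the docstring: invalid calls are ignored, not errors.
        if not can_seek:
            return None               # CanSeek is false: the call has no effect
        if position < 0 or position > track_length:
            return None               # out-of-range position: do nothing
        return position               # value that would be sent over D-Bus

    assert set_position_guarded(5_000_000, 180_000_000, True) == 5_000_000
    assert set_position_guarded(-1, 180_000_000, True) is None
    assert set_position_guarded(1, 180_000_000, False) is None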
inveniosoftware-contrib/json-merger
json_merger/comparator.py
BaseComparator.process_lists
original_string / code (python):

    def process_lists(self):
        """Do any preprocessing of the lists."""
        for l1_idx, obj1 in enumerate(self.l1):
            for l2_idx, obj2 in enumerate(self.l2):
                if self.equal(obj1, obj2):
                    self.matches.add((l1_idx, l2_idx))
[ "def", "process_lists", "(", "self", ")", ":", "for", "l1_idx", ",", "obj1", "in", "enumerate", "(", "self", ".", "l1", ")", ":", "for", "l2_idx", ",", "obj2", "in", "enumerate", "(", "self", ".", "l2", ")", ":", "if", "self", ".", "equal", "(", "obj1", ",", "obj2", ")", ":", "self", ".", "matches", ".", "add", "(", "(", "l1_idx", ",", "l2_idx", ")", ")" ]
Do any preprocessing of the lists.
[ "Do", "any", "preprocessing", "of", "the", "lists", "." ]
adc6d372da018427e1db7b92424d3471e01a4118
https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/comparator.py#L45-L50
train
inveniosoftware-contrib/json-merger
json_merger/comparator.py
BaseComparator.get_matches
original_string / code (python):

    def get_matches(self, src, src_idx):
        """Get elements equal to the idx'th in src from the other list.

        e.g. get_matches(self, 'l1', 0) will return all elements from
        self.l2 matching with self.l1[0]
        """
        if src not in ('l1', 'l2'):
            raise ValueError('Must have one of "l1" or "l2" as src')
        if src == 'l1':
            target_list = self.l2
        else:
            target_list = self.l1
        comparator = {
            'l1': lambda s_idx, t_idx: (s_idx, t_idx) in self.matches,
            'l2': lambda s_idx, t_idx: (t_idx, s_idx) in self.matches,
        }[src]
        return [(trg_idx, obj) for trg_idx, obj in enumerate(target_list)
                if comparator(src_idx, trg_idx)]
[ "def", "get_matches", "(", "self", ",", "src", ",", "src_idx", ")", ":", "if", "src", "not", "in", "(", "'l1'", ",", "'l2'", ")", ":", "raise", "ValueError", "(", "'Must have one of \"l1\" or \"l2\" as src'", ")", "if", "src", "==", "'l1'", ":", "target_list", "=", "self", ".", "l2", "else", ":", "target_list", "=", "self", ".", "l1", "comparator", "=", "{", "'l1'", ":", "lambda", "s_idx", ",", "t_idx", ":", "(", "s_idx", ",", "t_idx", ")", "in", "self", ".", "matches", ",", "'l2'", ":", "lambda", "s_idx", ",", "t_idx", ":", "(", "t_idx", ",", "s_idx", ")", "in", "self", ".", "matches", ",", "}", "[", "src", "]", "return", "[", "(", "trg_idx", ",", "obj", ")", "for", "trg_idx", ",", "obj", "in", "enumerate", "(", "target_list", ")", "if", "comparator", "(", "src_idx", ",", "trg_idx", ")", "]" ]
Get elements equal to the idx'th in src from the other list. e.g. get_matches(self, 'l1', 0) will return all elements from self.l2 matching with self.l1[0]
[ "Get", "elements", "equal", "to", "the", "idx", "th", "in", "src", "from", "the", "other", "list", "." ]
adc6d372da018427e1db7b92424d3471e01a4118
https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/comparator.py#L56-L74
train
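The two BaseComparator methods above cooperate: process_lists fills the `matches` index set, and get_matches queries it from either side. A runnable sketch with a hypothetical `TinyComparator` standing in for BaseComparator (the `equal` rule is assumed here; subclasses would override it):

    class TinyComparator:
        """Hypothetical stand-in for BaseComparator with l1, l2 and a matches set."""
        def __init__(self, l1, l2):
            self.l1, self.l2 = l1, l2
            self.matches = set()

        def equal(self, obj1, obj2):
            return obj1 == obj2      # assumed equality rule

        def process_lists(self):
            for l1_idx, obj1 in enumerate(self.l1):
                for l2_idx, obj2 in enumerate(self.l2):
                    if self.equal(obj1, obj2):
                        self.matches.add((l1_idx, l2_idx))

        def get_matches(self, src, src_idx):
            target_list = self.l2 if src == 'l1' else self.l1
            in_matches = ((lambda s, t: (s, t) in self.matches) if src == 'l1'
                          else (lambda s, t: (t, s) in self.matches))
            return [(i, obj) for i, obj in enumerate(target_list)
                    if in_matches(src_idx, i)]

    c = TinyComparator(['a', 'b'], ['b', 'a', 'b'])
    c.process_lists()
    assert c.matches == {(0, 1), (1, 0), (1, 2)}
    assert c.get_matches('l1', 1) == [(0, 'b'), (2, 'b')]   # matches for l1[1] == 'b'
    assert c.get_matches('l2', 1) == [(0, 'a')]             # matches for l2[1] == 'a'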
ldomic/lintools
lintools/data.py
Data.find_the_closest_atoms
original_string / code (python):

    def find_the_closest_atoms(self, topology):
        """
        This function defines the ligand atoms that are closest to the residues
        that will be plotted in the final graph.
        """
        # The measurements are made against the ligand molecule without hydrogen
        # atoms (ligand_noH) because the hydrogen atoms are not plotted in the
        # final graph.
        self.universe.load_new(topology)
        self.universe.ligand_noH = self.universe.ligand.select_atoms("not name H*")
        ligand_positions = self.universe.ligand_noH.positions
        for residue in self.dict_of_plotted_res.keys():
            residue_selection = self.universe.select_atoms(
                "resname " + residue[0] + " and resid " + residue[1] +
                " and segid " + residue[2])
            residue_positions = residue_selection.positions
            dist_array = MDAnalysis.analysis.distances.distance_array(
                ligand_positions, residue_positions)
            min_values_per_atom = {}
            i = 0
            for atom in self.universe.ligand_noH:
                min_values_per_atom[atom.name] = dist_array[i].min()
                i += 1
            sorted_min_values = sorted(min_values_per_atom.items(),
                                       key=operator.itemgetter(1))
            self.closest_atoms[residue] = [(sorted_min_values[0][0],
                                            sorted_min_values[0][1])]
[ "def", "find_the_closest_atoms", "(", "self", ",", "topology", ")", ":", "# The measurements are made to ligand molecule without hydrogen atoms (ligand_noH) because the", "# hydrogen atoms are not plotted in the final graph", "self", ".", "universe", ".", "load_new", "(", "topology", ")", "self", ".", "universe", ".", "ligand_noH", "=", "self", ".", "universe", ".", "ligand", ".", "select_atoms", "(", "\"not name H*\"", ")", "ligand_positions", "=", "self", ".", "universe", ".", "ligand_noH", ".", "positions", "for", "residue", "in", "self", ".", "dict_of_plotted_res", ".", "keys", "(", ")", ":", "residue_selection", "=", "self", ".", "universe", ".", "select_atoms", "(", "\"resname \"", "+", "residue", "[", "0", "]", "+", "\" and resid \"", "+", "residue", "[", "1", "]", "+", "\" and segid \"", "+", "residue", "[", "2", "]", ")", "residue_positions", "=", "residue_selection", ".", "positions", "dist_array", "=", "MDAnalysis", ".", "analysis", ".", "distances", ".", "distance_array", "(", "ligand_positions", ",", "residue_positions", ")", "min_values_per_atom", "=", "{", "}", "i", "=", "0", "for", "atom", "in", "self", ".", "universe", ".", "ligand_noH", ":", "min_values_per_atom", "[", "atom", ".", "name", "]", "=", "dist_array", "[", "i", "]", ".", "min", "(", ")", "i", "+=", "1", "sorted_min_values", "=", "sorted", "(", "min_values_per_atom", ".", "items", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "1", ")", ")", "self", ".", "closest_atoms", "[", "residue", "]", "=", "[", "(", "sorted_min_values", "[", "0", "]", "[", "0", "]", ",", "sorted_min_values", "[", "0", "]", "[", "1", "]", ")", "]" ]
This function defines the ligand atoms that are closest to the residues that will be plotted in the final graph.
[ "This", "function", "defines", "the", "ligand", "atoms", "that", "are", "closest", "to", "the", "residues", "that", "will", "be", "plotted", "in", "the", "final", "graph", "." ]
d825a4a7b35f3f857d3b81b46c9aee72b0ec697a
https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/data.py#L130-L152
train
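The core numeric step above is a per-ligand-atom minimum over a (ligand x residue) distance matrix. A standalone numpy sketch of just that step, with hypothetical coordinates and `np.linalg.norm` standing in for MDAnalysis.analysis.distances.distance_array:

    import numpy as np

    # Hypothetical coordinates: three heavy ligand atoms vs. two residue atoms.
    ligand_positions = np.array([[0., 0., 0.], [1., 0., 0.], [5., 0., 0.]])
    residue_positions = np.array([[4., 0., 0.], [6., 0., 0.]])

    # Shape (n_ligand, n_residue), like the distance_array output:
    dist_array = np.linalg.norm(
        ligand_positions[:, None, :] - residue_positions[None, :, :], axis=-1)

    per_atom_min = dist_array.min(axis=1)   # closest approach of each ligand atom
    closest_idx = int(per_atom_min.argmin())
    assert closest_idx == 2 and per_atom_min[closest_idx] == 1.0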
ldomic/lintools
lintools/data.py
Data.load_data
original_string / code (python):

    def load_data(self, topology, mol_file, ligand_name, offset=0):
        """
        This function loads all relevant data - except trajectories, since those
        are dealt with one at a time. Therefore, this process only needs to be
        done once, and every time a trajectory needs to be loaded, it can be
        loaded separately and the Data object can be shared across LINTools
        processes.
        """
        self.load_topology(topology)
        self.renumber_system(offset)
        self.rename_ligand(ligand_name, mol_file)
        self.load_mol(mol_file)
[ "def", "load_data", "(", "self", ",", "topology", ",", "mol_file", ",", "ligand_name", ",", "offset", "=", "0", ")", ":", "self", ".", "load_topology", "(", "topology", ")", "self", ".", "renumber_system", "(", "offset", ")", "self", ".", "rename_ligand", "(", "ligand_name", ",", "mol_file", ")", "self", ".", "load_mol", "(", "mol_file", ")" ]
This function loads all relevant data - except trajectories, since those are dealt with one at a time. Therefore, this process only needs to be done once, and every time a trajectory needs to be loaded, it can be loaded separately and the Data object can be shared across LINTools processes.
[ "This", "function", "loads", "all", "relevant", "data", "-", "except", "trajectories", "since", "those", "are", "dealt", "with", "one", "at", "a", "time", ".", "Therefore", "this", "process", "only", "needs", "to", "be", "done", "once", "and", "every", "time", "a", "trajectory", "needs", "to", "be", "loaded", "it", "can", "be", "loaded", "seperataly", "and", "the", "Data", "object", "can", "be", "shared", "across", "LINTools", "processes", "." ]
d825a4a7b35f3f857d3b81b46c9aee72b0ec697a
https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/data.py#L153-L163
train
ldomic/lintools
lintools/data.py
Data.analyse_topology
original_string / code (python):

    def analyse_topology(self, topology, cutoff=3.5):
        """
        In case the user wants to analyse only a single topology file, this
        process will determine the residues that should be plotted and find
        the ligand atoms closest to these residues.
        """
        self.define_residues_for_plotting_topology(cutoff)
        self.find_the_closest_atoms(topology)
[ "def", "analyse_topology", "(", "self", ",", "topology", ",", "cutoff", "=", "3.5", ")", ":", "self", ".", "define_residues_for_plotting_topology", "(", "cutoff", ")", "self", ".", "find_the_closest_atoms", "(", "topology", ")" ]
In case the user wants to analyse only a single topology file, this process will determine the residues that should be plotted and find the ligand atoms closest to these residues.
[ "In", "case", "user", "wants", "to", "analyse", "only", "a", "single", "topology", "file", "this", "process", "will", "determine", "the", "residues", "that", "should", "be", "plotted", "and", "find", "the", "ligand", "atoms", "closest", "to", "these", "residues", "." ]
d825a4a7b35f3f857d3b81b46c9aee72b0ec697a
https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/data.py#L165-L172
train
robinandeer/puzzle
puzzle/utils/headers.py
get_header
original_string / code (python):

    def get_header(vcf_file_path):
        """Parse the header and return a header object

        Args:
            vcf_file_path(str): Path to vcf

        Returns:
            head: A HeaderParser object
        """
        logger.info("Parsing header of file {0}".format(vcf_file_path))
        head = HeaderParser()
        handle = get_vcf_handle(infile=vcf_file_path)
        # Parse the header
        for line in handle:
            line = line.rstrip()
            if line.startswith('#'):
                if line.startswith('##'):
                    head.parse_meta_data(line)
                else:
                    head.parse_header_line(line)
            else:
                break
        handle.close()
        return head
[ "def", "get_header", "(", "vcf_file_path", ")", ":", "logger", ".", "info", "(", "\"Parsing header of file {0}\"", ".", "format", "(", "vcf_file_path", ")", ")", "head", "=", "HeaderParser", "(", ")", "handle", "=", "get_vcf_handle", "(", "infile", "=", "vcf_file_path", ")", "# Parse the header", "for", "line", "in", "handle", ":", "line", "=", "line", ".", "rstrip", "(", ")", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "if", "line", ".", "startswith", "(", "'##'", ")", ":", "head", ".", "parse_meta_data", "(", "line", ")", "else", ":", "head", ".", "parse_header_line", "(", "line", ")", "else", ":", "break", "handle", ".", "close", "(", ")", "return", "head" ]
Parse the header and return a header object Args: vcf_file_path(str): Path to vcf Returns: head: A HeaderParser object
[ "Parse", "the", "header", "and", "return", "a", "header", "object" ]
9476f05b416d3a5135d25492cb31411fdf831c58
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/headers.py#L20-L45
train
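The header loop above relies only on VCF's `##` meta-line / `#` column-header convention and stops at the first variant record. A self-contained rerun of that loop, with an in-memory file standing in for get_vcf_handle and plain Python values standing in for the HeaderParser object:

    import io

    vcf = io.StringIO(
        "##fileformat=VCFv4.2\n"
        "#CHROM\tPOS\tID\tREF\tALT\n"
        "1\t100\t.\tA\tT\n"
    )
    meta_lines, header_line = [], None
    for line in vcf:
        line = line.rstrip()
        if line.startswith('##'):
            meta_lines.append(line)     # would go to head.parse_meta_data(line)
        elif line.startswith('#'):
            header_line = line          # would go to head.parse_header_line(line)
        else:
            break                       # first record line: the header is done
    assert meta_lines == ["##fileformat=VCFv4.2"]
    assert header_line.startswith("#CHROM")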
DarkEnergySurvey/ugali
ugali/analysis/kernel.py
EllipticalKernel.sample_lonlat
original_string / code (python):

    def sample_lonlat(self, n):
        """ Sample 2D distribution of points in lon, lat """
        # From http://en.wikipedia.org/wiki/Ellipse#General_parametric_form
        # However, Martin et al. (2009) use PA theta "from North to East"
        # Definition of phi (position angle) is offset by pi/4
        # Definition of t (eccentric anomaly) remains the same (x,y-frame usual)
        # In the end, everything is trouble because we use glon, glat...
        radius = self.sample_radius(n)
        a = radius
        b = self.jacobian * radius
        t = 2. * np.pi * np.random.rand(n)
        cost, sint = np.cos(t), np.sin(t)
        phi = np.pi / 2. - np.deg2rad(self.theta)
        cosphi, sinphi = np.cos(phi), np.sin(phi)
        x = a * cost * cosphi - b * sint * sinphi
        y = a * cost * sinphi + b * sint * cosphi
        if self.projector is None:
            logger.debug("Creating AITOFF projector for sampling")
            projector = Projector(self.lon, self.lat, 'ait')
        else:
            projector = self.projector
        lon, lat = projector.imageToSphere(x, y)
        return lon, lat
[ "def", "sample_lonlat", "(", "self", ",", "n", ")", ":", "# From http://en.wikipedia.org/wiki/Ellipse#General_parametric_form", "# However, Martin et al. (2009) use PA theta \"from North to East\"", "# Definition of phi (position angle) is offset by pi/4", "# Definition of t (eccentric anamoly) remains the same (x,y-frame usual)", "# In the end, everything is trouble because we use glon, glat...", "radius", "=", "self", ".", "sample_radius", "(", "n", ")", "a", "=", "radius", "b", "=", "self", ".", "jacobian", "*", "radius", "t", "=", "2.", "*", "np", ".", "pi", "*", "np", ".", "random", ".", "rand", "(", "n", ")", "cost", ",", "sint", "=", "np", ".", "cos", "(", "t", ")", ",", "np", ".", "sin", "(", "t", ")", "phi", "=", "np", ".", "pi", "/", "2.", "-", "np", ".", "deg2rad", "(", "self", ".", "theta", ")", "cosphi", ",", "sinphi", "=", "np", ".", "cos", "(", "phi", ")", ",", "np", ".", "sin", "(", "phi", ")", "x", "=", "a", "*", "cost", "*", "cosphi", "-", "b", "*", "sint", "*", "sinphi", "y", "=", "a", "*", "cost", "*", "sinphi", "+", "b", "*", "sint", "*", "cosphi", "if", "self", ".", "projector", "is", "None", ":", "logger", ".", "debug", "(", "\"Creating AITOFF projector for sampling\"", ")", "projector", "=", "Projector", "(", "self", ".", "lon", ",", "self", ".", "lat", ",", "'ait'", ")", "else", ":", "projector", "=", "self", ".", "projector", "lon", ",", "lat", "=", "projector", ".", "imageToSphere", "(", "x", ",", "y", ")", "return", "lon", ",", "lat" ]
Sample 2D distribution of points in lon, lat
[ "Sample", "2D", "distribution", "of", "points", "in", "lon", "lat" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/kernel.py#L196-L222
train
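The sampling above is the standard rotated parametric ellipse: points (a cos t, b sin t) turned by the position angle phi before projection. A deterministic sketch of just that step, with hypothetical kernel values for the semi-major axis, axis ratio, and theta:

    import numpy as np

    a, q, theta = 1.0, 0.5, 0.0                     # hypothetical kernel values
    b = q * a
    t = np.linspace(0.0, 2.0 * np.pi, 361)          # eccentric anomaly
    phi = np.pi / 2.0 - np.deg2rad(theta)           # PA measured from North

    x = a * np.cos(t) * np.cos(phi) - b * np.sin(t) * np.sin(phi)
    y = a * np.cos(t) * np.sin(phi) + b * np.sin(t) * np.cos(phi)

    # With theta = 0 the major axis lies along y (North): extents are b and a.
    assert np.isclose(np.abs(x).max(), b) and np.isclose(np.abs(y).max(), a)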
alphagov/performanceplatform-collector
performanceplatform/collector/ga/plugins/aggregate.py
group
original_string / code (python):

    def group(iterable, key):
        """
        groupby which sorts the input, discards the key and returns the
        output as a sequence of lists.
        """
        for _, grouped in groupby(sorted(iterable, key=key), key=key):
            yield list(grouped)
[ "def", "group", "(", "iterable", ",", "key", ")", ":", "for", "_", ",", "grouped", "in", "groupby", "(", "sorted", "(", "iterable", ",", "key", "=", "key", ")", ",", "key", "=", "key", ")", ":", "yield", "list", "(", "grouped", ")" ]
groupby which sorts the input, discards the key and returns the output as a sequence of lists.
[ "groupby", "which", "sorts", "the", "input", "discards", "the", "key", "and", "returns", "the", "output", "as", "a", "sequence", "of", "lists", "." ]
de68ab4aa500c31e436e050fa1268fa928c522a5
https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/performanceplatform/collector/ga/plugins/aggregate.py#L41-L47
train
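A quick usage sketch of `group`, re-defined here so the snippet runs standalone:

    from itertools import groupby

    def group(iterable, key):
        for _, grouped in groupby(sorted(iterable, key=key), key=key):
            yield list(grouped)

    words = ['bee', 'ant', 'cow', 'ape']
    # Sorting first makes groupby see each key exactly once.
    assert list(group(words, key=lambda w: w[0])) == [['ant', 'ape'], ['bee'], ['cow']]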
alphagov/performanceplatform-collector
performanceplatform/collector/ga/plugins/aggregate.py
aggregate_count
original_string / code (python):

    def aggregate_count(keyname):
        """
        Straightforward sum of the given keyname.
        """
        def inner(docs):
            return sum(doc[keyname] for doc in docs)
        return keyname, inner
[ "def", "aggregate_count", "(", "keyname", ")", ":", "def", "inner", "(", "docs", ")", ":", "return", "sum", "(", "doc", "[", "keyname", "]", "for", "doc", "in", "docs", ")", "return", "keyname", ",", "inner" ]
Straightforward sum of the given keyname.
[ "Straightforward", "sum", "of", "the", "given", "keyname", "." ]
de68ab4aa500c31e436e050fa1268fa928c522a5
https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/performanceplatform/collector/ga/plugins/aggregate.py#L50-L57
train
alphagov/performanceplatform-collector
performanceplatform/collector/ga/plugins/aggregate.py
aggregate_rate
original_string / code (python):

    def aggregate_rate(rate_key, count_key):
        """
        Compute an aggregate rate for `rate_key` weighted according to
        `count_key`.
        """
        def inner(docs):
            total = sum(doc[count_key] for doc in docs)
            weighted_total = sum(doc[rate_key] * doc[count_key] for doc in docs)
            total_rate = weighted_total / total
            return total_rate
        return rate_key, inner
[ "def", "aggregate_rate", "(", "rate_key", ",", "count_key", ")", ":", "def", "inner", "(", "docs", ")", ":", "total", "=", "sum", "(", "doc", "[", "count_key", "]", "for", "doc", "in", "docs", ")", "weighted_total", "=", "sum", "(", "doc", "[", "rate_key", "]", "*", "doc", "[", "count_key", "]", "for", "doc", "in", "docs", ")", "total_rate", "=", "weighted_total", "/", "total", "return", "total_rate", "return", "rate_key", ",", "inner" ]
Compute an aggregate rate for `rate_key` weighted according to `count_key`.
[ "Compute", "an", "aggregate", "rate", "for", "rate_key", "weighted", "according", "to", "count_rate", "." ]
de68ab4aa500c31e436e050fa1268fa928c522a5
https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/performanceplatform/collector/ga/plugins/aggregate.py#L60-L71
train
alphagov/performanceplatform-collector
performanceplatform/collector/ga/plugins/aggregate.py
make_aggregate
original_string / code (python):

    def make_aggregate(docs, aggregations):
        """
        Given `docs` and `aggregations` return a single document with the
        aggregations applied.
        """
        new_doc = dict(docs[0])
        for keyname, aggregation_function in aggregations:
            new_doc[keyname] = aggregation_function(docs)
        return new_doc
[ "def", "make_aggregate", "(", "docs", ",", "aggregations", ")", ":", "new_doc", "=", "dict", "(", "docs", "[", "0", "]", ")", "for", "keyname", ",", "aggregation_function", "in", "aggregations", ":", "new_doc", "[", "keyname", "]", "=", "aggregation_function", "(", "docs", ")", "return", "new_doc" ]
Given `docs` and `aggregations` return a single document with the aggregations applied.
[ "Given", "docs", "and", "aggregations", "return", "a", "single", "document", "with", "the", "aggregations", "applied", "." ]
de68ab4aa500c31e436e050fa1268fa928c522a5
https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/performanceplatform/collector/ga/plugins/aggregate.py#L74-L84
train
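aggregate_count and aggregate_rate build the (keyname, function) pairs that make_aggregate folds over a group of documents. A self-contained rerun of the three helpers with hypothetical document fields:

    def aggregate_count(keyname):
        def inner(docs):
            return sum(doc[keyname] for doc in docs)
        return keyname, inner

    def aggregate_rate(rate_key, count_key):
        def inner(docs):
            total = sum(doc[count_key] for doc in docs)
            weighted = sum(doc[rate_key] * doc[count_key] for doc in docs)
            return weighted / total
        return rate_key, inner

    def make_aggregate(docs, aggregations):
        new_doc = dict(docs[0])          # keep the non-aggregated fields of doc 0
        for keyname, fn in aggregations:
            new_doc[keyname] = fn(docs)
        return new_doc

    docs = [{'page': '/a', 'visits': 10, 'bounce_rate': 0.5},
            {'page': '/a', 'visits': 30, 'bounce_rate': 0.1}]
    out = make_aggregate(docs, [aggregate_count('visits'),
                                aggregate_rate('bounce_rate', 'visits')])
    assert out['visits'] == 40
    assert abs(out['bounce_rate'] - 0.2) < 1e-9   # (0.5*10 + 0.1*30) / 40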
MasterKale/django-cra-helper
cra_helper/templatetags/cra_helper_tags.py
json
original_string / code (python):

    def json(value):
        '''
        Sanitize the JSON string using the Bleach HTML tag remover
        '''
        uncleaned = jsonlib.dumps(value)
        clean = bleach.clean(uncleaned)
        return mark_safe(clean)
[ "def", "json", "(", "value", ")", ":", "uncleaned", "=", "jsonlib", ".", "dumps", "(", "value", ")", "clean", "=", "bleach", ".", "clean", "(", "uncleaned", ")", "return", "mark_safe", "(", "clean", ")" ]
Sanitize the JSON string using the Bleach HTML tag remover
[ "Sanitize", "the", "JSON", "string", "using", "the", "Bleach", "HTML", "tag", "remover" ]
ba50c643c181a18b80ee9bbdbea74b58abd6daad
https://github.com/MasterKale/django-cra-helper/blob/ba50c643c181a18b80ee9bbdbea74b58abd6daad/cra_helper/templatetags/cra_helper_tags.py#L15-L21
train
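A sketch of the sanitizing step above, assuming the bleach package is installed; django's mark_safe is omitted so the snippet runs standalone:

    import json as jsonlib
    import bleach   # assumes bleach is available

    value = {'msg': '</script><script>alert(1)</script>'}
    uncleaned = jsonlib.dumps(value)
    cleaned = bleach.clean(uncleaned)
    # By default bleach escapes disallowed tags, so no raw <script> survives
    # before the result is marked safe for the template.
    assert '<script>' not in cleaned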
cfusting/fastgp
fastgp/algorithms/afpo.py
find_pareto_front
original_string / code (python):

    def find_pareto_front(population):
        """Finds a subset of nondominated individuals in a given list

        :param population: a list of individuals
        :return: a set of indices corresponding to nondominated individuals
        """
        pareto_front = set(range(len(population)))
        for i in range(len(population)):
            if i not in pareto_front:
                continue
            ind1 = population[i]
            for j in range(i + 1, len(population)):
                ind2 = population[j]
                # if individuals are equal on all objectives, mark one of them
                # (the first encountered one) as dominated to prevent excessive
                # growth of the Pareto front
                if ind2.fitness.dominates(ind1.fitness) or ind1.fitness == ind2.fitness:
                    pareto_front.discard(i)
                if ind1.fitness.dominates(ind2.fitness):
                    pareto_front.discard(j)
        return pareto_front
[ "def", "find_pareto_front", "(", "population", ")", ":", "pareto_front", "=", "set", "(", "range", "(", "len", "(", "population", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "population", ")", ")", ":", "if", "i", "not", "in", "pareto_front", ":", "continue", "ind1", "=", "population", "[", "i", "]", "for", "j", "in", "range", "(", "i", "+", "1", ",", "len", "(", "population", ")", ")", ":", "ind2", "=", "population", "[", "j", "]", "# if individuals are equal on all objectives, mark one of them (the first encountered one) as dominated", "# to prevent excessive growth of the Pareto front", "if", "ind2", ".", "fitness", ".", "dominates", "(", "ind1", ".", "fitness", ")", "or", "ind1", ".", "fitness", "==", "ind2", ".", "fitness", ":", "pareto_front", ".", "discard", "(", "i", ")", "if", "ind1", ".", "fitness", ".", "dominates", "(", "ind2", ".", "fitness", ")", ":", "pareto_front", ".", "discard", "(", "j", ")", "return", "pareto_front" ]
Finds a subset of nondominated individuals in a given list :param population: a list of individuals :return: a set of indices corresponding to nondominated individuals
[ "Finds", "a", "subset", "of", "nondominated", "individuals", "in", "a", "given", "list" ]
6cf3c5d14abedaea064feef6ca434ee806a11756
https://github.com/cfusting/fastgp/blob/6cf3c5d14abedaea064feef6ca434ee806a11756/fastgp/algorithms/afpo.py#L28-L53
train
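A small worked example, assuming find_pareto_front above is in scope; `Fitness` and `Ind` are hypothetical stand-ins for DEAP-style individuals with a maximization `dominates()`:

    class Fitness:
        """Hypothetical maximization fitness with a DEAP-style dominates()."""
        def __init__(self, *values):
            self.values = values
        def dominates(self, other):
            return (all(a >= b for a, b in zip(self.values, other.values)) and
                    any(a > b for a, b in zip(self.values, other.values)))
        def __eq__(self, other):
            return self.values == other.values

    class Ind:
        def __init__(self, *values):
            self.fitness = Fitness(*values)

    pop = [Ind(1, 2), Ind(2, 1), Ind(0, 0), Ind(1, 2)]
    # find_pareto_front(pop) -> {1, 3}: (0, 0) is dominated, and of the two
    # equal (1, 2) individuals the earlier one (index 0) is discarded.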
consbio/ncdjango
ncdjango/geoprocessing/evaluation.py
Parser._to_ndarray
original_string / code (python):

    def _to_ndarray(self, a):
        """Casts Python lists and tuples to a numpy array or raises a TypeError."""
        if isinstance(a, (list, tuple)):
            a = numpy.array(a)
        if not is_ndarray(a):
            raise TypeError("Expected an ndarray but got object of type '{}' instead".format(type(a)))
        return a
[ "def", "_to_ndarray", "(", "self", ",", "a", ")", ":", "if", "isinstance", "(", "a", ",", "(", "list", ",", "tuple", ")", ")", ":", "a", "=", "numpy", ".", "array", "(", "a", ")", "if", "not", "is_ndarray", "(", "a", ")", ":", "raise", "TypeError", "(", "\"Expected an ndarray but got object of type '{}' instead\"", ".", "format", "(", "type", "(", "a", ")", ")", ")", "return", "a" ]
Casts Python lists and tuples to a numpy array or raises a TypeError.
[ "Casts", "Python", "lists", "and", "tuples", "to", "a", "numpy", "array", "or", "raises", "an", "AssertionError", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L310-L319
train
consbio/ncdjango
ncdjango/geoprocessing/evaluation.py
Parser.fn_abs
original_string / code (python):

    def fn_abs(self, value):
        """
        Return the absolute value of a number.

        :param value: The number.
        :return: The absolute value of the number.
        """
        if is_ndarray(value):
            return numpy.absolute(value)
        else:
            return abs(value)
[ "def", "fn_abs", "(", "self", ",", "value", ")", ":", "if", "is_ndarray", "(", "value", ")", ":", "return", "numpy", ".", "absolute", "(", "value", ")", "else", ":", "return", "abs", "(", "value", ")" ]
Return the absolute value of a number. :param value: The number. :return: The absolute value of the number.
[ "Return", "the", "absolute", "value", "of", "a", "number", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L321-L332
train
consbio/ncdjango
ncdjango/geoprocessing/evaluation.py
Parser.fn_get_mask
original_string / code (python):

    def fn_get_mask(self, value):
        """
        Return an array mask.

        :param value: The array.
        :return: The array mask.
        """
        value = self._to_ndarray(value)
        if numpy.ma.is_masked(value):
            return value.mask
        else:
            return numpy.zeros(value.shape).astype(bool)
[ "def", "fn_get_mask", "(", "self", ",", "value", ")", ":", "value", "=", "self", ".", "_to_ndarray", "(", "value", ")", "if", "numpy", ".", "ma", ".", "is_masked", "(", "value", ")", ":", "return", "value", ".", "mask", "else", ":", "return", "numpy", ".", "zeros", "(", "value", ".", "shape", ")", ".", "astype", "(", "bool", ")" ]
Return an array mask. :param value: The array. :return: The array mask.
[ "Return", "an", "array", "mask", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L334-L347
train
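What the two branches above return, shown with numpy directly:

    import numpy as np

    masked = np.ma.masked_less(np.array([3, -1, 2]), 0)
    assert np.ma.is_masked(masked)
    assert list(masked.mask) == [False, True, False]     # what fn_get_mask returns

    plain = np.array([3, -1, 2])
    assert not np.ma.is_masked(plain)
    assert not np.zeros(plain.shape).astype(bool).any()  # the all-False fallback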
consbio/ncdjango
ncdjango/geoprocessing/evaluation.py
Parser.fn_min
original_string / code (python):

    def fn_min(self, a, axis=None):
        """
        Return the minimum of an array, ignoring any NaNs.

        :param a: The array.
        :return: The minimum value of the array.
        """
        return numpy.nanmin(self._to_ndarray(a), axis=axis)
[ "def", "fn_min", "(", "self", ",", "a", ",", "axis", "=", "None", ")", ":", "return", "numpy", ".", "nanmin", "(", "self", ".", "_to_ndarray", "(", "a", ")", ",", "axis", "=", "axis", ")" ]
Return the minimum of an array, ignoring any NaNs. :param a: The array. :return: The minimum value of the array.
[ "Return", "the", "minimum", "of", "an", "array", "ignoring", "any", "NaNs", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L349-L357
train
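fn_min and the sibling wrappers below (fn_max, fn_median, fn_mean, fn_std, fn_var) all exist for the same reason: plain reductions propagate NaN, while the nan-aware variants skip it. A quick demonstration:

    import numpy as np

    a = np.array([1.0, np.nan, 3.0])
    assert np.isnan(a.min())           # a plain reduction is poisoned by NaN
    assert np.nanmin(a) == 1.0         # the nan-aware variants skip it instead
    assert np.nanmax(a) == 3.0
    assert np.nanmean(a) == 2.0
    assert np.nanmedian(a) == 2.0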
consbio/ncdjango
ncdjango/geoprocessing/evaluation.py
Parser.fn_max
original_string / code (python):

    def fn_max(self, a, axis=None):
        """
        Return the maximum of an array, ignoring any NaNs.

        :param a: The array.
        :return: The maximum value of the array
        """
        return numpy.nanmax(self._to_ndarray(a), axis=axis)
[ "def", "fn_max", "(", "self", ",", "a", ",", "axis", "=", "None", ")", ":", "return", "numpy", ".", "nanmax", "(", "self", ".", "_to_ndarray", "(", "a", ")", ",", "axis", "=", "axis", ")" ]
Return the maximum of an array, ignoring any NaNs. :param a: The array. :return: The maximum value of the array
[ "Return", "the", "maximum", "of", "an", "array", "ignoring", "any", "NaNs", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L370-L378
train
consbio/ncdjango
ncdjango/geoprocessing/evaluation.py
Parser.fn_median
original_string / code (python):

    def fn_median(self, a, axis=None):
        """
        Compute the median of an array, ignoring NaNs.

        :param a: The array.
        :return: The median value of the array.
        """
        return numpy.nanmedian(self._to_ndarray(a), axis=axis)
[ "def", "fn_median", "(", "self", ",", "a", ",", "axis", "=", "None", ")", ":", "return", "numpy", ".", "nanmedian", "(", "self", ".", "_to_ndarray", "(", "a", ")", ",", "axis", "=", "axis", ")" ]
Compute the median of an array, ignoring NaNs. :param a: The array. :return: The median value of the array.
[ "Compute", "the", "median", "of", "an", "array", "ignoring", "NaNs", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L380-L388
train
consbio/ncdjango
ncdjango/geoprocessing/evaluation.py
Parser.fn_mean
original_string / code (python):

    def fn_mean(self, a, axis=None):
        """
        Compute the arithmetic mean of an array, ignoring NaNs.

        :param a: The array.
        :return: The arithmetic mean of the array.
        """
        return numpy.nanmean(self._to_ndarray(a), axis=axis)
[ "def", "fn_mean", "(", "self", ",", "a", ",", "axis", "=", "None", ")", ":", "return", "numpy", ".", "nanmean", "(", "self", ".", "_to_ndarray", "(", "a", ")", ",", "axis", "=", "axis", ")" ]
Compute the arithmetic mean of an array, ignoring NaNs. :param a: The array. :return: The arithmetic mean of the array.
[ "Compute", "the", "arithmetic", "mean", "of", "an", "array", "ignoring", "NaNs", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L390-L398
train
consbio/ncdjango
ncdjango/geoprocessing/evaluation.py
Parser.fn_std
original_string / code (python):

    def fn_std(self, a, axis=None):
        """
        Compute the standard deviation of an array, ignoring NaNs.

        :param a: The array.
        :return: The standard deviation of the array.
        """
        return numpy.nanstd(self._to_ndarray(a), axis=axis)
[ "def", "fn_std", "(", "self", ",", "a", ",", "axis", "=", "None", ")", ":", "return", "numpy", ".", "nanstd", "(", "self", ".", "_to_ndarray", "(", "a", ")", ",", "axis", "=", "axis", ")" ]
Compute the standard deviation of an array, ignoring NaNs. :param a: The array. :return: The standard deviation of the array.
[ "Compute", "the", "standard", "deviation", "of", "an", "array", "ignoring", "NaNs", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L400-L408
train
consbio/ncdjango
ncdjango/geoprocessing/evaluation.py
Parser.fn_var
original_string / code (python):

    def fn_var(self, a, axis=None):
        """
        Compute the variance of an array, ignoring NaNs.

        :param a: The array.
        :return: The variance of the array.
        """
        return numpy.nanvar(self._to_ndarray(a), axis=axis)
[ "def", "fn_var", "(", "self", ",", "a", ",", "axis", "=", "None", ")", ":", "return", "numpy", ".", "nanvar", "(", "self", ".", "_to_ndarray", "(", "a", ")", ",", "axis", "=", "axis", ")" ]
Compute the variance of an array, ignoring NaNs. :param a: The array. :return: The variance of the array.
[ "Compute", "the", "variance", "of", "an", "array", "ignoring", "NaNs", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L410-L418
train
consbio/ncdjango
ncdjango/geoprocessing/evaluation.py
Parser.fn_ceil
original_string / code (python):

    def fn_ceil(self, value):
        """
        Return the ceiling of a number.

        :param value: The number.
        :return: The ceiling of the number.
        """
        if is_ndarray(value) or isinstance(value, (list, tuple)):
            return numpy.ceil(self._to_ndarray(value))
        else:
            return math.ceil(value)
[ "def", "fn_ceil", "(", "self", ",", "value", ")", ":", "if", "is_ndarray", "(", "value", ")", "or", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "numpy", ".", "ceil", "(", "self", ".", "_to_ndarray", "(", "value", ")", ")", "else", ":", "return", "math", ".", "ceil", "(", "value", ")" ]
Return the ceiling of a number. :param value: The number. :return: The ceiling of the number.
[ "Return", "the", "ceiling", "of", "a", "number", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L433-L444
train
consbio/ncdjango
ncdjango/geoprocessing/evaluation.py
Parser.fn_int
original_string / code (python):

    def fn_int(self, value):
        """
        Return the value cast to an int.

        :param value: The number.
        :return: The number as an int.
        """
        if is_ndarray(value) or isinstance(value, (list, tuple)):
            return self._to_ndarray(value).astype('int')
        else:
            return int(value)
[ "def", "fn_int", "(", "self", ",", "value", ")", ":", "if", "is_ndarray", "(", "value", ")", "or", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "self", ".", "_to_ndarray", "(", "value", ")", ".", "astype", "(", "'int'", ")", "else", ":", "return", "int", "(", "value", ")" ]
Return the value cast to an int. :param value: The number. :return: The number as an int.
[ "Return", "the", "value", "cast", "to", "an", "int", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L446-L457
train
consbio/ncdjango
ncdjango/geoprocessing/evaluation.py
Parser.fn_float
original_string / code (python):

    def fn_float(self, value):
        """
        Return the value cast to a float.

        :param value: The number.
        :return: The number as a float.
        """
        if is_ndarray(value) or isinstance(value, (list, tuple)):
            return self._to_ndarray(value).astype('float')
        else:
            return float(value)
[ "def", "fn_float", "(", "self", ",", "value", ")", ":", "if", "is_ndarray", "(", "value", ")", "or", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "self", ".", "_to_ndarray", "(", "value", ")", ".", "astype", "(", "'float'", ")", "else", ":", "return", "float", "(", "value", ")" ]
Return the value cast to a float. :param value: The number. :return: The number as a float.
[ "Return", "the", "value", "cast", "to", "a", "float", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/evaluation.py#L511-L522
train
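The casting helpers above (fn_ceil, fn_int, fn_float) branch between builtin/math scalar calls and numpy array casts. The underlying behavior, shown directly:

    import math
    import numpy as np

    assert math.ceil(2.1) == 3                                    # scalar path in fn_ceil
    assert list(np.ceil(np.array([2.1, -2.1]))) == [3.0, -2.0]    # array path
    assert list(np.array([3.9, -1.2]).astype('int')) == [3, -1]   # fn_int truncates
    assert np.array([1, 2]).astype('float').dtype == np.float64   # fn_float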
totalgood/pugnlp
src/pugnlp/tutil.py
make_datetime
original_string / code (python):

    def make_datetime(dt, date_parser=parse_date):
        """Coerce a datetime or string into datetime.datetime object

        Arguments:
            dt (str or datetime.datetime or datetime.time or pandas.Timestamp):
                time or date to be coerced into a `datetime.date` object

        Returns:
            datetime.time: Time of day portion of a `datetime` string or object

        >>> make_date('')
        datetime.date(1970, 1, 1)
        >>> make_date(None)
        datetime.date(1970, 1, 1)
        >>> make_date("11:59 PM") == datetime.date.today()
        True
        >>> make_date(datetime.datetime(1999, 12, 31, 23, 59, 59))
        datetime.date(1999, 12, 31)
        >>> make_datetime(['1970-10-31', '1970-12-25'])  # doctest: +NORMALIZE_WHITESPACE
        [datetime.datetime(1970, 10, 31, 0, 0), datetime.datetime(1970, 12, 25, 0, 0)]
        """
        if (isinstance(dt, (datetime.datetime, datetime.date, datetime.time,
                            pd.Timestamp, np.datetime64))
                or dt in (float('nan'), float('inf'), float('-inf'), None, '')):
            return dt
        if isinstance(dt, (float, int)):
            return datetime_from_ordinal_float(dt)
        if isinstance(dt, datetime.date):
            return datetime.datetime(dt.year, dt.month, dt.day)
        if isinstance(dt, datetime.time):
            return datetime.datetime(1, 1, 1, dt.hour, dt.minute, dt.second, dt.microsecond)
        if not dt:
            return datetime.datetime(1970, 1, 1)
        if isinstance(dt, basestring):
            try:
                return date_parser(dt)
            except ValueError:
                print('Unable to make_datetime({})'.format(dt))
                raise
        try:
            # timetuple() carries no microseconds, and its 7th field is tm_wday,
            # so only the first six fields are safe to unpack into a datetime
            return datetime.datetime(*dt.timetuple()[:6])
        except AttributeError:
            try:
                dt = list(dt)
                if 0 < len(dt) < 7:
                    try:
                        return datetime.datetime(*dt[:7])
                    except (TypeError, IndexError, ValueError):
                        pass
            except (TypeError, IndexError, ValueError, AttributeError):
                # dt is not iterable
                return dt
        return [make_datetime(val, date_parser=date_parser) for val in dt]
[ "def", "make_datetime", "(", "dt", ",", "date_parser", "=", "parse_date", ")", ":", "if", "(", "isinstance", "(", "dt", ",", "(", "datetime", ".", "datetime", ",", "datetime", ".", "date", ",", "datetime", ".", "time", ",", "pd", ".", "Timestamp", ",", "np", ".", "datetime64", ")", ")", "or", "dt", "in", "(", "float", "(", "'nan'", ")", ",", "float", "(", "'inf'", ")", ",", "float", "(", "'-inf'", ")", ",", "None", ",", "''", ")", ")", ":", "return", "dt", "if", "isinstance", "(", "dt", ",", "(", "float", ",", "int", ")", ")", ":", "return", "datetime_from_ordinal_float", "(", "dt", ")", "if", "isinstance", "(", "dt", ",", "datetime", ".", "date", ")", ":", "return", "datetime", ".", "datetime", "(", "dt", ".", "year", ",", "dt", ".", "month", ",", "dt", ".", "day", ")", "if", "isinstance", "(", "dt", ",", "datetime", ".", "time", ")", ":", "return", "datetime", ".", "datetime", "(", "1", ",", "1", ",", "1", ",", "dt", ".", "hour", ",", "dt", ".", "minute", ",", "dt", ".", "second", ",", "dt", ".", "microsecond", ")", "if", "not", "dt", ":", "return", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", "if", "isinstance", "(", "dt", ",", "basestring", ")", ":", "try", ":", "return", "date_parser", "(", "dt", ")", "except", "ValueError", ":", "print", "(", "'Unable to make_datetime({})'", ".", "format", "(", "dt", ")", ")", "raise", "try", ":", "return", "datetime", ".", "datetime", "(", "*", "dt", ".", "timetuple", "(", ")", "[", ":", "7", "]", ")", "except", "AttributeError", ":", "try", ":", "dt", "=", "list", "(", "dt", ")", "if", "0", "<", "len", "(", "dt", ")", "<", "7", ":", "try", ":", "return", "datetime", ".", "datetime", "(", "*", "dt", "[", ":", "7", "]", ")", "except", "(", "TypeError", ",", "IndexError", ",", "ValueError", ")", ":", "pass", "except", "(", "TypeError", ",", "IndexError", ",", "ValueError", ",", "AttributeError", ")", ":", "# dt is not iterable", "return", "dt", "return", "[", "make_datetime", "(", "val", ",", "date_parser", "=", "date_parser", ")", "for", "val", "in", "dt", "]" ]
docstring:

    Coerce a datetime or string into datetime.datetime object

    Arguments:
        dt (str or datetime.datetime or datetime.time or pandas.Timestamp):
            time or date to be coerced into a `datetime.date` object

    Returns:
        datetime.time: Time of day portion of a `datetime` string or object

    >>> make_date('')
    datetime.date(1970, 1, 1)
    >>> make_date(None)
    datetime.date(1970, 1, 1)
    >>> make_date("11:59 PM") == datetime.date.today()
    True
    >>> make_date(datetime.datetime(1999, 12, 31, 23, 59, 59))
    datetime.date(1999, 12, 31)
    >>> make_datetime(['1970-10-31', '1970-12-25'])  # doctest: +NORMALIZE_WHITESPACE
    [datetime.datetime(1970, 10, 31, 0, 0), datetime.datetime(1970, 12, 25, 0, 0)]
[ "Coerce", "a", "datetime", "or", "string", "into", "datetime", ".", "datetime", "object" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/tutil.py#L140-L192
train
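A standalone look at the timetuple() unpacking path used above; note that struct_time carries no microseconds, so only the first six fields are meaningful:

    import datetime

    d = datetime.date(1999, 12, 31)
    coerced = datetime.datetime(*d.timetuple()[:6])   # (year, ..., second)
    assert coerced == datetime.datetime(1999, 12, 31, 0, 0)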
totalgood/pugnlp
src/pugnlp/tutil.py
quantize_datetime
original_string / code (python):

    def quantize_datetime(dt, resolution=None):
        """Quantize a datetime to integer years, months, days, hours, minutes,
        seconds or microseconds

        Also works with a `datetime.timetuple` or `time.struct_time` or a
        1- to 9-tuple of ints or floats. Also works with a sequence of
        struct_times, tuples, or datetimes.

        >>> quantize_datetime(datetime.datetime(1970,1,2,3,4,5,6), resolution=3)
        datetime.datetime(1970, 1, 2, 0, 0)

        Notice that 6 is the highest resolution value with any utility

        >>> quantize_datetime(datetime.datetime(1970,1,2,3,4,5,6), resolution=7)
        datetime.datetime(1970, 1, 2, 3, 4, 5)
        >>> quantize_datetime(datetime.datetime(1971,2,3,4,5,6,7), 1)
        datetime.datetime(1971, 1, 1, 0, 0)
        """
        # FIXME: this automatically truncates off microseconds just because
        # timetuple() only goes out to seconds
        resolution = int(resolution or 6)
        if hasattr(dt, 'timetuple'):
            dt = dt.timetuple()  # strips timezone info
        if isinstance(dt, time.struct_time):
            # strip last 3 fields (tm_wday, tm_yday, tm_isdst)
            dt = list(dt)[:6]
            # struct_time has no microsecond, but accepts float seconds
            dt += [int((dt[5] - int(dt[5])) * 1000000)]
            dt[5] = int(dt[5])
            return datetime.datetime(*(dt[:resolution] + [1] * max(3 - resolution, 0)))
        if isinstance(dt, tuple) and len(dt) <= 9 and all(isinstance(val, (float, int)) for val in dt):
            dt = list(dt) + [0] * max(6 - len(dt), 0)
            # if the 6th element of the tuple looks like a float set of seconds,
            # the microseconds need to be split out
            if len(dt) == 6 and isinstance(dt[5], float):
                dt += [1000000 * (dt[5] - int(dt[5]))]
                dt[5] = int(dt[5])
            dt = [int(val) for val in dt]
            return datetime.datetime(*(dt[:resolution] + [1] * max(3 - resolution, 0)))
        return [quantize_datetime(value) for value in dt]
[ "def", "quantize_datetime", "(", "dt", ",", "resolution", "=", "None", ")", ":", "# FIXME: this automatically truncates off microseconds just because timtuple() only goes out to sec", "resolution", "=", "int", "(", "resolution", "or", "6", ")", "if", "hasattr", "(", "dt", ",", "'timetuple'", ")", ":", "dt", "=", "dt", ".", "timetuple", "(", ")", "# strips timezone info", "if", "isinstance", "(", "dt", ",", "time", ".", "struct_time", ")", ":", "# strip last 3 fields (tm_wday, tm_yday, tm_isdst)", "dt", "=", "list", "(", "dt", ")", "[", ":", "6", "]", "# struct_time has no microsecond, but accepts float seconds", "dt", "+=", "[", "int", "(", "(", "dt", "[", "5", "]", "-", "int", "(", "dt", "[", "5", "]", ")", ")", "*", "1000000", ")", "]", "dt", "[", "5", "]", "=", "int", "(", "dt", "[", "5", "]", ")", "return", "datetime", ".", "datetime", "(", "*", "(", "dt", "[", ":", "resolution", "]", "+", "[", "1", "]", "*", "max", "(", "3", "-", "resolution", ",", "0", ")", ")", ")", "if", "isinstance", "(", "dt", ",", "tuple", ")", "and", "len", "(", "dt", ")", "<=", "9", "and", "all", "(", "isinstance", "(", "val", ",", "(", "float", ",", "int", ")", ")", "for", "val", "in", "dt", ")", ":", "dt", "=", "list", "(", "dt", ")", "+", "[", "0", "]", "*", "(", "max", "(", "6", "-", "len", "(", "dt", ")", ",", "0", ")", ")", "# if the 6th element of the tuple looks like a float set of seconds need to add microseconds", "if", "len", "(", "dt", ")", "==", "6", "and", "isinstance", "(", "dt", "[", "5", "]", ",", "float", ")", ":", "dt", "=", "list", "(", "dt", ")", "+", "[", "1000000", "*", "(", "dt", "[", "5", "]", "-", "int", "(", "dt", "[", "5", "]", ")", ")", "]", "dt", "[", "5", "]", "=", "int", "(", "dt", "[", "5", "]", ")", "dt", "=", "tuple", "(", "int", "(", "val", ")", "for", "val", "in", "dt", ")", "return", "datetime", ".", "datetime", "(", "*", "(", "dt", "[", ":", "resolution", "]", "+", "[", "1", "]", "*", "max", "(", "resolution", "-", "3", ",", "0", ")", ")", ")", "return", "[", "quantize_datetime", "(", "value", ")", "for", "value", "in", "dt", "]" ]
docstring:

    Quantize a datetime to integer years, months, days, hours, minutes,
    seconds or microseconds

    Also works with a `datetime.timetuple` or `time.struct_time` or a
    1- to 9-tuple of ints or floats. Also works with a sequence of
    struct_times, tuples, or datetimes.

    >>> quantize_datetime(datetime.datetime(1970,1,2,3,4,5,6), resolution=3)
    datetime.datetime(1970, 1, 2, 0, 0)

    Notice that 6 is the highest resolution value with any utility

    >>> quantize_datetime(datetime.datetime(1970,1,2,3,4,5,6), resolution=7)
    datetime.datetime(1970, 1, 2, 3, 4, 5)
    >>> quantize_datetime(datetime.datetime(1971,2,3,4,5,6,7), 1)
    datetime.datetime(1971, 1, 1, 0, 0)
[ "Quantize", "a", "datetime", "to", "integer", "years", "months", "days", "hours", "minutes", "seconds", "or", "microseconds" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/tutil.py#L229-L266
train
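The quantization itself is just slicing the time tuple and padding back to a valid (year, month, day) floor. The struct_time branch above, replayed standalone:

    import datetime

    dt = datetime.datetime(1970, 1, 2, 3, 4, 5, 6)
    parts = list(dt.timetuple())[:6]                  # [1970, 1, 2, 3, 4, 5]

    for resolution, expected in [(3, datetime.datetime(1970, 1, 2)),
                                 (1, datetime.datetime(1970, 1, 1))]:
        quantized = datetime.datetime(
            *(parts[:resolution] + [1] * max(3 - resolution, 0)))
        assert quantized == expected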
totalgood/pugnlp
src/pugnlp/tutil.py
timetag_str
original_string / code (python):

    def timetag_str(dt=None, sep='-', filler='0', resolution=6):
        """Generate a date-time tag suitable for appending to a file name.

        >>> timetag_str(resolution=3) == '-'.join('{0:02d}'.format(i) for i in
        ...     tuple(datetime.datetime.now().timetuple()[:3]))
        True
        >>> timetag_str(datetime.datetime(2004,12,8,1,2,3,400000))
        '2004-12-08-01-02-03'
        >>> timetag_str(datetime.datetime(2004,12,8))
        '2004-12-08-00-00-00'
        >>> timetag_str(datetime.datetime(2003,6,19), filler='')
        '2003-6-19-0-0-0'
        """
        resolution = int(resolution or 6)
        if sep in (None, False):
            sep = ''
        sep = str(sep)
        dt = datetime.datetime.now() if dt is None else dt
        # FIXME: don't use timetuple which truncates microseconds
        return sep.join(('{0:' + filler + ('2' if filler else '') + 'd}').format(i)
                        for i in tuple(dt.timetuple()[:resolution]))
[ "def", "timetag_str", "(", "dt", "=", "None", ",", "sep", "=", "'-'", ",", "filler", "=", "'0'", ",", "resolution", "=", "6", ")", ":", "resolution", "=", "int", "(", "resolution", "or", "6", ")", "if", "sep", "in", "(", "None", ",", "False", ")", ":", "sep", "=", "''", "sep", "=", "str", "(", "sep", ")", "dt", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "if", "dt", "is", "None", "else", "dt", "# FIXME: don't use timetuple which truncates microseconds", "return", "sep", ".", "join", "(", "(", "'{0:'", "+", "filler", "+", "(", "'2'", "if", "filler", "else", "''", ")", "+", "'d}'", ")", ".", "format", "(", "i", ")", "for", "i", "in", "tuple", "(", "dt", ".", "timetuple", "(", ")", "[", ":", "resolution", "]", ")", ")" ]
Generate a date-time tag suitable for appending to a file name. >>> timetag_str(resolution=3) == '-'.join('{0:02d}'.format(i) for i in ... tuple(datetime.datetime.now().timetuple()[:3])) True >>> timetag_str(datetime.datetime(2004,12,8,1,2,3,400000)) '2004-12-08-01-02-03' >>> timetag_str(datetime.datetime(2004,12,8)) '2004-12-08-00-00-00' >>> timetag_str(datetime.datetime(2003,6,19), filler='') '2003-6-19-0-0-0'
[ "Generate", "a", "date", "-", "time", "tag", "suitable", "for", "appending", "to", "a", "file", "name", "." ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/tutil.py#L319-L339
train
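For the default separator and zero filler, the same tag can be produced with strftime, which likewise drops microseconds; a brief standard-library sanity check:

import datetime

dt = datetime.datetime(2004, 12, 8, 1, 2, 3, 400000)
print(dt.strftime('%Y-%m-%d-%H-%M-%S'))  # 2004-12-08-01-02-03, matching timetag_str(dt)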
totalgood/pugnlp
src/pugnlp/tutil.py
make_tz_aware
def make_tz_aware(dt, tz='UTC', is_dst=None): """Add timezone information to a datetime object, only if it is naive. >>> make_tz_aware(datetime.datetime(2001, 9, 8, 7, 6)) datetime.datetime(2001, 9, 8, 7, 6, tzinfo=<UTC>) >>> make_tz_aware(['2010-01-01'], 'PST') [datetime.datetime(2010, 1, 1, 0, 0, tzinfo=<DstTzInfo 'US/Pacific' PST-1 day, 16:00:00 STD>)] >>> make_tz_aware(['1970-10-31', '1970-12-25', '1971-07-04'], 'CDT') # doctest: +NORMALIZE_WHITESPACE [datetime.datetime(1970, 10, 31, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 12, 25, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1971, 7, 4, 0, 0, tzinfo=<DstTzInfo 'US/Central' CDT-1 day, 19:00:00 DST>)] >>> make_tz_aware([None, float('nan'), float('inf'), 1980, 1979.25*365.25, '1970-10-31', ... '1970-12-25', '1971-07-04'], ... 'CDT') # doctest: +NORMALIZE_WHITESPACE [None, nan, inf, datetime.datetime(6, 6, 3, 0, 0, tzinfo=<DstTzInfo 'US/Central' LMT-1 day, 18:09:00 STD>), datetime.datetime(1980, 4, 16, 1, 30, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 10, 31, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 12, 25, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1971, 7, 4, 0, 0, tzinfo=<DstTzInfo 'US/Central' CDT-1 day, 19:00:00 DST>)] >>> make_tz_aware(datetime.time(22, 23, 59, 123456)) datetime.time(22, 23, 59, 123456, tzinfo=<UTC>) >>> make_tz_aware(datetime.time(22, 23, 59, 123456), 'PDT', is_dst=True) datetime.time(22, 23, 59, 123456, tzinfo=<DstTzInfo 'US/Pacific' LMT-1 day, 16:07:00 STD>) """ # make sure dt is a datetime, time, or list of datetime/times dt = make_datetime(dt) if not isinstance(dt, (list, datetime.datetime, datetime.date, datetime.time, pd.Timestamp)): return dt # TODO: deal with sequence of timezones try: tz = dt.tzinfo or tz except (ValueError, AttributeError, TypeError): pass try: tzstr = str(tz).strip().upper() if tzstr in TZ_ABBREV_NAME: is_dst = is_dst or tzstr.endswith('DT') tz = TZ_ABBREV_NAME.get(tzstr, tz) except (ValueError, AttributeError, TypeError): pass try: tz = pytz.timezone(tz) except (ValueError, AttributeError, TypeError): # from traceback import print_exc # print_exc() pass try: return tz.localize(dt, is_dst=is_dst) except (ValueError, AttributeError, TypeError): # from traceback import print_exc # print_exc() # TypeError: unsupported operand type(s) for +: 'datetime.time' and 'datetime.timedelta' pass # could be datetime.time, which can't be localized. Instead `replace` the TZ # don't try/except in case dt is not a datetime or time type -- should raise an exception if not isinstance(dt, list): return dt.replace(tzinfo=tz) return [make_tz_aware(dt0, tz=tz, is_dst=is_dst) for dt0 in dt]
python
def make_tz_aware(dt, tz='UTC', is_dst=None): """Add timezone information to a datetime object, only if it is naive. >>> make_tz_aware(datetime.datetime(2001, 9, 8, 7, 6)) datetime.datetime(2001, 9, 8, 7, 6, tzinfo=<UTC>) >>> make_tz_aware(['2010-01-01'], 'PST') [datetime.datetime(2010, 1, 1, 0, 0, tzinfo=<DstTzInfo 'US/Pacific' PST-1 day, 16:00:00 STD>)] >>> make_tz_aware(['1970-10-31', '1970-12-25', '1971-07-04'], 'CDT') # doctest: +NORMALIZE_WHITESPACE [datetime.datetime(1970, 10, 31, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 12, 25, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1971, 7, 4, 0, 0, tzinfo=<DstTzInfo 'US/Central' CDT-1 day, 19:00:00 DST>)] >>> make_tz_aware([None, float('nan'), float('inf'), 1980, 1979.25*365.25, '1970-10-31', ... '1970-12-25', '1971-07-04'], ... 'CDT') # doctest: +NORMALIZE_WHITESPACE [None, nan, inf, datetime.datetime(6, 6, 3, 0, 0, tzinfo=<DstTzInfo 'US/Central' LMT-1 day, 18:09:00 STD>), datetime.datetime(1980, 4, 16, 1, 30, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 10, 31, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 12, 25, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1971, 7, 4, 0, 0, tzinfo=<DstTzInfo 'US/Central' CDT-1 day, 19:00:00 DST>)] >>> make_tz_aware(datetime.time(22, 23, 59, 123456)) datetime.time(22, 23, 59, 123456, tzinfo=<UTC>) >>> make_tz_aware(datetime.time(22, 23, 59, 123456), 'PDT', is_dst=True) datetime.time(22, 23, 59, 123456, tzinfo=<DstTzInfo 'US/Pacific' LMT-1 day, 16:07:00 STD>) """ # make sure dt is a datetime, time, or list of datetime/times dt = make_datetime(dt) if not isinstance(dt, (list, datetime.datetime, datetime.date, datetime.time, pd.Timestamp)): return dt # TODO: deal with sequence of timezones try: tz = dt.tzinfo or tz except (ValueError, AttributeError, TypeError): pass try: tzstr = str(tz).strip().upper() if tzstr in TZ_ABBREV_NAME: is_dst = is_dst or tzstr.endswith('DT') tz = TZ_ABBREV_NAME.get(tzstr, tz) except (ValueError, AttributeError, TypeError): pass try: tz = pytz.timezone(tz) except (ValueError, AttributeError, TypeError): # from traceback import print_exc # print_exc() pass try: return tz.localize(dt, is_dst=is_dst) except (ValueError, AttributeError, TypeError): # from traceback import print_exc # print_exc() # TypeError: unsupported operand type(s) for +: 'datetime.time' and 'datetime.timedelta' pass # could be datetime.time, which can't be localized. Instead `replace` the TZ # don't try/except in case dt is not a datetime or time type -- should raise an exception if not isinstance(dt, list): return dt.replace(tzinfo=tz) return [make_tz_aware(dt0, tz=tz, is_dst=is_dst) for dt0 in dt]
[ "def", "make_tz_aware", "(", "dt", ",", "tz", "=", "'UTC'", ",", "is_dst", "=", "None", ")", ":", "# make sure dt is a datetime, time, or list of datetime/times", "dt", "=", "make_datetime", "(", "dt", ")", "if", "not", "isinstance", "(", "dt", ",", "(", "list", ",", "datetime", ".", "datetime", ",", "datetime", ".", "date", ",", "datetime", ".", "time", ",", "pd", ".", "Timestamp", ")", ")", ":", "return", "dt", "# TODO: deal with sequence of timezones", "try", ":", "tz", "=", "dt", ".", "tzinfo", "or", "tz", "except", "(", "ValueError", ",", "AttributeError", ",", "TypeError", ")", ":", "pass", "try", ":", "tzstr", "=", "str", "(", "tz", ")", ".", "strip", "(", ")", ".", "upper", "(", ")", "if", "tzstr", "in", "TZ_ABBREV_NAME", ":", "is_dst", "=", "is_dst", "or", "tzstr", ".", "endswith", "(", "'DT'", ")", "tz", "=", "TZ_ABBREV_NAME", ".", "get", "(", "tzstr", ",", "tz", ")", "except", "(", "ValueError", ",", "AttributeError", ",", "TypeError", ")", ":", "pass", "try", ":", "tz", "=", "pytz", ".", "timezone", "(", "tz", ")", "except", "(", "ValueError", ",", "AttributeError", ",", "TypeError", ")", ":", "# from traceback import print_exc", "# print_exc()", "pass", "try", ":", "return", "tz", ".", "localize", "(", "dt", ",", "is_dst", "=", "is_dst", ")", "except", "(", "ValueError", ",", "AttributeError", ",", "TypeError", ")", ":", "# from traceback import print_exc", "# print_exc() # TypeError: unsupported operand type(s) for +: 'datetime.time' and 'datetime.timedelta'", "pass", "# could be datetime.time, which can't be localized. Insted `replace` the TZ", "# don't try/except in case dt is not a datetime or time type -- should raise an exception", "if", "not", "isinstance", "(", "dt", ",", "list", ")", ":", "return", "dt", ".", "replace", "(", "tzinfo", "=", "tz", ")", "return", "[", "make_tz_aware", "(", "dt0", ",", "tz", "=", "tz", ",", "is_dst", "=", "is_dst", ")", "for", "dt0", "in", "dt", "]" ]
Add timezone information to a datetime object, only if it is naive. >>> make_tz_aware(datetime.datetime(2001, 9, 8, 7, 6)) datetime.datetime(2001, 9, 8, 7, 6, tzinfo=<UTC>) >>> make_tz_aware(['2010-01-01'], 'PST') [datetime.datetime(2010, 1, 1, 0, 0, tzinfo=<DstTzInfo 'US/Pacific' PST-1 day, 16:00:00 STD>)] >>> make_tz_aware(['1970-10-31', '1970-12-25', '1971-07-04'], 'CDT') # doctest: +NORMALIZE_WHITESPACE [datetime.datetime(1970, 10, 31, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 12, 25, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1971, 7, 4, 0, 0, tzinfo=<DstTzInfo 'US/Central' CDT-1 day, 19:00:00 DST>)] >>> make_tz_aware([None, float('nan'), float('inf'), 1980, 1979.25*365.25, '1970-10-31', ... '1970-12-25', '1971-07-04'], ... 'CDT') # doctest: +NORMALIZE_WHITESPACE [None, nan, inf, datetime.datetime(6, 6, 3, 0, 0, tzinfo=<DstTzInfo 'US/Central' LMT-1 day, 18:09:00 STD>), datetime.datetime(1980, 4, 16, 1, 30, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 10, 31, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 12, 25, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1971, 7, 4, 0, 0, tzinfo=<DstTzInfo 'US/Central' CDT-1 day, 19:00:00 DST>)] >>> make_tz_aware(datetime.time(22, 23, 59, 123456)) datetime.time(22, 23, 59, 123456, tzinfo=<UTC>) >>> make_tz_aware(datetime.time(22, 23, 59, 123456), 'PDT', is_dst=True) datetime.time(22, 23, 59, 123456, tzinfo=<DstTzInfo 'US/Pacific' LMT-1 day, 16:07:00 STD>)
[ "Add", "timezone", "information", "to", "a", "datetime", "object", "only", "if", "it", "is", "naive", "." ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/tutil.py#L349-L408
train
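The localize-then-replace fallback matters because a pytz zone attached via replace() carries the zone's oldest (LMT) offset instead of the DST-aware one; a short illustration of the pitfall the function works around:

import datetime
import pytz

tz = pytz.timezone('US/Pacific')
naive = datetime.datetime(2010, 1, 1)
print(tz.localize(naive))        # 2010-01-01 00:00:00-08:00 (correct PST offset)
print(naive.replace(tzinfo=tz))  # 2010-01-01 00:00:00-07:53 (LMT offset pitfall)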
hootnot/postcode-api-wrapper
postcodepy/typedefs.py
translate_addresstype
def translate_addresstype(f): """decorator to translate the addressType field. translate the value of the addressType field of the API response into a translated type. """ @wraps(f) def wr(r, pc): at = r["addressType"] try: r.update({"addressType": POSTCODE_API_TYPEDEFS_ADDRESS_TYPES[at]}) except KeyError: logger.warning("Warning: {}: " "unknown 'addressType': {}".format(pc, at)) return f(r, pc) return wr
python
def translate_addresstype(f): """decorator to translate the addressType field. translate the value of the addressType field of the API response into a translated type. """ @wraps(f) def wr(r, pc): at = r["addressType"] try: r.update({"addressType": POSTCODE_API_TYPEDEFS_ADDRESS_TYPES[at]}) except KeyError: logger.warning("Warning: {}: " "unknown 'addressType': {}".format(pc, at)) return f(r, pc) return wr
[ "def", "translate_addresstype", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wr", "(", "r", ",", "pc", ")", ":", "at", "=", "r", "[", "\"addressType\"", "]", "try", ":", "r", ".", "update", "(", "{", "\"addressType\"", ":", "POSTCODE_API_TYPEDEFS_ADDRESS_TYPES", "[", "at", "]", "}", ")", "except", ":", "logger", ".", "warning", "(", "\"Warning: {}: \"", "\"unknown 'addressType': {}\"", ".", "format", "(", "pc", ",", "at", ")", ")", "return", "f", "(", "r", ",", "pc", ")", "return", "wr" ]
decorator to translate the addressType field. translate the value of the addressType field of the API response into a translated type.
[ "decorator", "to", "translate", "the", "addressType", "field", "." ]
42359cb9402f84a06f7d58f889f1156d653f5ea9
https://github.com/hootnot/postcode-api-wrapper/blob/42359cb9402f84a06f7d58f889f1156d653f5ea9/postcodepy/typedefs.py#L55-L72
train
hootnot/postcode-api-wrapper
postcodepy/typedefs.py
translate_purposes
def translate_purposes(f): """decorator to translate the purposes field. translate the values of the purposes field of the API response into translated values. """ @wraps(f) def wr(r, pc): tmp = [] for P in r["purposes"]: try: tmp.append(POSTCODE_API_TYPEDEFS_PURPOSES[P]) except KeyError: logger.warning("Warning: {}: " "cannot translate 'purpose': {}".format(pc, P)) tmp.append(P) r.update({"purposes": tmp}) return f(r, pc) return wr
python
def translate_purposes(f): """decorator to translate the purposes field. translate the values of the purposes field of the API response into translated values. """ @wraps(f) def wr(r, pc): tmp = [] for P in r["purposes"]: try: tmp.append(POSTCODE_API_TYPEDEFS_PURPOSES[P]) except KeyError: logger.warning("Warning: {}: " "cannot translate 'purpose': {}".format(pc, P)) tmp.append(P) r.update({"purposes": tmp}) return f(r, pc) return wr
[ "def", "translate_purposes", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wr", "(", "r", ",", "pc", ")", ":", "tmp", "=", "[", "]", "for", "P", "in", "r", "[", "\"purposes\"", "]", ":", "try", ":", "tmp", ".", "append", "(", "POSTCODE_API_TYPEDEFS_PURPOSES", "[", "P", "]", ")", "except", ":", "logger", ".", "warning", "(", "\"Warning: {}: \"", "\"cannot translate 'purpose': {}\"", ".", "format", "(", "pc", ",", "P", ")", ")", "tmp", ".", "append", "(", "P", ")", "r", ".", "update", "(", "{", "\"purposes\"", ":", "tmp", "}", ")", "return", "f", "(", "r", ",", "pc", ")", "return", "wr" ]
decorator to translate the purposes field. translate the values of the purposes field of the API response into translated values.
[ "decorator", "to", "translate", "the", "purposes", "field", "." ]
42359cb9402f84a06f7d58f889f1156d653f5ea9
https://github.com/hootnot/postcode-api-wrapper/blob/42359cb9402f84a06f7d58f889f1156d653f5ea9/postcodepy/typedefs.py#L75-L95
train
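A runnable sketch of how such a decorator composes with a response handler; the mapping entry below is a hypothetical stand-in for the module's real POSTCODE_API_TYPEDEFS tables:

import logging
from functools import wraps

logging.basicConfig()
logger = logging.getLogger(__name__)

POSTCODE_API_TYPEDEFS_ADDRESS_TYPES = {'PostbusType': 'PO box'}  # hypothetical entry

def translate_addresstype(f):
    @wraps(f)
    def wr(r, pc):
        at = r["addressType"]
        try:
            r.update({"addressType": POSTCODE_API_TYPEDEFS_ADDRESS_TYPES[at]})
        except KeyError:
            logger.warning("Warning: %s: unknown 'addressType': %s", pc, at)
        return f(r, pc)
    return wr

@translate_addresstype
def handle(r, pc):
    print(pc, r["addressType"])

handle({"addressType": "PostbusType"}, "1234AB")  # 1234AB PO box
handle({"addressType": "Mystery"}, "9999ZZ")      # logs a warning, keeps the raw value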
consbio/ncdjango
ncdjango/interfaces/data/classify.py
quantile
def quantile(data, num_breaks): """ Calculate quantile breaks. Arguments: data -- Array of values to classify. num_breaks -- Number of breaks to perform. """ def scipy_mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, limit=()): """ function copied from scipy 0.13.3::scipy.stats.mstats.mquantiles """ def _quantiles1D(data,m,p): x = numpy.sort(data.compressed()) n = len(x) if n == 0: return numpy.ma.array(numpy.empty(len(p), dtype=float), mask=True) elif n == 1: return numpy.ma.array(numpy.resize(x, p.shape), mask=numpy.ma.nomask) aleph = (n*p + m) k = numpy.floor(aleph.clip(1, n-1)).astype(int) gamma = (aleph-k).clip(0,1) return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()] # Initialization & checks --------- data = numpy.ma.array(a, copy=False) if data.ndim > 2: raise TypeError("Array should be 2D at most !") # if limit: condition = (limit[0] < data) & (data < limit[1]) data[~condition.filled(True)] = numpy.ma.masked # p = numpy.array(prob, copy=False, ndmin=1) m = alphap + p*(1.-alphap-betap) # Computes quantiles along axis (or globally) if (axis is None): return _quantiles1D(data, m, p) return numpy.ma.apply_along_axis(_quantiles1D, axis, data, m, p) return scipy_mquantiles(data, numpy.linspace(1.0 / num_breaks, 1, num_breaks))
python
def quantile(data, num_breaks): """ Calculate quantile breaks. Arguments: data -- Array of values to classify. num_breaks -- Number of breaks to perform. """ def scipy_mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, limit=()): """ function copied from scipy 0.13.3::scipy.stats.mstats.mquantiles """ def _quantiles1D(data,m,p): x = numpy.sort(data.compressed()) n = len(x) if n == 0: return numpy.ma.array(numpy.empty(len(p), dtype=float), mask=True) elif n == 1: return numpy.ma.array(numpy.resize(x, p.shape), mask=numpy.ma.nomask) aleph = (n*p + m) k = numpy.floor(aleph.clip(1, n-1)).astype(int) gamma = (aleph-k).clip(0,1) return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()] # Initialization & checks --------- data = numpy.ma.array(a, copy=False) if data.ndim > 2: raise TypeError("Array should be 2D at most !") # if limit: condition = (limit[0] < data) & (data < limit[1]) data[~condition.filled(True)] = numpy.ma.masked # p = numpy.array(prob, copy=False, ndmin=1) m = alphap + p*(1.-alphap-betap) # Computes quantiles along axis (or globally) if (axis is None): return _quantiles1D(data, m, p) return numpy.ma.apply_along_axis(_quantiles1D, axis, data, m, p) return scipy_mquantiles(data, numpy.linspace(1.0 / num_breaks, 1, num_breaks))
[ "def", "quantile", "(", "data", ",", "num_breaks", ")", ":", "def", "scipy_mquantiles", "(", "a", ",", "prob", "=", "list", "(", "[", ".25", ",", ".5", ",", ".75", "]", ")", ",", "alphap", "=", ".4", ",", "betap", "=", ".4", ",", "axis", "=", "None", ",", "limit", "=", "(", ")", ")", ":", "\"\"\" function copied from scipy 0.13.3::scipy.stats.mstats.mquantiles \"\"\"", "def", "_quantiles1D", "(", "data", ",", "m", ",", "p", ")", ":", "x", "=", "numpy", ".", "sort", "(", "data", ".", "compressed", "(", ")", ")", "n", "=", "len", "(", "x", ")", "if", "n", "==", "0", ":", "return", "numpy", ".", "ma", ".", "array", "(", "numpy", ".", "empty", "(", "len", "(", "p", ")", ",", "dtype", "=", "float", ")", ",", "mask", "=", "True", ")", "elif", "n", "==", "1", ":", "return", "numpy", ".", "ma", ".", "array", "(", "numpy", ".", "resize", "(", "x", ",", "p", ".", "shape", ")", ",", "mask", "=", "numpy", ".", "ma", ".", "nomask", ")", "aleph", "=", "(", "n", "*", "p", "+", "m", ")", "k", "=", "numpy", ".", "floor", "(", "aleph", ".", "clip", "(", "1", ",", "n", "-", "1", ")", ")", ".", "astype", "(", "int", ")", "gamma", "=", "(", "aleph", "-", "k", ")", ".", "clip", "(", "0", ",", "1", ")", "return", "(", "1.", "-", "gamma", ")", "*", "x", "[", "(", "k", "-", "1", ")", ".", "tolist", "(", ")", "]", "+", "gamma", "*", "x", "[", "k", ".", "tolist", "(", ")", "]", "# Initialization & checks ---------", "data", "=", "numpy", ".", "ma", ".", "array", "(", "a", ",", "copy", "=", "False", ")", "if", "data", ".", "ndim", ">", "2", ":", "raise", "TypeError", "(", "\"Array should be 2D at most !\"", ")", "#", "if", "limit", ":", "condition", "=", "(", "limit", "[", "0", "]", "<", "data", ")", "&", "(", "data", "<", "limit", "[", "1", "]", ")", "data", "[", "~", "condition", ".", "filled", "(", "True", ")", "]", "=", "numpy", ".", "ma", ".", "masked", "#", "p", "=", "numpy", ".", "array", "(", "prob", ",", "copy", "=", "False", ",", "ndmin", "=", "1", ")", "m", "=", "alphap", "+", "p", "*", "(", "1.", "-", "alphap", "-", "betap", ")", "# Computes quantiles along axis (or globally)", "if", "(", "axis", "is", "None", ")", ":", "return", "_quantiles1D", "(", "data", ",", "m", ",", "p", ")", "return", "numpy", ".", "ma", ".", "apply_along_axis", "(", "_quantiles1D", ",", "axis", ",", "data", ",", "m", ",", "p", ")", "return", "scipy_mquantiles", "(", "data", ",", "numpy", ".", "linspace", "(", "1.0", "/", "num_breaks", ",", "1", ",", "num_breaks", ")", ")" ]
Calculate quantile breaks. Arguments: data -- Array of values to classify. num_breaks -- Number of breaks to perform.
[ "Calculate", "quantile", "breaks", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/data/classify.py#L91-L131
train
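Quick usage check, assuming the quantile function above is importable. The probabilities queried are 1/k, 2/k, ..., 1, so the returned breaks are the upper edges of each quantile bin:

import numpy

data = numpy.arange(1, 101)   # values 1..100
print(quantile(data, 4))      # ~[25.45, 50.5, 75.55, 100.0]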
consbio/ncdjango
ncdjango/interfaces/data/classify.py
equal
def equal(data, num_breaks): """ Calculate equal interval breaks. Arguments: data -- Array of values to classify. num_breaks -- Number of breaks to perform. """ step = (numpy.amax(data) - numpy.amin(data)) / num_breaks return numpy.linspace(numpy.amin(data) + step, numpy.amax(data), num_breaks)
python
def equal(data, num_breaks): """ Calculate equal interval breaks. Arguments: data -- Array of values to classify. num_breaks -- Number of breaks to perform. """ step = (numpy.amax(data) - numpy.amin(data)) / num_breaks return numpy.linspace(numpy.amin(data) + step, numpy.amax(data), num_breaks)
[ "def", "equal", "(", "data", ",", "num_breaks", ")", ":", "step", "=", "(", "numpy", ".", "amax", "(", "data", ")", "-", "numpy", ".", "amin", "(", "data", ")", ")", "/", "num_breaks", "return", "numpy", ".", "linspace", "(", "numpy", ".", "amin", "(", "data", ")", "+", "step", ",", "numpy", ".", "amax", "(", "data", ")", ",", "num_breaks", ")" ]
Calculate equal interval breaks. Arguments: data -- Array of values to classify. num_breaks -- Number of breaks to perform.
[ "Calculate", "equal", "interval", "breaks", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/data/classify.py#L134-L144
train
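Usage sketch: the breaks are the upper edges of num_breaks equal-width bins, so the minimum is excluded and the maximum is the last break:

import numpy

data = numpy.array([0., 2., 4., 6., 8., 10.])
print(equal(data, 5))  # [ 2.  4.  6.  8. 10.]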
DarkEnergySurvey/ugali
ugali/utils/fileio.py
add_column
def add_column(filename,column,formula,force=False): """ Add a column to a FITS file. ADW: Could this be replaced by a ftool? """ columns = parse_formula(formula) logger.info("Running file: %s"%filename) logger.debug(" Reading columns: %s"%columns) data = fitsio.read(filename,columns=columns) logger.debug(' Evaluating formula: %s'%formula) col = eval(formula) col = np.asarray(col,dtype=[(column,col.dtype)]) insert_columns(filename,col,force=force) return True
python
def add_column(filename,column,formula,force=False): """ Add a column to a FITS file. ADW: Could this be replaced by a ftool? """ columns = parse_formula(formula) logger.info("Running file: %s"%filename) logger.debug(" Reading columns: %s"%columns) data = fitsio.read(filename,columns=columns) logger.debug(' Evaluating formula: %s'%formula) col = eval(formula) col = np.asarray(col,dtype=[(column,col.dtype)]) insert_columns(filename,col,force=force) return True
[ "def", "add_column", "(", "filename", ",", "column", ",", "formula", ",", "force", "=", "False", ")", ":", "columns", "=", "parse_formula", "(", "formula", ")", "logger", ".", "info", "(", "\"Running file: %s\"", "%", "filename", ")", "logger", ".", "debug", "(", "\" Reading columns: %s\"", "%", "columns", ")", "data", "=", "fitsio", ".", "read", "(", "filename", ",", "columns", "=", "columns", ")", "logger", ".", "debug", "(", "' Evaluating formula: %s'", "%", "formula", ")", "col", "=", "eval", "(", "formula", ")", "col", "=", "np", ".", "asarray", "(", "col", ",", "dtype", "=", "[", "(", "column", ",", "col", ".", "dtype", ")", "]", ")", "insert_columns", "(", "filename", ",", "col", ",", "force", "=", "force", ")", "return", "True" ]
Add a column to a FITS file. ADW: Could this be replaced by a ftool?
[ "Add", "a", "column", "to", "a", "FITS", "file", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/fileio.py#L70-L85
train
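Hypothetical usage sketch: add_column evaluates the formula with the referenced columns available through the local `data` array it reads, so the column names below are illustrative, not from the source:

# Add a MAG_DIFF column computed from two existing columns of catalog.fits.
add_column('catalog.fits', 'MAG_DIFF', "data['MAG_G'] - data['MAG_R']", force=True)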
DarkEnergySurvey/ugali
ugali/utils/fileio.py
load_files
def load_files(filenames,multiproc=False,**kwargs): """ Load a set of FITS files with kwargs. """ filenames = np.atleast_1d(filenames) logger.debug("Loading %s files..."%len(filenames)) kwargs = [dict(filename=f,**kwargs) for f in filenames] if multiproc: from multiprocessing import Pool processes = multiproc if multiproc > 0 else None p = Pool(processes,maxtasksperchild=1) out = p.map(load_file,kwargs) else: out = [load_file(kw) for kw in kwargs] dtype = out[0].dtype for i,d in enumerate(out): if d.dtype != dtype: # ADW: Not really safe... logger.warn("Casting input data to same type.") out[i] = d.astype(dtype,copy=False) logger.debug('Concatenating arrays...') return np.concatenate(out)
python
def load_files(filenames,multiproc=False,**kwargs): """ Load a set of FITS files with kwargs. """ filenames = np.atleast_1d(filenames) logger.debug("Loading %s files..."%len(filenames)) kwargs = [dict(filename=f,**kwargs) for f in filenames] if multiproc: from multiprocessing import Pool processes = multiproc if multiproc > 0 else None p = Pool(processes,maxtasksperchild=1) out = p.map(load_file,kwargs) else: out = [load_file(kw) for kw in kwargs] dtype = out[0].dtype for i,d in enumerate(out): if d.dtype != dtype: # ADW: Not really safe... logger.warn("Casting input data to same type.") out[i] = d.astype(dtype,copy=False) logger.debug('Concatenating arrays...') return np.concatenate(out)
[ "def", "load_files", "(", "filenames", ",", "multiproc", "=", "False", ",", "*", "*", "kwargs", ")", ":", "filenames", "=", "np", ".", "atleast_1d", "(", "filenames", ")", "logger", ".", "debug", "(", "\"Loading %s files...\"", "%", "len", "(", "filenames", ")", ")", "kwargs", "=", "[", "dict", "(", "filename", "=", "f", ",", "*", "*", "kwargs", ")", "for", "f", "in", "filenames", "]", "if", "multiproc", ":", "from", "multiprocessing", "import", "Pool", "processes", "=", "multiproc", "if", "multiproc", ">", "0", "else", "None", "p", "=", "Pool", "(", "processes", ",", "maxtasksperchild", "=", "1", ")", "out", "=", "p", ".", "map", "(", "load_file", ",", "kwargs", ")", "else", ":", "out", "=", "[", "load_file", "(", "kw", ")", "for", "kw", "in", "kwargs", "]", "dtype", "=", "out", "[", "0", "]", ".", "dtype", "for", "i", ",", "d", "in", "enumerate", "(", "out", ")", ":", "if", "d", ".", "dtype", "!=", "dtype", ":", "# ADW: Not really safe...", "logger", ".", "warn", "(", "\"Casting input data to same type.\"", ")", "out", "[", "i", "]", "=", "d", ".", "astype", "(", "dtype", ",", "copy", "=", "False", ")", "logger", ".", "debug", "(", "'Concatenating arrays...'", ")", "return", "np", ".", "concatenate", "(", "out", ")" ]
Load a set of FITS files with kwargs.
[ "Load", "a", "set", "of", "FITS", "files", "with", "kwargs", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/fileio.py#L98-L121
train
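Usage sketch with illustrative filenames; extra kwargs are forwarded to each load_file call, and multiproc doubles as the enable flag and the worker count:

data = load_files(['cat_0001.fits', 'cat_0002.fits'],
                  multiproc=4,            # Pool of 4 workers; False reads serially
                  columns=['RA', 'DEC'])  # forwarded to the per-file reader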
DarkEnergySurvey/ugali
ugali/scratch/simulation/survey_selection_function.py
surveySelectionFunction.applyFracdet
def applyFracdet(self, lon, lat): """ We want to enforce minimum fracdet for a satellite to be considered detectable. True if it passes the fracdet cut. """ self.loadFracdet() fracdet_core = meanFracdet(self.m_fracdet, lon, lat, np.tile(0.1, len(lon))) fracdet_wide = meanFracdet(self.m_fracdet, lon, lat, np.tile(0.5, len(lon))) return (fracdet_core >= self.config[self.algorithm]['fracdet_core_threshold']) \ & (fracdet_wide >= self.config[self.algorithm]['fracdet_wide_threshold'])
python
def applyFracdet(self, lon, lat): """ We want to enforce minimum fracdet for a satellite to be considered detectable. True if it passes the fracdet cut. """ self.loadFracdet() fracdet_core = meanFracdet(self.m_fracdet, lon, lat, np.tile(0.1, len(lon))) fracdet_wide = meanFracdet(self.m_fracdet, lon, lat, np.tile(0.5, len(lon))) return (fracdet_core >= self.config[self.algorithm]['fracdet_core_threshold']) \ & (fracdet_wide >= self.config[self.algorithm]['fracdet_wide_threshold'])
[ "def", "applyFracdet", "(", "self", ",", "lon", ",", "lat", ")", ":", "self", ".", "loadFracdet", "(", ")", "fracdet_core", "=", "meanFracdet", "(", "self", ".", "m_fracdet", ",", "lon", ",", "lat", ",", "np", ".", "tile", "(", "0.1", ",", "len", "(", "lon", ")", ")", ")", "fracdet_wide", "=", "meanFracdet", "(", "self", ".", "m_fracdet", ",", "lon", ",", "lat", ",", "np", ".", "tile", "(", "0.5", ",", "len", "(", "lon", ")", ")", ")", "return", "(", "fracdet_core", ">=", "self", ".", "config", "[", "self", ".", "algorithm", "]", "[", "'fracdet_core_threshold'", "]", ")", "&", "(", "fracdet_core", ">=", "self", ".", "config", "[", "self", ".", "algorithm", "]", "[", "'fracdet_core_threshold'", "]", ")" ]
We want to enforce minimum fracdet for a satellite to be considered detectable. True if it passes the fracdet cut.
[ "We", "want", "to", "enforce", "minimum", "fracdet", "for", "a", "satellite", "to", "be", "considered", "detectable" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/scratch/simulation/survey_selection_function.py#L385-L395
train
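applyFracdet reads two per-algorithm coverage thresholds from the config. A minimal sketch of the assumed layout; 'fracdet_core_threshold' appears in the source, while 'fracdet_wide_threshold' (for the 0.5 deg wide aperture) is an inferred key name, not confirmed there:

config = {
    'simple': {                         # hypothetical algorithm name
        'fracdet_core_threshold': 0.9,  # mean coverage within 0.1 deg
        'fracdet_wide_threshold': 0.5,  # mean coverage within 0.5 deg (assumed key)
    },
}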
DarkEnergySurvey/ugali
ugali/scratch/simulation/survey_selection_function.py
surveySelectionFunction.applyHotspot
def applyHotspot(self, lon, lat): """ Exclude objects that are too close to a hotspot. True if it passes the hotspot cut. """ self.loadRealResults() cut_detect_real = (self.data_real['SIG'] >= self.config[self.algorithm]['sig_threshold']) lon_real = self.data_real['RA'][cut_detect_real] lat_real = self.data_real['DEC'][cut_detect_real] cut_hotspot = np.tile(True, len(lon)) for ii in range(0, len(lon)): cut_hotspot[ii] = ~np.any(angsep(lon[ii], lat[ii], lon_real, lat_real) < self.config[self.algorithm]['hotspot_angsep_threshold']) return cut_hotspot
python
def applyHotspot(self, lon, lat): """ Exclude objects that are too close to a hotspot. True if it passes the hotspot cut. """ self.loadRealResults() cut_detect_real = (self.data_real['SIG'] >= self.config[self.algorithm]['sig_threshold']) lon_real = self.data_real['RA'][cut_detect_real] lat_real = self.data_real['DEC'][cut_detect_real] cut_hotspot = np.tile(True, len(lon)) for ii in range(0, len(lon)): cut_hotspot[ii] = ~np.any(angsep(lon[ii], lat[ii], lon_real, lat_real) < self.config[self.algorithm]['hotspot_angsep_threshold']) return cut_hotspot
[ "def", "applyHotspot", "(", "self", ",", "lon", ",", "lat", ")", ":", "self", ".", "loadRealResults", "(", ")", "cut_detect_real", "=", "(", "self", ".", "data_real", "[", "'SIG'", "]", ">=", "self", ".", "config", "[", "self", ".", "algorithm", "]", "[", "'sig_threshold'", "]", ")", "lon_real", "=", "self", ".", "data_real", "[", "'RA'", "]", "[", "cut_detect_real", "]", "lat_real", "=", "self", ".", "data_real", "[", "'DEC'", "]", "[", "cut_detect_real", "]", "cut_hotspot", "=", "np", ".", "tile", "(", "True", ",", "len", "(", "lon", ")", ")", "for", "ii", "in", "range", "(", "0", ",", "len", "(", "lon", ")", ")", ":", "cut_hotspot", "[", "ii", "]", "=", "~", "np", ".", "any", "(", "angsep", "(", "lon", "[", "ii", "]", ",", "lat", "[", "ii", "]", ",", "lon_real", ",", "lat_real", ")", "<", "self", ".", "config", "[", "self", ".", "algorithm", "]", "[", "'hotspot_angsep_threshold'", "]", ")", "return", "cut_hotspot" ]
Exclude objects that are too close to a hotspot. True if it passes the hotspot cut.
[ "Exclude", "objects", "that", "are", "too", "close", "to", "hotspot" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/scratch/simulation/survey_selection_function.py#L397-L412
train
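A self-contained sketch of the exclusion logic, with a haversine stand-in for ugali's angsep helper (the real implementation may differ):

import numpy as np

def angsep_deg(lon1, lat1, lon2, lat2):
    # Haversine angular separation in degrees (stand-in for ugali's angsep).
    lon1, lat1, lon2, lat2 = map(np.radians, (lon1, lat1, lon2, lat2))
    a = (np.sin((lat2 - lat1) / 2)**2
         + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2)**2)
    return np.degrees(2 * np.arcsin(np.sqrt(a)))

# Keep simulated satellites farther than 0.5 deg from every real hotspot.
lon, lat = np.array([10.0, 20.0]), np.array([-30.0, -40.0])
lon_real, lat_real = np.array([10.1]), np.array([-30.05])
cut_hotspot = np.array([~np.any(angsep_deg(l, b, lon_real, lat_real) < 0.5)
                        for l, b in zip(lon, lat)])
print(cut_hotspot)  # [False  True]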
DarkEnergySurvey/ugali
ugali/scratch/simulation/survey_selection_function.py
surveySelectionFunction.predict
def predict(self, lon, lat, **kwargs): """ distance, abs_mag, r_physical """ assert self.classifier is not None, 'ERROR' pred = np.zeros(len(lon)) cut_geometry, flags_geometry = self.applyGeometry(lon, lat) x_test = [] for key, operation in self.config['operation']['params_intrinsic']: assert operation.lower() in ['linear', 'log'], 'ERROR' if operation.lower() == 'linear': x_test.append(kwargs[key]) else: x_test.append(np.log10(kwargs[key])) x_test = np.vstack(x_test).T #import pdb; pdb.set_trace() pred[cut_geometry] = self.classifier.predict_proba(x_test[cut_geometry])[:,1] self.validatePredict(pred, flags_geometry, lon, lat, kwargs['r_physical'], kwargs['abs_mag'], kwargs['distance']) return pred, flags_geometry
python
def predict(self, lon, lat, **kwargs): """ distance, abs_mag, r_physical """ assert self.classifier is not None, 'ERROR' pred = np.zeros(len(lon)) cut_geometry, flags_geometry = self.applyGeometry(lon, lat) x_test = [] for key, operation in self.config['operation']['params_intrinsic']: assert operation.lower() in ['linear', 'log'], 'ERROR' if operation.lower() == 'linear': x_test.append(kwargs[key]) else: x_test.append(np.log10(kwargs[key])) x_test = np.vstack(x_test).T #import pdb; pdb.set_trace() pred[cut_geometry] = self.classifier.predict_proba(x_test[cut_geometry])[:,1] self.validatePredict(pred, flags_geometry, lon, lat, kwargs['r_physical'], kwargs['abs_mag'], kwargs['distance']) return pred, flags_geometry
[ "def", "predict", "(", "self", ",", "lon", ",", "lat", ",", "*", "*", "kwargs", ")", ":", "assert", "self", ".", "classifier", "is", "not", "None", ",", "'ERROR'", "pred", "=", "np", ".", "zeros", "(", "len", "(", "lon", ")", ")", "cut_geometry", ",", "flags_geometry", "=", "self", ".", "applyGeometry", "(", "lon", ",", "lat", ")", "x_test", "=", "[", "]", "for", "key", ",", "operation", "in", "self", ".", "config", "[", "'operation'", "]", "[", "'params_intrinsic'", "]", ":", "assert", "operation", ".", "lower", "(", ")", "in", "[", "'linear'", ",", "'log'", "]", ",", "'ERROR'", "if", "operation", ".", "lower", "(", ")", "==", "'linear'", ":", "x_test", ".", "append", "(", "kwargs", "[", "key", "]", ")", "else", ":", "x_test", ".", "append", "(", "np", ".", "log10", "(", "kwargs", "[", "key", "]", ")", ")", "x_test", "=", "np", ".", "vstack", "(", "x_test", ")", ".", "T", "#import pdb; pdb.set_trace()", "pred", "[", "cut_geometry", "]", "=", "self", ".", "classifier", ".", "predict_proba", "(", "x_test", "[", "cut_geometry", "]", ")", "[", ":", ",", "1", "]", "self", ".", "validatePredict", "(", "pred", ",", "flags_geometry", ",", "lon", ",", "lat", ",", "kwargs", "[", "'r_physical'", "]", ",", "kwargs", "[", "'abs_mag'", "]", ",", "kwargs", "[", "'distance'", "]", ")", "return", "pred", ",", "flags_geometry" ]
distance, abs_mag, r_physical
[ "distance", "abs_mag", "r_physical" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/scratch/simulation/survey_selection_function.py#L425-L449
train
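Sketch of the feature-assembly step with a hypothetical params_intrinsic ordering; each (key, operation) pair selects a kwarg and optionally log-scales it before stacking:

import numpy as np

params = {'distance': np.array([40., 100.]),     # kpc
          'abs_mag': np.array([-4.0, -6.0]),
          'r_physical': np.array([0.03, 0.1])}   # kpc
spec = [('distance', 'log'), ('abs_mag', 'linear'), ('r_physical', 'log')]
x_test = np.vstack([np.log10(params[k]) if op == 'log' else params[k]
                    for k, op in spec]).T
print(x_test.shape)  # (2, 3): one row per satellite, one column per feature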
DarkEnergySurvey/ugali
ugali/candidate/associate.py
catalogFactory
def catalogFactory(name, **kwargs): """ Factory for various catalogs. """ fn = lambda member: inspect.isclass(member) and member.__module__==__name__ catalogs = odict(inspect.getmembers(sys.modules[__name__], fn)) if name not in list(catalogs.keys()): msg = "%s not found in catalogs:\n %s"%(name,list(catalogs.keys())) logger.error(msg) msg = "Unrecognized catalog: %s"%name raise Exception(msg) return catalogs[name](**kwargs)
python
def catalogFactory(name, **kwargs): """ Factory for various catalogs. """ fn = lambda member: inspect.isclass(member) and member.__module__==__name__ catalogs = odict(inspect.getmembers(sys.modules[__name__], fn)) if name not in list(catalogs.keys()): msg = "%s not found in catalogs:\n %s"%(name,list(catalogs.keys())) logger.error(msg) msg = "Unrecognized catalog: %s"%name raise Exception(msg) return catalogs[name](**kwargs)
[ "def", "catalogFactory", "(", "name", ",", "*", "*", "kwargs", ")", ":", "fn", "=", "lambda", "member", ":", "inspect", ".", "isclass", "(", "member", ")", "and", "member", ".", "__module__", "==", "__name__", "catalogs", "=", "odict", "(", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "fn", ")", ")", "if", "name", "not", "in", "list", "(", "catalogs", ".", "keys", "(", ")", ")", ":", "msg", "=", "\"%s not found in catalogs:\\n %s\"", "%", "(", "name", ",", "list", "(", "kernels", ".", "keys", "(", ")", ")", ")", "logger", ".", "error", "(", "msg", ")", "msg", "=", "\"Unrecognized catalog: %s\"", "%", "name", "raise", "Exception", "(", "msg", ")", "return", "catalogs", "[", "name", "]", "(", "*", "*", "kwargs", ")" ]
Factory for various catalogs.
[ "Factory", "for", "various", "catalogs", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/candidate/associate.py#L447-L460
train
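Usage sketch; the factory instantiates any class defined in the associate module by name, and the class name below is illustrative:

catalog = catalogFactory('McConnachie12')   # illustrative catalog class name
# catalogFactory('NoSuchCatalog')           # would log an error, then raise Exception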
DarkEnergySurvey/ugali
ugali/analysis/results.py
write_results
def write_results(filename,config,srcfile,samples): """ Package everything nicely """ results = createResults(config,srcfile,samples=samples) results.write(filename)
python
def write_results(filename,config,srcfile,samples): """ Package everything nicely """ results = createResults(config,srcfile,samples=samples) results.write(filename)
[ "def", "write_results", "(", "filename", ",", "config", ",", "srcfile", ",", "samples", ")", ":", "results", "=", "createResults", "(", "config", ",", "srcfile", ",", "samples", "=", "samples", ")", "results", ".", "write", "(", "filename", ")" ]
Package everything nicely
[ "Package", "everything", "nicely" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/results.py#L347-L350
train
DarkEnergySurvey/ugali
ugali/analysis/results.py
Results.estimate
def estimate(self,param,burn=None,clip=10.0,alpha=0.32): """ Estimate parameter value and uncertainties """ # FIXME: Need to add age and metallicity to composite isochrone params (currently properties) if param not in list(self.samples.names) + list(self.source.params) + ['age','metallicity']: msg = 'Unrecognized parameter: %s'%param raise KeyError(msg) # If the parameter is in the samples if param in self.samples.names: if param.startswith('position_angle'): return self.estimate_position_angle(param,burn=burn, clip=clip,alpha=alpha) return self.samples.peak_interval(param,burn=burn,clip=clip,alpha=alpha) mle = self.get_mle() errors = [np.nan,np.nan] # Set default value to the MLE value if param in self.source.params: err = self.source.params[param].errors if err is not None: errors = err # For age and metallicity from composite isochrone return [float(mle[param]),errors]
python
def estimate(self,param,burn=None,clip=10.0,alpha=0.32): """ Estimate parameter value and uncertainties """ # FIXME: Need to add age and metallicity to composite isochrone params (currently properties) if param not in list(self.samples.names) + list(self.source.params) + ['age','metallicity']: msg = 'Unrecognized parameter: %s'%param raise KeyError(msg) # If the parameter is in the samples if param in self.samples.names: if param.startswith('position_angle'): return self.estimate_position_angle(param,burn=burn, clip=clip,alpha=alpha) return self.samples.peak_interval(param,burn=burn,clip=clip,alpha=alpha) mle = self.get_mle() errors = [np.nan,np.nan] # Set default value to the MLE value if param in self.source.params: err = self.source.params[param].errors if err is not None: errors = err # For age and metallicity from composite isochrone return [float(mle[param]),errors]
[ "def", "estimate", "(", "self", ",", "param", ",", "burn", "=", "None", ",", "clip", "=", "10.0", ",", "alpha", "=", "0.32", ")", ":", "# FIXME: Need to add age and metallicity to composite isochrone params (currently properties)", "if", "param", "not", "in", "list", "(", "self", ".", "samples", ".", "names", ")", "+", "list", "(", "self", ".", "source", ".", "params", ")", "+", "[", "'age'", ",", "'metallicity'", "]", ":", "msg", "=", "'Unrecognized parameter: %s'", "%", "param", "raise", "KeyError", "(", "msg", ")", "# If the parameter is in the samples", "if", "param", "in", "self", ".", "samples", ".", "names", ":", "if", "param", ".", "startswith", "(", "'position_angle'", ")", ":", "return", "self", ".", "estimate_position_angle", "(", "param", ",", "burn", "=", "burn", ",", "clip", "=", "clip", ",", "alpha", "=", "alpha", ")", "return", "self", ".", "samples", ".", "peak_interval", "(", "param", ",", "burn", "=", "burn", ",", "clip", "=", "clip", ",", "alpha", "=", "alpha", ")", "mle", "=", "self", ".", "get_mle", "(", ")", "errors", "=", "[", "np", ".", "nan", ",", "np", ".", "nan", "]", "# Set default value to the MLE value", "if", "param", "in", "self", ".", "source", ".", "params", ":", "err", "=", "self", ".", "source", ".", "params", "[", "param", "]", ".", "errors", "if", "err", "is", "not", "None", ":", "errors", "=", "err", "# For age and metallicity from composite isochrone", "return", "[", "float", "(", "mle", "[", "param", "]", ")", ",", "errors", "]" ]
Estimate parameter value and uncertainties
[ "Estimate", "parameter", "value", "and", "uncertainties" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/results.py#L57-L81
train
DarkEnergySurvey/ugali
ugali/analysis/results.py
Results.estimate_params
def estimate_params(self,burn=None,clip=10.0,alpha=0.32): """ Estimate all source parameters """ mle = self.get_mle() out = odict() for param in mle.keys(): out[param] = self.estimate(param,burn=burn,clip=clip,alpha=alpha) return out
python
def estimate_params(self,burn=None,clip=10.0,alpha=0.32): """ Estimate all source parameters """ mle = self.get_mle() out = odict() for param in mle.keys(): out[param] = self.estimate(param,burn=burn,clip=clip,alpha=alpha) return out
[ "def", "estimate_params", "(", "self", ",", "burn", "=", "None", ",", "clip", "=", "10.0", ",", "alpha", "=", "0.32", ")", ":", "mle", "=", "self", ".", "get_mle", "(", ")", "out", "=", "odict", "(", ")", "for", "param", "in", "mle", ".", "keys", "(", ")", ":", "out", "[", "param", "]", "=", "self", ".", "estimate", "(", "param", ",", "burn", "=", "burn", ",", "clip", "=", "clip", ",", "alpha", "=", "alpha", ")", "return", "out" ]
Estimate all source parameters
[ "Estimate", "all", "source", "parameters" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/results.py#L92-L98
train
DarkEnergySurvey/ugali
ugali/analysis/results.py
Results.estimate_position_angle
def estimate_position_angle(self,param='position_angle',burn=None,clip=10.0,alpha=0.32): """ Estimate the position angle from the posterior dealing with periodicity. """ # Transform so peak in the middle of the distribution pa = self.samples.get(param,burn=burn,clip=clip) peak = ugali.utils.stats.kde_peak(pa) shift = 180.*((pa+90-peak)>180) pa -= shift # Get the kde interval ret = ugali.utils.stats.peak_interval(pa,alpha) if ret[0] < 0: ret[0] += 180.; ret[1][0] += 180.; ret[1][1] += 180.; return ret
python
def estimate_position_angle(self,param='position_angle',burn=None,clip=10.0,alpha=0.32): """ Estimate the position angle from the posterior dealing with periodicity. """ # Transform so peak in the middle of the distribution pa = self.samples.get(param,burn=burn,clip=clip) peak = ugali.utils.stats.kde_peak(pa) shift = 180.*((pa+90-peak)>180) pa -= shift # Get the kde interval ret = ugali.utils.stats.peak_interval(pa,alpha) if ret[0] < 0: ret[0] += 180.; ret[1][0] += 180.; ret[1][1] += 180.; return ret
[ "def", "estimate_position_angle", "(", "self", ",", "param", "=", "'position_angle'", ",", "burn", "=", "None", ",", "clip", "=", "10.0", ",", "alpha", "=", "0.32", ")", ":", "# Transform so peak in the middle of the distribution", "pa", "=", "self", ".", "samples", ".", "get", "(", "param", ",", "burn", "=", "burn", ",", "clip", "=", "clip", ")", "peak", "=", "ugali", ".", "utils", ".", "stats", ".", "kde_peak", "(", "pa", ")", "shift", "=", "180.", "*", "(", "(", "pa", "+", "90", "-", "peak", ")", ">", "180", ")", "pa", "-=", "shift", "# Get the kde interval", "ret", "=", "ugali", ".", "utils", ".", "stats", ".", "peak_interval", "(", "pa", ",", "alpha", ")", "if", "ret", "[", "0", "]", "<", "0", ":", "ret", "[", "0", "]", "+=", "180.", "ret", "[", "1", "]", "[", "0", "]", "+=", "180.", "ret", "[", "1", "]", "[", "1", "]", "+=", "180.", "return", "ret" ]
Estimate the position angle from the posterior dealing with periodicity.
[ "Estimate", "the", "position", "angle", "from", "the", "posterior", "dealing", "with", "periodicity", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/results.py#L100-L113
train
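Why the shift is needed: a position-angle posterior peaked near the 0/180 boundary wraps around, so a naive interval straddles both ends; a small numeric illustration of the same transform:

import numpy as np

rng = np.random.default_rng(0)
pa = np.concatenate([rng.normal(2., 5., 500),
                     rng.normal(178., 5., 500)]) % 180.
peak = 2.0  # suppose the KDE peak lands here
shifted = pa - 180. * ((pa + 90 - peak) > 180)
print(pa.min(), pa.max())            # spans nearly the full [0, 180) range
print(shifted.min(), shifted.max())  # one contiguous cluster around 0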
alphagov/performanceplatform-collector
performanceplatform/collector/webtrends/keymetrics.py
Collector.date_range_for_webtrends
def date_range_for_webtrends(cls, start_at=None, end_at=None): """ Get the start and end formatted for query or the last hour if none specified. Unlike reports, this does not aggregate periods and so it is possible to just query a range and parse out the individual hours. """ if start_at and end_at: start_date = cls.parse_standard_date_string_to_date( start_at) end_date = cls.parse_standard_date_string_to_date( end_at) return [( cls.parse_date_for_query(start_date), cls.parse_date_for_query(end_date))] else: return [("current_hour-1", "current_hour-1")]
python
def date_range_for_webtrends(cls, start_at=None, end_at=None): """ Get the start and end formatted for query or the last hour if none specified. Unlike reports, this does not aggregate periods and so it is possible to just query a range and parse out the individual hours. """ if start_at and end_at: start_date = cls.parse_standard_date_string_to_date( start_at) end_date = cls.parse_standard_date_string_to_date( end_at) return [( cls.parse_date_for_query(start_date), cls.parse_date_for_query(end_date))] else: return [("current_hour-1", "current_hour-1")]
[ "def", "date_range_for_webtrends", "(", "cls", ",", "start_at", "=", "None", ",", "end_at", "=", "None", ")", ":", "if", "start_at", "and", "end_at", ":", "start_date", "=", "cls", ".", "parse_standard_date_string_to_date", "(", "start_at", ")", "end_date", "=", "cls", ".", "parse_standard_date_string_to_date", "(", "end_at", ")", "return", "[", "(", "cls", ".", "parse_date_for_query", "(", "start_date", ")", ",", "cls", ".", "parse_date_for_query", "(", "end_date", ")", ")", "]", "else", ":", "return", "[", "(", "\"current_hour-1\"", ",", "\"current_hour-1\"", ")", "]" ]
Get the start and end formatted for query or the last hour if none specified. Unlike reports, this does not aggregate periods and so it is possible to just query a range and parse out the individual hours.
[ "Get", "the", "start", "and", "end", "formatted", "for", "query", "or", "the", "last", "hour", "if", "none", "specified", ".", "Unlike", "reports", "this", "does", "not", "aggregate", "periods", "and", "so", "it", "is", "possible", "to", "just", "query", "a", "range", "and", "parse", "out", "the", "individual", "hours", "." ]
de68ab4aa500c31e436e050fa1268fa928c522a5
https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/performanceplatform/collector/webtrends/keymetrics.py#L15-L32
train
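Usage sketch: an explicit range is reformatted by parse_date_for_query (exact output format not shown in this record), while no arguments fall back to the previous hour:

print(Collector.date_range_for_webtrends('2014-08-03', '2014-08-05'))
# -> a single (start, end) tuple formatted by parse_date_for_query
print(Collector.date_range_for_webtrends())
# -> [('current_hour-1', 'current_hour-1')]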
DarkEnergySurvey/ugali
ugali/utils/shell.py
get_ugali_dir
def get_ugali_dir(): """Get the path to the ugali data directory from the environment""" dirname = os.getenv('UGALIDIR') # Get the HOME directory if not dirname: dirname=os.path.join(os.getenv('HOME'),'.ugali') if not os.path.exists(dirname): from ugali.utils.logger import logger msg = "Creating UGALIDIR:\n%s"%dirname logger.warning(msg) return mkdir(dirname)
python
def get_ugali_dir(): """Get the path to the ugali data directory from the environment""" dirname = os.getenv('UGALIDIR') # Get the HOME directory if not dirname: dirname=os.path.join(os.getenv('HOME'),'.ugali') if not os.path.exists(dirname): from ugali.utils.logger import logger msg = "Creating UGALIDIR:\n%s"%dirname logger.warning(msg) return mkdir(dirname)
[ "def", "get_ugali_dir", "(", ")", ":", "dirname", "=", "os", ".", "getenv", "(", "'UGALIDIR'", ")", "# Get the HOME directory", "if", "not", "dirname", ":", "dirname", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getenv", "(", "'HOME'", ")", ",", "'.ugali'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dirname", ")", ":", "from", "ugali", ".", "utils", ".", "logger", "import", "logger", "msg", "=", "\"Creating UGALIDIR:\\n%s\"", "%", "dirname", "logger", ".", "warning", "(", "msg", ")", "return", "mkdir", "(", "dirname", ")" ]
Get the path to the ugali data directory from the environment
[ "Get", "the", "path", "to", "the", "ugali", "data", "directory", "from", "the", "environment" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/shell.py#L48-L62
train
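Usage sketch; UGALIDIR overrides the ~/.ugali default, and ugali's mkdir helper is assumed to create the directory and return its path:

import os

os.environ['UGALIDIR'] = '/tmp/ugali-demo'  # illustrative path
print(get_ugali_dir())  # /tmp/ugali-demo, created (with a warning) if absent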
DarkEnergySurvey/ugali
ugali/utils/shell.py
get_iso_dir
def get_iso_dir(): """Get the ugali isochrone directory.""" dirname = os.path.join(get_ugali_dir(),'isochrones') if not os.path.exists(dirname): from ugali.utils.logger import logger msg = "Isochrone directory not found:\n%s"%dirname logger.warning(msg) return dirname
python
def get_iso_dir(): """Get the ugali isochrone directory.""" dirname = os.path.join(get_ugali_dir(),'isochrones') if not os.path.exists(dirname): from ugali.utils.logger import logger msg = "Isochrone directory not found:\n%s"%dirname logger.warning(msg) return dirname
[ "def", "get_iso_dir", "(", ")", ":", "dirname", "=", "os", ".", "path", ".", "join", "(", "get_ugali_dir", "(", ")", ",", "'isochrones'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dirname", ")", ":", "from", "ugali", ".", "utils", ".", "logger", "import", "logger", "msg", "=", "\"Isochrone directory not found:\\n%s\"", "%", "dirname", "logger", ".", "warning", "(", "msg", ")", "return", "dirname" ]
Get the ugali isochrone directory.
[ "Get", "the", "ugali", "isochrone", "directory", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/shell.py#L64-L73
train
warrenspe/hconf
hconf/Config.py
ConfigManager.registerParser
def registerParser(self, parser): """ Registers a parser to parse configuration inputs. """ if not isinstance(parser, Subparser): raise TypeError("%s is not an instance of a subparser." % parser) self.parsers.append(parser)
python
def registerParser(self, parser): """ Registers a parser to parse configuration inputs. """ if not isinstance(parser, Subparser): raise TypeError("%s is not an instance of a subparser." % parser) self.parsers.append(parser)
[ "def", "registerParser", "(", "self", ",", "parser", ")", ":", "if", "not", "isinstance", "(", "parser", ",", "Subparser", ")", ":", "raise", "TypeError", "(", "\"%s is not an instance of a subparser.\"", "%", "parser", ")", "self", ".", "parsers", ".", "append", "(", "parser", ")" ]
Registers a parser to parse configuration inputs.
[ "Registers", "a", "parser", "to", "parse", "configuration", "inputs", "." ]
12074d15dc3641d3903488c95d89a507386a32d5
https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/Config.py#L65-L73
train
warrenspe/hconf
hconf/Config.py
ConfigManager.addConfig
def addConfig(self, name, default=None, cast=None, required=False, description=None): """ Adds the given configuration option to the ConfigManager. Inputs: name - The configuration name to accept. required - A boolean indicating whether or not the configuration option is required or not. cast - A type (or function accepting 1 argument and returning an object) to cast the input as. If any error occurs during casting an InvalidConfigurationException will be raised. default - The default value to assign to this configuration option. Note that None is not a valid default if required=True. description - A human readable description of this configuration parameter. Will be displayed when the program is run with a -h flag. """ # Validate the name if not self.configNameRE.match(name): raise InvalidConfigurationException("Invalid configuration name: %s" % name) self.configs[self._sanitizeName(name)] = { 'default': default, 'cast': cast, 'required': required, 'description': description }
python
def addConfig(self, name, default=None, cast=None, required=False, description=None): """ Adds the given configuration option to the ConfigManager. Inputs: name - The configuration name to accept. required - A boolean indicating whether or not the configuration option is required or not. cast - A type (or function accepting 1 argument and returning an object) to cast the input as. If any error occurs during casting an InvalidConfigurationException will be raised. default - The default value to assign to this configuration option. Note that None is not a valid default if required=True. description - A human readable description of this configuration parameter. Will be displayed when the program is run with a -h flag. """ # Validate the name if not self.configNameRE.match(name): raise InvalidConfigurationException("Invalid configuration name: %s" % name) self.configs[self._sanitizeName(name)] = { 'default': default, 'cast': cast, 'required': required, 'description': description }
[ "def", "addConfig", "(", "self", ",", "name", ",", "default", "=", "None", ",", "cast", "=", "None", ",", "required", "=", "False", ",", "description", "=", "None", ")", ":", "# Validate the name", "if", "not", "self", ".", "configNameRE", ".", "match", "(", "name", ")", ":", "raise", "InvalidConfigurationException", "(", "\"Invalid configuration name: %s\"", "%", "name", ")", "self", ".", "configs", "[", "self", ".", "_sanitizeName", "(", "name", ")", "]", "=", "{", "'default'", ":", "default", ",", "'cast'", ":", "cast", ",", "'required'", ":", "required", ",", "'description'", ":", "description", "}" ]
Adds the given configuration option to the ConfigManager. Inputs: name - The configuration name to accept. required - A boolean indicating whether or not the configuration option is required or not. cast - A type (or function accepting 1 argument and returning an object) to cast the input as. If any error occurs during casting an InvalidConfigurationException will be raised. default - The default value to assign to this configuration option. Note that None is not a valid default if required=True. description - A human readable description of this configuration parameter. Will be displayed when the program is run with a -h flag.
[ "Adds", "the", "given", "configuration", "option", "to", "the", "ConfigManager", "." ]
12074d15dc3641d3903488c95d89a507386a32d5
https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/Config.py#L75-L98
train
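A minimal declaration sketch, assuming ConfigManager can be constructed without arguments (continued after the parse record below); with no subparsers registered, values come entirely from defaults:

config_manager = ConfigManager()
config_manager.addConfig('port', default=8080, cast=int, required=True,
                         description='Port the server listens on.')
config_manager.addConfig('verbose', default=False,
                         description='Enable chatty logging.')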
warrenspe/hconf
hconf/Config.py
ConfigManager.parse
def parse(self): """ Executes the registered parsers to parse input configurations. """ self._config = _Config() self._setDefaults() for parser in self.parsers: for key, value in parser.parse(self, self._config).items(): key = self._sanitizeName(key) if key not in self.configs: raise UnknownConfigurationException(key) if value is not None: self._setConfig(key, value) self._ensureRequired() self._cast() return self._config
python
def parse(self): """ Executes the registered parsers to parse input configurations. """ self._config = _Config() self._setDefaults() for parser in self.parsers: for key, value in parser.parse(self, self._config).items(): key = self._sanitizeName(key) if key not in self.configs: raise UnknownConfigurationException(key) if value is not None: self._setConfig(key, value) self._ensureRequired() self._cast() return self._config
[ "def", "parse", "(", "self", ")", ":", "self", ".", "_config", "=", "_Config", "(", ")", "self", ".", "_setDefaults", "(", ")", "for", "parser", "in", "self", ".", "parsers", ":", "for", "key", ",", "value", "in", "parser", ".", "parse", "(", "self", ",", "self", ".", "_config", ")", ".", "items", "(", ")", ":", "key", "=", "self", ".", "_sanitizeName", "(", "key", ")", "if", "key", "not", "in", "self", ".", "configs", ":", "raise", "UnknownConfigurationException", "(", "key", ")", "if", "value", "is", "not", "None", ":", "self", ".", "_setConfig", "(", "key", ",", "value", ")", "self", ".", "_ensureRequired", "(", ")", "self", ".", "_cast", "(", ")", "return", "self", ".", "_config" ]
Executes the registered parsers to parse input configurations.
[ "Executes", "the", "registered", "parsers", "to", "parse", "input", "configurations", "." ]
12074d15dc3641d3903488c95d89a507386a32d5
https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/Config.py#L100-L120
train
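Continuing the sketch above: parse() applies defaults, runs any registered subparsers, enforces required options, casts, and returns the config object:

config = config_manager.parse()
print(config.port)     # 8080, cast to int
print(config.verbose)  # False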
warrenspe/hconf
hconf/Config.py
ConfigManager._setDefaults
def _setDefaults(self): """ Sets all the expected configuration options on the config object as either the requested default value, or None. """ for configName, configDict in self.configs.items(): self._setConfig(configName, configDict['default'])
python
def _setDefaults(self): """ Sets all the expected configuration options on the config object as either the requested default value, or None. """ for configName, configDict in self.configs.items(): self._setConfig(configName, configDict['default'])
[ "def", "_setDefaults", "(", "self", ")", ":", "for", "configName", ",", "configDict", "in", "self", ".", "configs", ".", "items", "(", ")", ":", "self", ".", "_setConfig", "(", "configName", ",", "configDict", "[", "'default'", "]", ")" ]
Sets all the expected configuration options on the config object as either the requested default value, or None.
[ "Sets", "all", "the", "expected", "configuration", "options", "on", "the", "config", "object", "as", "either", "the", "requested", "default", "value", "or", "None", "." ]
12074d15dc3641d3903488c95d89a507386a32d5
https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/Config.py#L122-L128
train
warrenspe/hconf
hconf/Config.py
ConfigManager._cast
def _cast(self): """ Iterates through our parsed configuration options and casts any options with marked cast types. """ for configName, configDict in self.configs.items(): if configDict['cast'] is not None: configValue = getattr(self._config, configName) if configValue is not None: try: self._setConfig(configName, configDict['cast'](configValue)) except: raise InvalidConfigurationException("%s: %r" % (configName, configValue))
python
def _cast(self): """ Iterates through our parsed configuration options and casts any options with marked cast types. """ for configName, configDict in self.configs.items(): if configDict['cast'] is not None: configValue = getattr(self._config, configName) if configValue is not None: try: self._setConfig(configName, configDict['cast'](configValue)) except: raise InvalidConfigurationException("%s: %r" % (configName, configValue))
[ "def", "_cast", "(", "self", ")", ":", "for", "configName", ",", "configDict", "in", "self", ".", "configs", ".", "items", "(", ")", ":", "if", "configDict", "[", "'cast'", "]", "is", "not", "None", ":", "configValue", "=", "getattr", "(", "self", ".", "_config", ",", "configName", ")", "if", "configValue", "is", "not", "None", ":", "try", ":", "self", ".", "_setConfig", "(", "configName", ",", "configDict", "[", "'cast'", "]", "(", "configValue", ")", ")", "except", ":", "raise", "InvalidConfigurationException", "(", "\"%s: %r\"", "%", "(", "configName", ",", "configValue", ")", ")" ]
Iterates through our parsed configuration options and casts any options with marked cast types.
[ "Iterates", "through", "our", "parsed", "configuration", "options", "and", "cast", "any", "options", "with", "marked", "cast", "types", "." ]
12074d15dc3641d3903488c95d89a507386a32d5
https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/Config.py#L141-L154
train
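A standalone sketch of the cast-and-wrap-errors pattern that _cast implements above; the names below are hypothetical and not part of hconf.

def cast_option(name, value, cast):
    # Mirror _cast: leave None values and None casts alone, otherwise cast
    # and convert any failure into a single descriptive exception.
    if cast is None or value is None:
        return value
    try:
        return cast(value)
    except Exception:
        raise ValueError("%s: %r" % (name, value))

print(cast_option('port', '8080', int))   # -> 8080
# cast_option('port', 'abc', int) raises ValueError: port: 'abc'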
juju/theblues
theblues/jimm.py
JIMM.list_models
def list_models(self, macaroons): """ Get the logged in user's models from the JIMM controller. @param macaroons The discharged JIMM macaroons. @return The JSON-decoded list of models. """ return make_request("{}model".format(self.url), timeout=self.timeout, client=self._client, cookies=self.cookies)
python
def list_models(self, macaroons): """ Get the logged in user's models from the JIMM controller. @param macaroons The discharged JIMM macaroons. @return The JSON-decoded list of models. """ return make_request("{}model".format(self.url), timeout=self.timeout, client=self._client, cookies=self.cookies)
[ "def", "list_models", "(", "self", ",", "macaroons", ")", ":", "return", "make_request", "(", "\"{}model\"", ".", "format", "(", "self", ".", "url", ")", ",", "timeout", "=", "self", ".", "timeout", ",", "client", "=", "self", ".", "_client", ",", "cookies", "=", "self", ".", "cookies", ")" ]
Get the logged in user's models from the JIMM controller. @param macaroons The discharged JIMM macaroons. @return The JSON-decoded list of models.
[ "Get", "the", "logged", "in", "user", "s", "models", "from", "the", "JIMM", "controller", "." ]
f4431f29e43d04fc32f38f4f86cea45cd4e6ae98
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/jimm.py#L31-L38
train
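Note that the macaroons argument in the record above is accepted but never used; authentication evidently travels in the stored cookies. The sketch below approximates what the make_request helper presumably does, using the requests library; it is not theblues' actual implementation.

import requests

def list_models(base_url, cookies, timeout=10):
    # GET "<jimm-url>model", carrying the session cookies for auth.
    response = requests.get('{}model'.format(base_url),
                            cookies=cookies, timeout=timeout)
    response.raise_for_status()
    return response.json()   # the JSON-decoded list of models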
accraze/python-markov-novel
src/markov_novel/novel.py
Novel.write
def write(self, novel_title='novel', filetype='txt'): """ Composes chapters and writes the novel to a text file """ self._compose_chapters() self._write_to_file(novel_title, filetype)
python
def write(self, novel_title='novel', filetype='txt'): """ Composes chapters and writes the novel to a text file """ self._compose_chapters() self._write_to_file(novel_title, filetype)
[ "def", "write", "(", "self", ",", "novel_title", "=", "'novel'", ",", "filetype", "=", "'txt'", ")", ":", "self", ".", "_compose_chapters", "(", ")", "self", ".", "_write_to_file", "(", "novel_title", ",", "filetype", ")" ]
Composes chapters and writes the novel to a text file
[ "Composes", "chapters", "and", "writes", "the", "novel", "to", "a", "text", "file" ]
ff451639e93a3ac11fb0268b92bc0cffc00bfdbe
https://github.com/accraze/python-markov-novel/blob/ff451639e93a3ac11fb0268b92bc0cffc00bfdbe/src/markov_novel/novel.py#L15-L21
train
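A hedged usage sketch. Novel.write's signature is documented above; the constructor arguments (a markovify model plus a chapter count) are inferred from the _compose_chapters record that follows and may not match the real __init__.

import markovify
from markov_novel import Novel   # assumed import path

with open('corpus.txt') as f:
    model = markovify.Text(f.read())

novel = Novel(model, chapter_count=4)               # constructor shape assumed
novel.write(novel_title='my_novel', filetype='md')  # writes my_novel.md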
accraze/python-markov-novel
src/markov_novel/novel.py
Novel._compose_chapters
def _compose_chapters(self): """ Creates chapters and appends them to a list """ for count in range(self.chapter_count): chapter_num = count + 1 c = Chapter(self.markov, chapter_num) self.chapters.append(c)
python
def _compose_chapters(self): """ Creates chapters and appends them to a list """ for count in range(self.chapter_count): chapter_num = count + 1 c = Chapter(self.markov, chapter_num) self.chapters.append(c)
[ "def", "_compose_chapters", "(", "self", ")", ":", "for", "count", "in", "range", "(", "self", ".", "chapter_count", ")", ":", "chapter_num", "=", "count", "+", "1", "c", "=", "Chapter", "(", "self", ".", "markov", ",", "chapter_num", ")", "self", ".", "chapters", ".", "append", "(", "c", ")" ]
Creates chapters and appends them to a list
[ "Creates", "a", "chapters", "and", "appends", "them", "to", "list" ]
ff451639e93a3ac11fb0268b92bc0cffc00bfdbe
https://github.com/accraze/python-markov-novel/blob/ff451639e93a3ac11fb0268b92bc0cffc00bfdbe/src/markov_novel/novel.py#L23-L31
train
joeyespo/path-and-address
path_and_address/validation.py
valid_address
def valid_address(address): """ Determines whether the specified address string is valid. """ if not address: return False components = str(address).split(':') if len(components) > 2 or not valid_hostname(components[0]): return False if len(components) == 2 and not valid_port(components[1]): return False return True
python
def valid_address(address): """ Determines whether the specified address string is valid. """ if not address: return False components = str(address).split(':') if len(components) > 2 or not valid_hostname(components[0]): return False if len(components) == 2 and not valid_port(components[1]): return False return True
[ "def", "valid_address", "(", "address", ")", ":", "if", "not", "address", ":", "return", "False", "components", "=", "str", "(", "address", ")", ".", "split", "(", "':'", ")", "if", "len", "(", "components", ")", ">", "2", "or", "not", "valid_hostname", "(", "components", "[", "0", "]", ")", ":", "return", "False", "if", "len", "(", "components", ")", "==", "2", "and", "not", "valid_port", "(", "components", "[", "1", "]", ")", ":", "return", "False", "return", "True" ]
Determines whether the specified address string is valid.
[ "Determines", "whether", "the", "specified", "address", "string", "is", "valid", "." ]
f8193a09f4b785574d920e8a2aeeb55ea6ff4e20
https://github.com/joeyespo/path-and-address/blob/f8193a09f4b785574d920e8a2aeeb55ea6ff4e20/path_and_address/validation.py#L7-L21
train
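Expected behavior derived from the function body above. valid_port and _hostname_re are defined elsewhere in the module, so the positive cases assume they accept ordinary hostnames and numeric ports.

from path_and_address.validation import valid_address

print(valid_address('localhost'))        # True: hostname only
print(valid_address('localhost:5000'))   # True, assuming valid_port accepts 5000
print(valid_address(''))                 # False: empty input is rejected
print(valid_address('host:80:extra'))    # False: more than one ':'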
joeyespo/path-and-address
path_and_address/validation.py
valid_hostname
def valid_hostname(host): """ Returns whether the specified string is a valid hostname. """ if len(host) > 255: return False if host[-1:] == '.': host = host[:-1] return all(_hostname_re.match(c) for c in host.split('.'))
python
def valid_hostname(host): """ Returns whether the specified string is a valid hostname. """ if len(host) > 255: return False if host[-1:] == '.': host = host[:-1] return all(_hostname_re.match(c) for c in host.split('.'))
[ "def", "valid_hostname", "(", "host", ")", ":", "if", "len", "(", "host", ")", ">", "255", ":", "return", "False", "if", "host", "[", "-", "1", ":", "]", "==", "'.'", ":", "host", "=", "host", "[", ":", "-", "1", "]", "return", "all", "(", "_hostname_re", ".", "match", "(", "c", ")", "for", "c", "in", "host", ".", "split", "(", "'.'", ")", ")" ]
Returns whether the specified string is a valid hostname.
[ "Returns", "whether", "the", "specified", "string", "is", "a", "valid", "hostname", "." ]
f8193a09f4b785574d920e8a2aeeb55ea6ff4e20
https://github.com/joeyespo/path-and-address/blob/f8193a09f4b785574d920e8a2aeeb55ea6ff4e20/path_and_address/validation.py#L24-L34
train
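The _hostname_re used above is defined elsewhere in validation.py. A common per-label pattern (RFC 1123 style) is shown in this self-contained sketch; treat the exact regex as an assumption, not the package's actual one.

import re

_hostname_re = re.compile(r'(?!-)[A-Za-z0-9-]{1,63}(?<!-)$')   # assumed pattern

def valid_hostname(host):
    if len(host) > 255:
        return False
    if host.endswith('.'):
        host = host[:-1]   # a single trailing dot is allowed
    return all(_hostname_re.match(label) for label in host.split('.'))

print(valid_hostname('example.com'))   # True
print(valid_hostname('-bad-.com'))     # False: labels may not start with '-'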
DarkEnergySurvey/ugali
ugali/analysis/imf.py
IMF.sample
def sample(self, n, mass_min=0.1, mass_max=10., steps=10000, seed=None): """ Sample initial mass values between mass_min and mass_max, following the IMF distribution. ADW: Should this be `sample` or `simulate`? Parameters: ----------- n : number of samples to draw mass_min : minimum mass to sample from mass_max : maximum mass to sample from steps : number of steps for isochrone sampling seed : random seed (passed to np.random.seed) Returns: -------- mass : array of randomly sampled mass values """ if seed is not None: np.random.seed(seed) d_mass = (mass_max - mass_min) / float(steps) mass = np.linspace(mass_min, mass_max, steps) cdf = np.insert(np.cumsum(d_mass * self.pdf(mass[1:], log_mode=False)), 0, 0.) cdf = cdf / cdf[-1] f = scipy.interpolate.interp1d(cdf, mass) return f(np.random.uniform(size=n))
python
def sample(self, n, mass_min=0.1, mass_max=10., steps=10000, seed=None): """ Sample initial mass values between mass_min and mass_max, following the IMF distribution. ADW: Should this be `sample` or `simulate`? Parameters: ----------- n : number of samples to draw mass_min : minimum mass to sample from mass_max : maximum mass to sample from steps : number of steps for isochrone sampling seed : random seed (passed to np.random.seed) Returns: -------- mass : array of randomly sampled mass values """ if seed is not None: np.random.seed(seed) d_mass = (mass_max - mass_min) / float(steps) mass = np.linspace(mass_min, mass_max, steps) cdf = np.insert(np.cumsum(d_mass * self.pdf(mass[1:], log_mode=False)), 0, 0.) cdf = cdf / cdf[-1] f = scipy.interpolate.interp1d(cdf, mass) return f(np.random.uniform(size=n))
[ "def", "sample", "(", "self", ",", "n", ",", "mass_min", "=", "0.1", ",", "mass_max", "=", "10.", ",", "steps", "=", "10000", ",", "seed", "=", "None", ")", ":", "if", "seed", "is", "not", "None", ":", "np", ".", "random", ".", "seed", "(", "seed", ")", "d_mass", "=", "(", "mass_max", "-", "mass_min", ")", "/", "float", "(", "steps", ")", "mass", "=", "np", ".", "linspace", "(", "mass_min", ",", "mass_max", ",", "steps", ")", "cdf", "=", "np", ".", "insert", "(", "np", ".", "cumsum", "(", "d_mass", "*", "self", ".", "pdf", "(", "mass", "[", "1", ":", "]", ",", "log_mode", "=", "False", ")", ")", ",", "0", ",", "0.", ")", "cdf", "=", "cdf", "/", "cdf", "[", "-", "1", "]", "f", "=", "scipy", ".", "interpolate", ".", "interp1d", "(", "cdf", ",", "mass", ")", "return", "f", "(", "np", ".", "random", ".", "uniform", "(", "size", "=", "n", ")", ")" ]
Sample initial mass values between mass_min and mass_max, following the IMF distribution. ADW: Should this be `sample` or `simulate`? Parameters: ----------- n : number of samples to draw mass_min : minimum mass to sample from mass_max : maximum mass to sample from steps : number of steps for isochrone sampling seed : random seed (passed to np.random.seed) Returns: -------- mass : array of randomly sampled mass values
[ "Sample", "initial", "mass", "values", "between", "mass_min", "and", "mass_max", "following", "the", "IMF", "distribution", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/imf.py#L56-L81
train
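IMF.sample above is inverse-CDF (inverse transform) sampling: build a cumulative distribution on a mass grid, then push uniform draws through its inverse. A self-contained illustration with a toy power-law pdf:

import numpy as np
import scipy.interpolate

def sample_power_law(n, alpha=2.35, m_min=0.1, m_max=10.0, steps=10000, seed=0):
    rng = np.random.RandomState(seed)
    mass = np.linspace(m_min, m_max, steps)
    pdf = mass**(-alpha)
    # Riemann-sum CDF with a leading zero, normalized to run from 0 to 1.
    cdf = np.insert(np.cumsum(pdf[1:] * np.diff(mass)), 0, 0.0)
    cdf /= cdf[-1]
    inverse = scipy.interpolate.interp1d(cdf, mass)
    return inverse(rng.uniform(size=n))   # uniform draws -> mass values

print(sample_power_law(5))   # five masses, weighted toward m_min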
DarkEnergySurvey/ugali
ugali/analysis/imf.py
Kroupa2001.pdf
def pdf(cls, mass, log_mode=True): """ PDF for the Kroupa IMF. Normalization is set over the mass range from 0.1 Msun to 100 Msun """ log_mass = np.log10(mass) # From Eq 2 mb = mbreak = [0.08, 0.5] # Msun a = alpha = [0.3, 1.3, 2.3] # alpha # Normalization set from 0.1 -- 100 Msun norm = 0.27947743949440446 b = 1./norm c = b * mbreak[0]**(alpha[1]-alpha[0]) d = c * mbreak[1]**(alpha[2]-alpha[1]) dn_dm = b * (mass < 0.08) * mass**(-alpha[0]) dn_dm += c * (0.08 <= mass) * (mass < 0.5) * mass**(-alpha[1]) dn_dm += d * (0.5 <= mass) * mass**(-alpha[2]) if log_mode: # Number per logarithmic mass range, i.e., dN/dlog(M) return dn_dm * (mass * np.log(10)) else: # Number per linear mass range, i.e., dN/dM return dn_dm
python
def pdf(cls, mass, log_mode=True): """ PDF for the Kroupa IMF. Normalization is set over the mass range from 0.1 Msun to 100 Msun """ log_mass = np.log10(mass) # From Eq 2 mb = mbreak = [0.08, 0.5] # Msun a = alpha = [0.3, 1.3, 2.3] # alpha # Normalization set from 0.1 -- 100 Msun norm = 0.27947743949440446 b = 1./norm c = b * mbreak[0]**(alpha[1]-alpha[0]) d = c * mbreak[1]**(alpha[2]-alpha[1]) dn_dm = b * (mass < 0.08) * mass**(-alpha[0]) dn_dm += c * (0.08 <= mass) * (mass < 0.5) * mass**(-alpha[1]) dn_dm += d * (0.5 <= mass) * mass**(-alpha[2]) if log_mode: # Number per logarithmic mass range, i.e., dN/dlog(M) return dn_dm * (mass * np.log(10)) else: # Number per linear mass range, i.e., dN/dM return dn_dm
[ "def", "pdf", "(", "cls", ",", "mass", ",", "log_mode", "=", "True", ")", ":", "log_mass", "=", "np", ".", "log10", "(", "mass", ")", "# From Eq 2", "mb", "=", "mbreak", "=", "[", "0.08", ",", "0.5", "]", "# Msun", "a", "=", "alpha", "=", "[", "0.3", ",", "1.3", ",", "2.3", "]", "# alpha", "# Normalization set from 0.1 -- 100 Msun", "norm", "=", "0.27947743949440446", "b", "=", "1.", "/", "norm", "c", "=", "b", "*", "mbreak", "[", "0", "]", "**", "(", "alpha", "[", "1", "]", "-", "alpha", "[", "0", "]", ")", "d", "=", "c", "*", "mbreak", "[", "1", "]", "**", "(", "alpha", "[", "2", "]", "-", "alpha", "[", "1", "]", ")", "dn_dm", "=", "b", "*", "(", "mass", "<", "0.08", ")", "*", "mass", "**", "(", "-", "alpha", "[", "0", "]", ")", "dn_dm", "+=", "c", "*", "(", "0.08", "<=", "mass", ")", "*", "(", "mass", "<", "0.5", ")", "*", "mass", "**", "(", "-", "alpha", "[", "1", "]", ")", "dn_dm", "+=", "d", "*", "(", "0.5", "<=", "mass", ")", "*", "mass", "**", "(", "-", "alpha", "[", "2", "]", ")", "if", "log_mode", ":", "# Number per logarithmic mass range, i.e., dN/dlog(M)", "return", "dn_dm", "*", "(", "mass", "*", "np", ".", "log", "(", "10", ")", ")", "else", ":", "# Number per linear mass range, i.e., dN/dM", "return", "dn_dm" ]
PDF for the Kroupa IMF. Normalization is set over the mass range from 0.1 Msun to 100 Msun
[ "PDF", "for", "the", "Kroupa", "IMF", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/imf.py#L155-L181
train
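A numerical check of the normalization claimed above: re-implementing the piecewise dn/dM with the record's constants and integrating from 0.1 to 100 Msun should give a value very close to 1.

import numpy as np

def kroupa_dn_dm(mass):
    alpha = [0.3, 1.3, 2.3]
    b = 1.0 / 0.27947743949440446            # constants from the record
    c = b * 0.08**(alpha[1] - alpha[0])
    d = c * 0.5**(alpha[2] - alpha[1])
    return (b * (mass < 0.08) * mass**(-alpha[0])
            + c * (0.08 <= mass) * (mass < 0.5) * mass**(-alpha[1])
            + d * (0.5 <= mass) * mass**(-alpha[2]))

mass = np.linspace(0.1, 100, 200001)
print(np.trapz(kroupa_dn_dm(mass), mass))   # ~1.0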
DarkEnergySurvey/ugali
ugali/analysis/imf.py
Salpeter1955.pdf
def pdf(cls, mass, log_mode=True): """ PDF for the Salpeter IMF. Value of 'a' is set to normalize the IMF to 1 between 0.1 and 100 Msun """ alpha = 2.35 a = 0.060285569480482866 dn_dm = a * mass**(-alpha) if log_mode: # Number per logarithmic mass range, i.e., dN/dlog(M) return dn_dm * (mass * np.log(10)) else: # Number per linear mass range, i.e., dN/dM return dn_dm
python
def pdf(cls, mass, log_mode=True): """ PDF for the Salpeter IMF. Value of 'a' is set to normalize the IMF to 1 between 0.1 and 100 Msun """ alpha = 2.35 a = 0.060285569480482866 dn_dm = a * mass**(-alpha) if log_mode: # Number per logarithmic mass range, i.e., dN/dlog(M) return dn_dm * (mass * np.log(10)) else: # Number per linear mass range, i.e., dN/dM return dn_dm
[ "def", "pdf", "(", "cls", ",", "mass", ",", "log_mode", "=", "True", ")", ":", "alpha", "=", "2.35", "a", "=", "0.060285569480482866", "dn_dm", "=", "a", "*", "mass", "**", "(", "-", "alpha", ")", "if", "log_mode", ":", "# Number per logarithmic mass range, i.e., dN/dlog(M)", "return", "dn_dm", "*", "(", "mass", "*", "np", ".", "log", "(", "10", ")", ")", "else", ":", "# Number per linear mass range, i.e., dN/dM", "return", "dn_dm" ]
PDF for the Salpeter IMF. Value of 'a' is set to normalize the IMF to 1 between 0.1 and 100 Msun
[ "PDF", "for", "the", "Salpeter", "IMF", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/imf.py#L191-L206
train
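For a single power law the normalization constant has a closed form, which can be cross-checked against the hard-coded value above; it lands within roughly 0.04% of the record's constant.

alpha, m_min, m_max = 2.35, 0.1, 100.0
# Integral of a * m**-alpha from m_min to m_max equals
# a * (m_min**(1-alpha) - m_max**(1-alpha)) / (alpha - 1); set it to 1:
a = (alpha - 1) / (m_min**(1 - alpha) - m_max**(1 - alpha))
print(a)   # ~0.06031, close to the 0.060285... constant in the record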
warrenspe/hconf
hconf/subparsers/_subparser.py
ConfigFileSubparser._getConfigFile
def _getConfigFile(self, config): """ Retrieves a file descriptor to a configuration file to process. Inputs: config - The _Config object which is being populated. Outputs: An open file descriptor to the configuration file to parse in read mode if successful, else None. """ joinPath = lambda p: (os.path.join(p) if isinstance(p, (tuple, list)) else p) if self.filepathConfig is not None and self.filenameConfig is not None: if hasattr(config, self.filepathConfig) and hasattr(config, self.filenameConfig): path = joinPath(getattr(config, self.filepathConfig)) name = getattr(config, self.filenameConfig) if os.path.isfile(os.path.join(path, name)): return open(os.path.join(path, name), 'r') if self.filepath is not None and self.filename is not None: path = joinPath(self.filepath) name = self.filename if os.path.isfile(os.path.join(path, name)): return open(os.path.join(path, name), 'r')
python
def _getConfigFile(self, config): """ Retrieves a file descriptor to a configuration file to process. Inputs: config - The _Config object which is being populated. Outputs: An open file descriptor to the configuration file to parse in read mode if successful, else None. """ joinPath = lambda p: (os.path.join(p) if isinstance(p, (tuple, list)) else p) if self.filepathConfig is not None and self.filenameConfig is not None: if hasattr(config, self.filepathConfig) and hasattr(config, self.filenameConfig): path = joinPath(getattr(config, self.filepathConfig)) name = getattr(config, self.filenameConfig) if os.path.isfile(os.path.join(path, name)): return open(os.path.join(path, name), 'r') if self.filepath is not None and self.filename is not None: path = joinPath(self.filepath) name = self.filename if os.path.isfile(os.path.join(path, name)): return open(os.path.join(path, name), 'r')
[ "def", "_getConfigFile", "(", "self", ",", "config", ")", ":", "joinPath", "=", "lambda", "p", ":", "(", "os", ".", "path", ".", "join", "(", "p", ")", "if", "isinstance", "(", "p", ",", "(", "tuple", ",", "list", ")", ")", "else", "p", ")", "if", "self", ".", "filepathConfig", "is", "not", "None", "and", "self", ".", "filenameConfig", "is", "not", "None", ":", "if", "hasattr", "(", "config", ",", "self", ".", "filepathConfig", ")", "and", "hasattr", "(", "config", ",", "self", ".", "filenameConfig", ")", ":", "path", "=", "joinPath", "(", "getattr", "(", "config", ",", "self", ".", "filepathConfig", ")", ")", "name", "=", "getattr", "(", "config", ",", "self", ".", "filenameConfig", ")", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "name", ")", ")", ":", "return", "open", "(", "os", ".", "path", ".", "join", "(", "path", ",", "name", ")", ",", "'r'", ")", "if", "self", ".", "filepath", "is", "not", "None", "and", "self", ".", "filename", "is", "not", "None", ":", "path", "=", "joinPath", "(", "self", ".", "filepath", ")", "name", "=", "self", ".", "filename", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "name", ")", ")", ":", "return", "open", "(", "os", ".", "path", ".", "join", "(", "path", ",", "name", ")", ",", "'r'", ")" ]
Retrieves a file descriptor to a configuration file to process. Inputs: config - The _Config object which is being populated. Outputs: An open file descriptor to the configuration file to parse in read mode if successful, else None.
[ "Retrieves", "a", "file", "descriptor", "to", "a", "configuration", "file", "to", "process", "." ]
12074d15dc3641d3903488c95d89a507386a32d5
https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/subparsers/_subparser.py#L83-L107
train
schlamar/latexmk.py
latexmake.py
_count_citations
def _count_citations(aux_file): ''' Counts the citations in an aux-file. @return: defaultdict(int) - {citation_name: number, ...} ''' counter = defaultdict(int) with open(aux_file) as fobj: content = fobj.read() for match in CITE_PATTERN.finditer(content): name = match.groups()[0] counter[name] += 1 return counter
python
def _count_citations(aux_file): ''' Counts the citations in an aux-file. @return: defaultdict(int) - {citation_name: number, ...} ''' counter = defaultdict(int) with open(aux_file) as fobj: content = fobj.read() for match in CITE_PATTERN.finditer(content): name = match.groups()[0] counter[name] += 1 return counter
[ "def", "_count_citations", "(", "aux_file", ")", ":", "counter", "=", "defaultdict", "(", "int", ")", "with", "open", "(", "aux_file", ")", "as", "fobj", ":", "content", "=", "fobj", ".", "read", "(", ")", "for", "match", "in", "CITE_PATTERN", ".", "finditer", "(", "content", ")", ":", "name", "=", "match", ".", "groups", "(", ")", "[", "0", "]", "counter", "[", "name", "]", "+=", "1", "return", "counter" ]
Counts the citations in an aux-file. @return: defaultdict(int) - {citation_name: number, ...}
[ "Counts", "the", "citations", "in", "an", "aux", "-", "file", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L429-L443
train
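A runnable demonstration of the counting logic above. CITE_PATTERN is defined elsewhere in latexmake.py and is not shown in this record, so the \citation-matching pattern below is an assumption.

import re
from collections import defaultdict

CITE_PATTERN = re.compile(r'\\citation\{([^}]+)\}')   # assumed pattern

def count_citations_in(content):
    counter = defaultdict(int)
    for match in CITE_PATTERN.finditer(content):
        counter[match.group(1)] += 1
    return counter

aux = '\\citation{knuth1984}\n\\citation{lamport1994}\n\\citation{knuth1984}\n'
print(dict(count_citations_in(aux)))   # {'knuth1984': 2, 'lamport1994': 1}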
schlamar/latexmk.py
latexmake.py
LatexMaker._setup_logger
def _setup_logger(self): '''Set up a logger.''' log = logging.getLogger('latexmk.py') handler = logging.StreamHandler() log.addHandler(handler) if self.opt.verbose: log.setLevel(logging.INFO) return log
python
def _setup_logger(self): '''Set up a logger.''' log = logging.getLogger('latexmk.py') handler = logging.StreamHandler() log.addHandler(handler) if self.opt.verbose: log.setLevel(logging.INFO) return log
[ "def", "_setup_logger", "(", "self", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "'latexmk.py'", ")", "handler", "=", "logging", ".", "StreamHandler", "(", ")", "log", ".", "addHandler", "(", "handler", ")", "if", "self", ".", "opt", ".", "verbose", ":", "log", ".", "setLevel", "(", "logging", ".", "INFO", ")", "return", "log" ]
Set up a logger.
[ "Set", "up", "a", "logger", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L84-L93
train
schlamar/latexmk.py
latexmake.py
LatexMaker._parse_texlipse_config
def _parse_texlipse_config(self): ''' Read the project name from the texlipse config file ".texlipse". ''' # If Eclipse's workspace refreshes, the # ".texlipse"-File will be newly created, # so try again after a short sleep if # the file is still missing. if not os.path.isfile('.texlipse'): time.sleep(0.1) if not os.path.isfile('.texlipse'): self.log.error('! Fatal error: File .texlipse is missing.') self.log.error('! Exiting...') sys.exit(1) with open('.texlipse') as fobj: content = fobj.read() match = TEXLIPSE_MAIN_PATTERN.search(content) if match: project_name = match.groups()[0] self.log.info('Found inputfile in ".texlipse": %s.tex' % project_name) return project_name else: self.log.error('! Fatal error: Parsing .texlipse failed.') self.log.error('! Exiting...') sys.exit(1)
python
def _parse_texlipse_config(self): ''' Read the project name from the texlipse config file ".texlipse". ''' # If Eclipse's workspace refreshes, the # ".texlipse"-File will be newly created, # so try again after a short sleep if # the file is still missing. if not os.path.isfile('.texlipse'): time.sleep(0.1) if not os.path.isfile('.texlipse'): self.log.error('! Fatal error: File .texlipse is missing.') self.log.error('! Exiting...') sys.exit(1) with open('.texlipse') as fobj: content = fobj.read() match = TEXLIPSE_MAIN_PATTERN.search(content) if match: project_name = match.groups()[0] self.log.info('Found inputfile in ".texlipse": %s.tex' % project_name) return project_name else: self.log.error('! Fatal error: Parsing .texlipse failed.') self.log.error('! Exiting...') sys.exit(1)
[ "def", "_parse_texlipse_config", "(", "self", ")", ":", "# If Eclipse's workspace refresh, the", "# \".texlipse\"-File will be newly created,", "# so try again after short sleep if", "# the file is still missing.", "if", "not", "os", ".", "path", ".", "isfile", "(", "'.texlipse'", ")", ":", "time", ".", "sleep", "(", "0.1", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "'.texlipse'", ")", ":", "self", ".", "log", ".", "error", "(", "'! Fatal error: File .texlipse is missing.'", ")", "self", ".", "log", ".", "error", "(", "'! Exiting...'", ")", "sys", ".", "exit", "(", "1", ")", "with", "open", "(", "'.texlipse'", ")", "as", "fobj", ":", "content", "=", "fobj", ".", "read", "(", ")", "match", "=", "TEXLIPSE_MAIN_PATTERN", ".", "search", "(", "content", ")", "if", "match", ":", "project_name", "=", "match", ".", "groups", "(", ")", "[", "0", "]", "self", ".", "log", ".", "info", "(", "'Found inputfile in \".texlipse\": %s.tex'", "%", "project_name", ")", "return", "project_name", "else", ":", "self", ".", "log", ".", "error", "(", "'! Fatal error: Parsing .texlipse failed.'", ")", "self", ".", "log", ".", "error", "(", "'! Exiting...'", ")", "sys", ".", "exit", "(", "1", ")" ]
Read the project name from the texlipse config file ".texlipse".
[ "Read", "the", "project", "name", "from", "the", "texlipse", "config", "file", ".", "texlipse", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L95-L122
train
schlamar/latexmk.py
latexmake.py
LatexMaker._read_latex_files
def _read_latex_files(self): ''' Check if some latex output files exist before the first latex run, process them and return the generated data. - Parsing *.aux for citations counter and existing glossaries. - Getting content of files to detect changes. - *.toc file - all available glossaries files ''' if os.path.isfile('%s.aux' % self.project_name): cite_counter = self.generate_citation_counter() self.read_glossaries() else: cite_counter = {'%s.aux' % self.project_name: defaultdict(int)} fname = '%s.toc' % self.project_name if os.path.isfile(fname): with open(fname) as fobj: toc_file = fobj.read() else: toc_file = '' gloss_files = dict() for gloss in self.glossaries: ext = self.glossaries[gloss][1] filename = '%s.%s' % (self.project_name, ext) if os.path.isfile(filename): with open(filename) as fobj: gloss_files[gloss] = fobj.read() return cite_counter, toc_file, gloss_files
python
def _read_latex_files(self): ''' Check if some latex output files exist before the first latex run, process them and return the generated data. - Parsing *.aux for citations counter and existing glossaries. - Getting content of files to detect changes. - *.toc file - all available glossaries files ''' if os.path.isfile('%s.aux' % self.project_name): cite_counter = self.generate_citation_counter() self.read_glossaries() else: cite_counter = {'%s.aux' % self.project_name: defaultdict(int)} fname = '%s.toc' % self.project_name if os.path.isfile(fname): with open(fname) as fobj: toc_file = fobj.read() else: toc_file = '' gloss_files = dict() for gloss in self.glossaries: ext = self.glossaries[gloss][1] filename = '%s.%s' % (self.project_name, ext) if os.path.isfile(filename): with open(filename) as fobj: gloss_files[gloss] = fobj.read() return cite_counter, toc_file, gloss_files
[ "def", "_read_latex_files", "(", "self", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "'%s.aux'", "%", "self", ".", "project_name", ")", ":", "cite_counter", "=", "self", ".", "generate_citation_counter", "(", ")", "self", ".", "read_glossaries", "(", ")", "else", ":", "cite_counter", "=", "{", "'%s.aux'", "%", "self", ".", "project_name", ":", "defaultdict", "(", "int", ")", "}", "fname", "=", "'%s.toc'", "%", "self", ".", "project_name", "if", "os", ".", "path", ".", "isfile", "(", "fname", ")", ":", "with", "open", "(", "fname", ")", "as", "fobj", ":", "toc_file", "=", "fobj", ".", "read", "(", ")", "else", ":", "toc_file", "=", "''", "gloss_files", "=", "dict", "(", ")", "for", "gloss", "in", "self", ".", "glossaries", ":", "ext", "=", "self", ".", "glossaries", "[", "gloss", "]", "[", "1", "]", "filename", "=", "'%s.%s'", "%", "(", "self", ".", "project_name", ",", "ext", ")", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "fobj", ":", "gloss_files", "[", "gloss", "]", "=", "fobj", ".", "read", "(", ")", "return", "cite_counter", ",", "toc_file", ",", "gloss_files" ]
Check if some latex output files exist before the first latex run, process them and return the generated data. - Parsing *.aux for citations counter and existing glossaries. - Getting content of files to detect changes. - *.toc file - all available glossaries files
[ "Check", "if", "some", "latex", "output", "files", "exist", "before", "first", "latex", "run", "process", "them", "and", "return", "the", "generated", "data", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L124-L158
train
schlamar/latexmk.py
latexmake.py
LatexMaker.read_glossaries
def read_glossaries(self): ''' Read all existing glossaries in the main aux-file. ''' filename = '%s.aux' % self.project_name with open(filename) as fobj: main_aux = fobj.read() pattern = r'\\@newglossary\{(.*)\}\{.*\}\{(.*)\}\{(.*)\}' for match in re.finditer(pattern, main_aux): name, ext_i, ext_o = match.groups() self.glossaries[name] = (ext_i, ext_o)
python
def read_glossaries(self): ''' Read all existing glossaries in the main aux-file. ''' filename = '%s.aux' % self.project_name with open(filename) as fobj: main_aux = fobj.read() pattern = r'\\@newglossary\{(.*)\}\{.*\}\{(.*)\}\{(.*)\}' for match in re.finditer(pattern, main_aux): name, ext_i, ext_o = match.groups() self.glossaries[name] = (ext_i, ext_o)
[ "def", "read_glossaries", "(", "self", ")", ":", "filename", "=", "'%s.aux'", "%", "self", ".", "project_name", "with", "open", "(", "filename", ")", "as", "fobj", ":", "main_aux", "=", "fobj", ".", "read", "(", ")", "pattern", "=", "r'\\\\@newglossary\\{(.*)\\}\\{.*\\}\\{(.*)\\}\\{(.*)\\}'", "for", "match", "in", "re", ".", "finditer", "(", "pattern", ",", "main_aux", ")", ":", "name", ",", "ext_i", ",", "ext_o", "=", "match", ".", "groups", "(", ")", "self", ".", "glossaries", "[", "name", "]", "=", "(", "ext_i", ",", "ext_o", ")" ]
Read all existing glossaries in the main aux-file.
[ "Read", "all", "existing", "glossaries", "in", "the", "main", "aux", "-", "file", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L204-L215
train
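The regex in read_glossaries is shown in full above, so its behavior can be demonstrated directly on a sample aux line (here the standard "main" glossary written by the glossaries package):

import re

pattern = r'\\@newglossary\{(.*)\}\{.*\}\{(.*)\}\{(.*)\}'
line = r'\@newglossary{main}{glg}{gls}{glo}'
name, ext_i, ext_o = re.search(pattern, line).groups()
print(name, ext_i, ext_o)   # main gls glo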
schlamar/latexmk.py
latexmake.py
LatexMaker.check_errors
def check_errors(self): ''' Check if errors occurred during a latex run by scanning the output. ''' errors = ERROR_PATTTERN.findall(self.out) # "errors" is a list of tuples if errors: self.log.error('! Errors occurred:') self.log.error('\n'.join( [error.replace('\r', '').strip() for error in chain(*errors) if error.strip()] )) self.log.error('! See "%s.log" for details.' % self.project_name) if self.opt.exit_on_error: self.log.error('! Exiting...') sys.exit(1)
python
def check_errors(self): ''' Check if errors occurred during a latex run by scanning the output. ''' errors = ERROR_PATTTERN.findall(self.out) # "errors" is a list of tuples if errors: self.log.error('! Errors occurred:') self.log.error('\n'.join( [error.replace('\r', '').strip() for error in chain(*errors) if error.strip()] )) self.log.error('! See "%s.log" for details.' % self.project_name) if self.opt.exit_on_error: self.log.error('! Exiting...') sys.exit(1)
[ "def", "check_errors", "(", "self", ")", ":", "errors", "=", "ERROR_PATTTERN", ".", "findall", "(", "self", ".", "out", ")", "# \"errors\" is a list of tuples", "if", "errors", ":", "self", ".", "log", ".", "error", "(", "'! Errors occurred:'", ")", "self", ".", "log", ".", "error", "(", "'\\n'", ".", "join", "(", "[", "error", ".", "replace", "(", "'\\r'", ",", "''", ")", ".", "strip", "(", ")", "for", "error", "in", "chain", "(", "*", "errors", ")", "if", "error", ".", "strip", "(", ")", "]", ")", ")", "self", ".", "log", ".", "error", "(", "'! See \"%s.log\" for details.'", "%", "self", ".", "project_name", ")", "if", "self", ".", "opt", ".", "exit_on_error", ":", "self", ".", "log", ".", "error", "(", "'! Exiting...'", ")", "sys", ".", "exit", "(", "1", ")" ]
Check if errors occurred during a latex run by scanning the output.
[ "Check", "if", "errors", "occured", "during", "a", "latex", "run", "by", "scanning", "the", "output", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L217-L236
train
schlamar/latexmk.py
latexmake.py
LatexMaker.generate_citation_counter
def generate_citation_counter(self): ''' Generate a dictionary with the number of citations in all included files. If this changes after the first latex run, you have to run "bibtex". ''' cite_counter = dict() filename = '%s.aux' % self.project_name with open(filename) as fobj: main_aux = fobj.read() cite_counter[filename] = _count_citations(filename) for match in re.finditer(r'\\@input\{(.*.aux)\}', main_aux): filename = match.groups()[0] try: counter = _count_citations(filename) except IOError: pass else: cite_counter[filename] = counter return cite_counter
python
def generate_citation_counter(self): ''' Generate a dictionary with the number of citations in all included files. If this changes after the first latex run, you have to run "bibtex". ''' cite_counter = dict() filename = '%s.aux' % self.project_name with open(filename) as fobj: main_aux = fobj.read() cite_counter[filename] = _count_citations(filename) for match in re.finditer(r'\\@input\{(.*.aux)\}', main_aux): filename = match.groups()[0] try: counter = _count_citations(filename) except IOError: pass else: cite_counter[filename] = counter return cite_counter
[ "def", "generate_citation_counter", "(", "self", ")", ":", "cite_counter", "=", "dict", "(", ")", "filename", "=", "'%s.aux'", "%", "self", ".", "project_name", "with", "open", "(", "filename", ")", "as", "fobj", ":", "main_aux", "=", "fobj", ".", "read", "(", ")", "cite_counter", "[", "filename", "]", "=", "_count_citations", "(", "filename", ")", "for", "match", "in", "re", ".", "finditer", "(", "r'\\\\@input\\{(.*.aux)\\}'", ",", "main_aux", ")", ":", "filename", "=", "match", ".", "groups", "(", ")", "[", "0", "]", "try", ":", "counter", "=", "_count_citations", "(", "filename", ")", "except", "IOError", ":", "pass", "else", ":", "cite_counter", "[", "filename", "]", "=", "counter", "return", "cite_counter" ]
Generate a dictionary with the number of citations in all included files. If this changes after the first latex run, you have to run "bibtex".
[ "Generate", "dictionary", "with", "the", "number", "of", "citations", "in", "all", "included", "files", ".", "If", "this", "changes", "after", "the", "first", "latex", "run", "you", "have", "to", "run", "bibtex", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L238-L259
train
schlamar/latexmk.py
latexmake.py
LatexMaker.latex_run
def latex_run(self): ''' Start latex run. ''' self.log.info('Running %s...' % self.latex_cmd) cmd = [self.latex_cmd] cmd.extend(LATEX_FLAGS) cmd.append('%s.tex' % self.project_name) try: with open(os.devnull, 'w') as null: Popen(cmd, stdout=null, stderr=null).wait() except OSError: self.log.error(NO_LATEX_ERROR % self.latex_cmd) self.latex_run_counter += 1 fname = '%s.log' % self.project_name with codecs.open(fname, 'r', 'utf-8', 'replace') as fobj: self.out = fobj.read() self.check_errors()
python
def latex_run(self): ''' Start latex run. ''' self.log.info('Running %s...' % self.latex_cmd) cmd = [self.latex_cmd] cmd.extend(LATEX_FLAGS) cmd.append('%s.tex' % self.project_name) try: with open(os.devnull, 'w') as null: Popen(cmd, stdout=null, stderr=null).wait() except OSError: self.log.error(NO_LATEX_ERROR % self.latex_cmd) self.latex_run_counter += 1 fname = '%s.log' % self.project_name with codecs.open(fname, 'r', 'utf-8', 'replace') as fobj: self.out = fobj.read() self.check_errors()
[ "def", "latex_run", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "'Running %s...'", "%", "self", ".", "latex_cmd", ")", "cmd", "=", "[", "self", ".", "latex_cmd", "]", "cmd", ".", "extend", "(", "LATEX_FLAGS", ")", "cmd", ".", "append", "(", "'%s.tex'", "%", "self", ".", "project_name", ")", "try", ":", "with", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "as", "null", ":", "Popen", "(", "cmd", ",", "stdout", "=", "null", ",", "stderr", "=", "null", ")", ".", "wait", "(", ")", "except", "OSError", ":", "self", ".", "log", ".", "error", "(", "NO_LATEX_ERROR", "%", "self", ".", "latex_cmd", ")", "self", ".", "latex_run_counter", "+=", "1", "fname", "=", "'%s.log'", "%", "self", ".", "project_name", "with", "codecs", ".", "open", "(", "fname", ",", "'r'", ",", "'utf-8'", ",", "'replace'", ")", "as", "fobj", ":", "self", ".", "out", "=", "fobj", ".", "read", "(", ")", "self", ".", "check_errors", "(", ")" ]
Start latex run.
[ "Start", "latex", "run", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L261-L279
train
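The open(os.devnull) idiom above predates subprocess.DEVNULL (Python 3.3+). A modern equivalent of the same run-quietly-then-read-the-log step might look like this sketch; the actual LATEX_FLAGS used by latexmk.py are not shown in this record, so a typical stand-in appears below.

import codecs
import subprocess

def latex_run(project_name, latex_cmd='pdflatex'):
    cmd = [latex_cmd, '-interaction=nonstopmode', '%s.tex' % project_name]
    subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    # Read the log with replacement for undecodable bytes, as the record does.
    with codecs.open('%s.log' % project_name, 'r', 'utf-8', 'replace') as fobj:
        return fobj.read()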
schlamar/latexmk.py
latexmake.py
LatexMaker.bibtex_run
def bibtex_run(self): ''' Start bibtex run. ''' self.log.info('Running bibtex...') try: with open(os.devnull, 'w') as null: Popen(['bibtex', self.project_name], stdout=null).wait() except OSError: self.log.error(NO_LATEX_ERROR % 'bibtex') sys.exit(1) shutil.copy('%s.bib' % self.bib_file, '%s.bib.old' % self.bib_file)
python
def bibtex_run(self): ''' Start bibtex run. ''' self.log.info('Running bibtex...') try: with open(os.devnull, 'w') as null: Popen(['bibtex', self.project_name], stdout=null).wait() except OSError: self.log.error(NO_LATEX_ERROR % 'bibtex') sys.exit(1) shutil.copy('%s.bib' % self.bib_file, '%s.bib.old' % self.bib_file)
[ "def", "bibtex_run", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "'Running bibtex...'", ")", "try", ":", "with", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "as", "null", ":", "Popen", "(", "[", "'bibtex'", ",", "self", ".", "project_name", "]", ",", "stdout", "=", "null", ")", ".", "wait", "(", ")", "except", "OSError", ":", "self", ".", "log", ".", "error", "(", "NO_LATEX_ERROR", "%", "'bibtex'", ")", "sys", ".", "exit", "(", "1", ")", "shutil", ".", "copy", "(", "'%s.bib'", "%", "self", ".", "bib_file", ",", "'%s.bib.old'", "%", "self", ".", "bib_file", ")" ]
Start bibtex run.
[ "Start", "bibtex", "run", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L281-L294
train
schlamar/latexmk.py
latexmake.py
LatexMaker.makeindex_runs
def makeindex_runs(self, gloss_files): ''' Check for each glossary if it has to be regenerated with "makeindex". @return: True if "makeindex" was called. ''' gloss_changed = False for gloss in self.glossaries: make_gloss = False ext_i, ext_o = self.glossaries[gloss] fname_in = '%s.%s' % (self.project_name, ext_i) fname_out = '%s.%s' % (self.project_name, ext_o) if re.search('No file %s.' % fname_in, self.out): make_gloss = True if not os.path.isfile(fname_out): make_gloss = True else: with open(fname_out) as fobj: try: if gloss_files[gloss] != fobj.read(): make_gloss = True except KeyError: make_gloss = True if make_gloss: self.log.info('Running makeindex (%s)...' % gloss) try: cmd = ['makeindex', '-q', '-s', '%s.ist' % self.project_name, '-o', fname_in, fname_out] with open(os.devnull, 'w') as null: Popen(cmd, stdout=null).wait() except OSError: self.log.error(NO_LATEX_ERROR % 'makeindex') sys.exit(1) gloss_changed = True return gloss_changed
python
def makeindex_runs(self, gloss_files): ''' Check for each glossary if it has to be regenerated with "makeindex". @return: True if "makeindex" was called. ''' gloss_changed = False for gloss in self.glossaries: make_gloss = False ext_i, ext_o = self.glossaries[gloss] fname_in = '%s.%s' % (self.project_name, ext_i) fname_out = '%s.%s' % (self.project_name, ext_o) if re.search('No file %s.' % fname_in, self.out): make_gloss = True if not os.path.isfile(fname_out): make_gloss = True else: with open(fname_out) as fobj: try: if gloss_files[gloss] != fobj.read(): make_gloss = True except KeyError: make_gloss = True if make_gloss: self.log.info('Running makeindex (%s)...' % gloss) try: cmd = ['makeindex', '-q', '-s', '%s.ist' % self.project_name, '-o', fname_in, fname_out] with open(os.devnull, 'w') as null: Popen(cmd, stdout=null).wait() except OSError: self.log.error(NO_LATEX_ERROR % 'makeindex') sys.exit(1) gloss_changed = True return gloss_changed
[ "def", "makeindex_runs", "(", "self", ",", "gloss_files", ")", ":", "gloss_changed", "=", "False", "for", "gloss", "in", "self", ".", "glossaries", ":", "make_gloss", "=", "False", "ext_i", ",", "ext_o", "=", "self", ".", "glossaries", "[", "gloss", "]", "fname_in", "=", "'%s.%s'", "%", "(", "self", ".", "project_name", ",", "ext_i", ")", "fname_out", "=", "'%s.%s'", "%", "(", "self", ".", "project_name", ",", "ext_o", ")", "if", "re", ".", "search", "(", "'No file %s.'", "%", "fname_in", ",", "self", ".", "out", ")", ":", "make_gloss", "=", "True", "if", "not", "os", ".", "path", ".", "isfile", "(", "fname_out", ")", ":", "make_gloss", "=", "True", "else", ":", "with", "open", "(", "fname_out", ")", "as", "fobj", ":", "try", ":", "if", "gloss_files", "[", "gloss", "]", "!=", "fobj", ".", "read", "(", ")", ":", "make_gloss", "=", "True", "except", "KeyError", ":", "make_gloss", "=", "True", "if", "make_gloss", ":", "self", ".", "log", ".", "info", "(", "'Running makeindex (%s)...'", "%", "gloss", ")", "try", ":", "cmd", "=", "[", "'makeindex'", ",", "'-q'", ",", "'-s'", ",", "'%s.ist'", "%", "self", ".", "project_name", ",", "'-o'", ",", "fname_in", ",", "fname_out", "]", "with", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "as", "null", ":", "Popen", "(", "cmd", ",", "stdout", "=", "null", ")", ".", "wait", "(", ")", "except", "OSError", ":", "self", ".", "log", ".", "error", "(", "NO_LATEX_ERROR", "%", "'makeindex'", ")", "sys", ".", "exit", "(", "1", ")", "gloss_changed", "=", "True", "return", "gloss_changed" ]
Check for each glossary if it has to be regenerated with "makeindex". @return: True if "makeindex" was called.
[ "Check", "for", "each", "glossary", "if", "it", "has", "to", "be", "regenerated", "with", "makeindex", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L296-L334
train
schlamar/latexmk.py
latexmake.py
LatexMaker.open_preview
def open_preview(self): ''' Try to open a preview of the generated document. Currently only supported on Windows and macOS. ''' self.log.info('Opening preview...') if self.opt.pdf: ext = 'pdf' else: ext = 'dvi' filename = '%s.%s' % (self.project_name, ext) if sys.platform == 'win32': try: os.startfile(filename) except OSError: self.log.error( 'Preview-Error: Extension .%s is not linked to a ' 'specific application!' % ext ) elif sys.platform == 'darwin': call(['open', filename]) else: self.log.error( 'Preview-Error: Preview function is currently not ' 'supported on Linux.' )
python
def open_preview(self): ''' Try to open a preview of the generated document. Currently only supported on Windows and macOS. ''' self.log.info('Opening preview...') if self.opt.pdf: ext = 'pdf' else: ext = 'dvi' filename = '%s.%s' % (self.project_name, ext) if sys.platform == 'win32': try: os.startfile(filename) except OSError: self.log.error( 'Preview-Error: Extension .%s is not linked to a ' 'specific application!' % ext ) elif sys.platform == 'darwin': call(['open', filename]) else: self.log.error( 'Preview-Error: Preview function is currently not ' 'supported on Linux.' )
[ "def", "open_preview", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "'Opening preview...'", ")", "if", "self", ".", "opt", ".", "pdf", ":", "ext", "=", "'pdf'", "else", ":", "ext", "=", "'dvi'", "filename", "=", "'%s.%s'", "%", "(", "self", ".", "project_name", ",", "ext", ")", "if", "sys", ".", "platform", "==", "'win32'", ":", "try", ":", "os", ".", "startfile", "(", "filename", ")", "except", "OSError", ":", "self", ".", "log", ".", "error", "(", "'Preview-Error: Extension .%s is not linked to a '", "'specific application!'", "%", "ext", ")", "elif", "sys", ".", "platform", "==", "'darwin'", ":", "call", "(", "[", "'open'", ",", "filename", "]", ")", "else", ":", "self", ".", "log", ".", "error", "(", "'Preview-Error: Preview function is currently not '", "'supported on Linux.'", ")" ]
Try to open a preview of the generated document. Currently only supported on Windows and macOS.
[ "Try", "to", "open", "a", "preview", "of", "the", "generated", "document", ".", "Currently", "only", "supported", "on", "Windows", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L336-L361
train
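The record above leaves Linux without a preview path; xdg-open is the conventional way to fill that gap. This is an independent suggestion, not part of latexmk.py.

import os
import subprocess
import sys

def open_file(filename):
    if sys.platform == 'win32':
        os.startfile(filename)
    elif sys.platform == 'darwin':
        subprocess.call(['open', filename])
    else:
        subprocess.call(['xdg-open', filename])   # most Linux desktops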
schlamar/latexmk.py
latexmake.py
LatexMaker.need_latex_rerun
def need_latex_rerun(self): ''' Test for all rerun patterns if they match the output. ''' for pattern in LATEX_RERUN_PATTERNS: if pattern.search(self.out): return True return False
python
def need_latex_rerun(self): ''' Test for all rerun patterns if they match the output. ''' for pattern in LATEX_RERUN_PATTERNS: if pattern.search(self.out): return True return False
[ "def", "need_latex_rerun", "(", "self", ")", ":", "for", "pattern", "in", "LATEX_RERUN_PATTERNS", ":", "if", "pattern", ".", "search", "(", "self", ".", "out", ")", ":", "return", "True", "return", "False" ]
Test for all rerun patterns if they match the output.
[ "Test", "for", "all", "rerun", "patterns", "if", "they", "match", "the", "output", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L363-L370
train
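LATEX_RERUN_PATTERNS is defined elsewhere in latexmake.py and not shown here. Messages like the ones below are what LaTeX typically emits when another pass is needed; treat the exact patterns as assumptions.

import re

LATEX_RERUN_PATTERNS = [
    re.compile(r'Rerun to get cross-references right'),
    re.compile(r'There were undefined references'),
]

log_excerpt = ('LaTeX Warning: Label(s) may have changed. '
               'Rerun to get cross-references right.')
print(any(p.search(log_excerpt) for p in LATEX_RERUN_PATTERNS))   # True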
schlamar/latexmk.py
latexmake.py
LatexMaker.run
def run(self): '''Run the LaTeX compilation.''' # store files self.old_dir = [] if self.opt.clean: self.old_dir = os.listdir('.') cite_counter, toc_file, gloss_files = self._read_latex_files() self.latex_run() self.read_glossaries() gloss_changed = self.makeindex_runs(gloss_files) if gloss_changed or self._is_toc_changed(toc_file): self.latex_run() if self._need_bib_run(cite_counter): self.bibtex_run() self.latex_run() while (self.latex_run_counter < MAX_RUNS): if not self.need_latex_rerun(): break self.latex_run() if self.opt.check_cite: cites = set() with open('%s.aux' % self.project_name) as fobj: aux_content = fobj.read() for match in BIBCITE_PATTERN.finditer(aux_content): name = match.groups()[0] cites.add(name) with open('%s.bib' % self.bib_file) as fobj: bib_content = fobj.read() for match in BIBENTRY_PATTERN.finditer(bib_content): name = match.groups()[0] if name not in cites: self.log.info('Bib entry not cited: "%s"' % name) if self.opt.clean: ending = '.dvi' if self.opt.pdf: ending = '.pdf' for fname in os.listdir('.'): if not (fname in self.old_dir or fname.endswith(ending)): try: os.remove(fname) except IOError: pass if self.opt.preview: self.open_preview()
python
def run(self): '''Run the LaTeX compilation.''' # store files self.old_dir = [] if self.opt.clean: self.old_dir = os.listdir('.') cite_counter, toc_file, gloss_files = self._read_latex_files() self.latex_run() self.read_glossaries() gloss_changed = self.makeindex_runs(gloss_files) if gloss_changed or self._is_toc_changed(toc_file): self.latex_run() if self._need_bib_run(cite_counter): self.bibtex_run() self.latex_run() while (self.latex_run_counter < MAX_RUNS): if not self.need_latex_rerun(): break self.latex_run() if self.opt.check_cite: cites = set() with open('%s.aux' % self.project_name) as fobj: aux_content = fobj.read() for match in BIBCITE_PATTERN.finditer(aux_content): name = match.groups()[0] cites.add(name) with open('%s.bib' % self.bib_file) as fobj: bib_content = fobj.read() for match in BIBENTRY_PATTERN.finditer(bib_content): name = match.groups()[0] if name not in cites: self.log.info('Bib entry not cited: "%s"' % name) if self.opt.clean: ending = '.dvi' if self.opt.pdf: ending = '.pdf' for fname in os.listdir('.'): if not (fname in self.old_dir or fname.endswith(ending)): try: os.remove(fname) except IOError: pass if self.opt.preview: self.open_preview()
[ "def", "run", "(", "self", ")", ":", "# store files", "self", ".", "old_dir", "=", "[", "]", "if", "self", ".", "opt", ".", "clean", ":", "self", ".", "old_dir", "=", "os", ".", "listdir", "(", "'.'", ")", "cite_counter", ",", "toc_file", ",", "gloss_files", "=", "self", ".", "_read_latex_files", "(", ")", "self", ".", "latex_run", "(", ")", "self", ".", "read_glossaries", "(", ")", "gloss_changed", "=", "self", ".", "makeindex_runs", "(", "gloss_files", ")", "if", "gloss_changed", "or", "self", ".", "_is_toc_changed", "(", "toc_file", ")", ":", "self", ".", "latex_run", "(", ")", "if", "self", ".", "_need_bib_run", "(", "cite_counter", ")", ":", "self", ".", "bibtex_run", "(", ")", "self", ".", "latex_run", "(", ")", "while", "(", "self", ".", "latex_run_counter", "<", "MAX_RUNS", ")", ":", "if", "not", "self", ".", "need_latex_rerun", "(", ")", ":", "break", "self", ".", "latex_run", "(", ")", "if", "self", ".", "opt", ".", "check_cite", ":", "cites", "=", "set", "(", ")", "with", "open", "(", "'%s.aux'", "%", "self", ".", "project_name", ")", "as", "fobj", ":", "aux_content", "=", "fobj", ".", "read", "(", ")", "for", "match", "in", "BIBCITE_PATTERN", ".", "finditer", "(", "aux_content", ")", ":", "name", "=", "match", ".", "groups", "(", ")", "[", "0", "]", "cites", ".", "add", "(", "name", ")", "with", "open", "(", "'%s.bib'", "%", "self", ".", "bib_file", ")", "as", "fobj", ":", "bib_content", "=", "fobj", ".", "read", "(", ")", "for", "match", "in", "BIBENTRY_PATTERN", ".", "finditer", "(", "bib_content", ")", ":", "name", "=", "match", ".", "groups", "(", ")", "[", "0", "]", "if", "name", "not", "in", "cites", ":", "self", ".", "log", ".", "info", "(", "'Bib entry not cited: \"%s\"'", "%", "name", ")", "if", "self", ".", "opt", ".", "clean", ":", "ending", "=", "'.dvi'", "if", "self", ".", "opt", ".", "pdf", ":", "ending", "=", "'.pdf'", "for", "fname", "in", "os", ".", "listdir", "(", "'.'", ")", ":", "if", "not", "(", "fname", "in", "self", ".", "old_dir", "or", "fname", ".", "endswith", "(", "ending", ")", ")", ":", "try", ":", "os", ".", "remove", "(", "fname", ")", "except", "IOError", ":", "pass", "if", "self", ".", "opt", ".", "preview", ":", "self", ".", "open_preview", "(", ")" ]
Run the LaTeX compilation.
[ "Run", "the", "LaTeX", "compilation", "." ]
88baba40ff3e844e4542de60d2032503e206d996
https://github.com/schlamar/latexmk.py/blob/88baba40ff3e844e4542de60d2032503e206d996/latexmake.py#L372-L426
train
DarkEnergySurvey/ugali
ugali/analysis/farm.py
Farm.command
def command(self, outfile, configfile, pix): """ Generate the command for running the likelihood scan. """ params = dict(script=self.config['scan']['script'], config=configfile, outfile=outfile, nside=self.nside_likelihood, pix=pix, verbose='-v' if self.verbose else '') cmd = '%(script)s %(config)s %(outfile)s --hpx %(nside)i %(pix)i %(verbose)s'%params return cmd
python
def command(self, outfile, configfile, pix): """ Generate the command for running the likelihood scan. """ params = dict(script=self.config['scan']['script'], config=configfile, outfile=outfile, nside=self.nside_likelihood, pix=pix, verbose='-v' if self.verbose else '') cmd = '%(script)s %(config)s %(outfile)s --hpx %(nside)i %(pix)i %(verbose)s'%params return cmd
[ "def", "command", "(", "self", ",", "outfile", ",", "configfile", ",", "pix", ")", ":", "params", "=", "dict", "(", "script", "=", "self", ".", "config", "[", "'scan'", "]", "[", "'script'", "]", ",", "config", "=", "configfile", ",", "outfile", "=", "outfile", ",", "nside", "=", "self", ".", "nside_likelihood", ",", "pix", "=", "pix", ",", "verbose", "=", "'-v'", "if", "self", ".", "verbose", "else", "''", ")", "cmd", "=", "'%(script)s %(config)s %(outfile)s --hpx %(nside)i %(pix)i %(verbose)s'", "%", "params", "return", "cmd" ]
Generate the command for running the likelihood scan.
[ "Generate", "the", "command", "for", "running", "the", "likelihood", "scan", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/farm.py#L47-L56
train
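The command template above is fully visible, so its output can be shown directly; the parameter values below are illustrative only.

params = dict(script='ugali/scan.py', config='config.yaml',
              outfile='scan_0001.fits', nside=256, pix=1, verbose='-v')
cmd = '%(script)s %(config)s %(outfile)s --hpx %(nside)i %(pix)i %(verbose)s' % params
print(cmd)   # ugali/scan.py config.yaml scan_0001.fits --hpx 256 1 -v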
DarkEnergySurvey/ugali
ugali/analysis/farm.py
Farm.submit_all
def submit_all(self, coords=None, queue=None, debug=False): """ Submit likelihood analyses on a set of coordinates. If coords is `None`, submit all coordinates in the footprint. Inputs: coords : Array of target locations in Galactic coordinates. queue : Overwrite submit queue. debug : Don't run. """ if coords is None: pixels = np.arange(hp.nside2npix(self.nside_likelihood)) else: coords = np.asarray(coords) if coords.ndim == 1: coords = np.array([coords]) if coords.shape[1] == 2: lon,lat = coords.T radius = np.zeros(len(lon)) elif coords.shape[1] == 3: lon,lat,radius = coords.T else: raise Exception("Unrecognized coords shape:"+str(coords.shape)) #ADW: targets is still in glon,glat if self.config['coords']['coordsys'].lower() == 'cel': lon,lat = gal2cel(lon,lat) vec = ang2vec(lon,lat) pixels = np.zeros(0, dtype=int) for v,r in zip(vec,radius): pix = query_disc(self.nside_likelihood,v,r,inclusive=True,fact=32) pixels = np.hstack([pixels, pix]) #pixels = np.unique(pixels) inside = ugali.utils.skymap.inFootprint(self.config,pixels) if inside.sum() != len(pixels): logger.warning("Ignoring pixels outside survey footprint:\n"+str(pixels[~inside])) if inside.sum() == 0: logger.warning("No pixels inside footprint.") return # Only write the configfile once outdir = mkdir(self.config['output']['likedir']) # Actually copy config instead of re-writing shutil.copy(self.config.filename,outdir) configfile = join(outdir,os.path.basename(self.config.filename)) pixels = pixels[inside] self.submit(pixels,queue=queue,debug=debug,configfile=configfile)
python
def submit_all(self, coords=None, queue=None, debug=False): """ Submit likelihood analyses on a set of coordinates. If coords is `None`, submit all coordinates in the footprint. Inputs: coords : Array of target locations in Galactic coordinates. queue : Overwrite submit queue. debug : Don't run. """ if coords is None: pixels = np.arange(hp.nside2npix(self.nside_likelihood)) else: coords = np.asarray(coords) if coords.ndim == 1: coords = np.array([coords]) if coords.shape[1] == 2: lon,lat = coords.T radius = np.zeros(len(lon)) elif coords.shape[1] == 3: lon,lat,radius = coords.T else: raise Exception("Unrecognized coords shape:"+str(coords.shape)) #ADW: targets is still in glon,glat if self.config['coords']['coordsys'].lower() == 'cel': lon,lat = gal2cel(lon,lat) vec = ang2vec(lon,lat) pixels = np.zeros(0, dtype=int) for v,r in zip(vec,radius): pix = query_disc(self.nside_likelihood,v,r,inclusive=True,fact=32) pixels = np.hstack([pixels, pix]) #pixels = np.unique(pixels) inside = ugali.utils.skymap.inFootprint(self.config,pixels) if inside.sum() != len(pixels): logger.warning("Ignoring pixels outside survey footprint:\n"+str(pixels[~inside])) if inside.sum() == 0: logger.warning("No pixels inside footprint.") return # Only write the configfile once outdir = mkdir(self.config['output']['likedir']) # Actually copy config instead of re-writing shutil.copy(self.config.filename,outdir) configfile = join(outdir,os.path.basename(self.config.filename)) pixels = pixels[inside] self.submit(pixels,queue=queue,debug=debug,configfile=configfile)
[ "def", "submit_all", "(", "self", ",", "coords", "=", "None", ",", "queue", "=", "None", ",", "debug", "=", "False", ")", ":", "if", "coords", "is", "None", ":", "pixels", "=", "np", ".", "arange", "(", "hp", ".", "nside2npix", "(", "self", ".", "nside_likelihood", ")", ")", "else", ":", "coords", "=", "np", ".", "asarray", "(", "coords", ")", "if", "coords", ".", "ndim", "==", "1", ":", "coords", "=", "np", ".", "array", "(", "[", "coords", "]", ")", "if", "coords", ".", "shape", "[", "1", "]", "==", "2", ":", "lon", ",", "lat", "=", "coords", ".", "T", "radius", "=", "np", ".", "zeros", "(", "len", "(", "lon", ")", ")", "elif", "coords", ".", "shape", "[", "1", "]", "==", "3", ":", "lon", ",", "lat", ",", "radius", "=", "coords", ".", "T", "else", ":", "raise", "Exception", "(", "\"Unrecognized coords shape:\"", "+", "str", "(", "coords", ".", "shape", ")", ")", "#ADW: targets is still in glon,glat", "if", "self", ".", "config", "[", "'coords'", "]", "[", "'coordsys'", "]", ".", "lower", "(", ")", "==", "'cel'", ":", "lon", ",", "lat", "=", "gal2cel", "(", "lon", ",", "lat", ")", "vec", "=", "ang2vec", "(", "lon", ",", "lat", ")", "pixels", "=", "np", ".", "zeros", "(", "0", ",", "dtype", "=", "int", ")", "for", "v", ",", "r", "in", "zip", "(", "vec", ",", "radius", ")", ":", "pix", "=", "query_disc", "(", "self", ".", "nside_likelihood", ",", "v", ",", "r", ",", "inclusive", "=", "True", ",", "fact", "=", "32", ")", "pixels", "=", "np", ".", "hstack", "(", "[", "pixels", ",", "pix", "]", ")", "#pixels = np.unique(pixels)", "inside", "=", "ugali", ".", "utils", ".", "skymap", ".", "inFootprint", "(", "self", ".", "config", ",", "pixels", ")", "if", "inside", ".", "sum", "(", ")", "!=", "len", "(", "pixels", ")", ":", "logger", ".", "warning", "(", "\"Ignoring pixels outside survey footprint:\\n\"", "+", "str", "(", "pixels", "[", "~", "inside", "]", ")", ")", "if", "inside", ".", "sum", "(", ")", "==", "0", ":", "logger", ".", "warning", "(", "\"No pixels inside footprint.\"", ")", "return", "# Only write the configfile once", "outdir", "=", "mkdir", "(", "self", ".", "config", "[", "'output'", "]", "[", "'likedir'", "]", ")", "# Actually copy config instead of re-writing", "shutil", ".", "copy", "(", "self", ".", "config", ".", "filename", ",", "outdir", ")", "configfile", "=", "join", "(", "outdir", ",", "os", ".", "path", ".", "basename", "(", "self", ".", "config", ".", "filename", ")", ")", "pixels", "=", "pixels", "[", "inside", "]", "self", ".", "submit", "(", "pixels", ",", "queue", "=", "queue", ",", "debug", "=", "debug", ",", "configfile", "=", "configfile", ")" ]
Submit likelihood analyses on a set of coordinates. If coords is
`None`, submit all coordinates in the footprint.

Inputs:
coords : Array of target locations in Galactic coordinates.
queue  : Override the submit queue.
debug  : Don't run.
[ "Submit", "likelihood", "analyses", "on", "a", "set", "of", "coordinates", ".", "If", "coords", "is", "None", "submit", "all", "coordinates", "in", "the", "footprint", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/farm.py#L104-L153
train
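A minimal usage sketch for the coords argument accepted by submit_all above; the `farm` instance and the coordinate values are hypothetical, and only the accepted array shapes (N x 2 or N x 3) come from the code itself.

import numpy as np

# Hypothetical caller; `farm` stands in for the object defining submit_all.
coords3 = np.array([[45.0, -30.0, 0.5]])  # lon, lat, radius: the 3-column form
coords2 = np.array([[45.0, -30.0]])       # lon, lat: radius is zero-filled
# farm.submit_all(coords=coords3, queue='local', debug=True)
# farm.submit_all(coords=coords2)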
Genida/django-appsettings
src/appsettings/__init__.py
AppSettings.check
def check(cls):
        """
        Class method to check all settings.

        Will raise an ``ImproperlyConfigured`` exception with explanation.
        """
        if cls == AppSettings:
            return None
        exceptions = []
        for setting in cls.settings.values():
            try:
                setting.check()  # pylama:ignore=W0703
            except Exception as e:
                exceptions.append(str(e))
        if exceptions:
            raise ImproperlyConfigured("\n".join(exceptions))
python
def check(cls):
        """
        Class method to check all settings.

        Will raise an ``ImproperlyConfigured`` exception with explanation.
        """
        if cls == AppSettings:
            return None
        exceptions = []
        for setting in cls.settings.values():
            try:
                setting.check()  # pylama:ignore=W0703
            except Exception as e:
                exceptions.append(str(e))
        if exceptions:
            raise ImproperlyConfigured("\n".join(exceptions))
[ "def", "check", "(", "cls", ")", ":", "if", "cls", "==", "AppSettings", ":", "return", "None", "exceptions", "=", "[", "]", "for", "setting", "in", "cls", ".", "settings", ".", "values", "(", ")", ":", "try", ":", "setting", ".", "check", "(", ")", "# pylama:ignore=W0703", "except", "Exception", "as", "e", ":", "exceptions", ".", "append", "(", "str", "(", "e", ")", ")", "if", "exceptions", ":", "raise", "ImproperlyConfigured", "(", "\"\\n\"", ".", "join", "(", "exceptions", ")", ")" ]
Class method to check all settings.

Will raise an ``ImproperlyConfigured`` exception with explanation.
[ "Class", "method", "to", "check", "every", "settings", "." ]
f98867d133558af7dc067f12b44fc1ee4edd4239
https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/__init__.py#L192-L209
train
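A sketch of where a class-level check like this is usually wired up; MySettings is a hypothetical AppSettings subclass defined elsewhere, and Django's AppConfig.ready hook is a common place to call it.

from django.apps import AppConfig

class MyAppConfig(AppConfig):
    name = 'myapp'

    def ready(self):
        # Hypothetical settings class; check() raises ImproperlyConfigured
        # with every failing setting listed, one per line.
        from myapp.conf import MySettings
        MySettings.check()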
alphagov/performanceplatform-collector
performanceplatform/collector/arguments.py
parse_args
def parse_args(name="", args=None): """Parse command line argument for a collector Returns an argparse.Namespace with 'config' and 'query' options""" def _load_json_file(path): with open(path) as f: json_data = json.load(f) json_data['path_to_json_file'] = path return json_data parser = argparse.ArgumentParser(description="%s collector for sending" " data to the performance" " platform" % name) parser.add_argument('-c', '--credentials', dest='credentials', type=_load_json_file, help='JSON file containing credentials ' 'for the collector', required=True) group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-l', '--collector', dest='collector_slug', type=str, help='Collector slug to query the API for the ' 'collector config') group.add_argument('-q', '--query', dest='query', type=_load_json_file, help='JSON file containing details ' 'about the query to make ' 'against the source API ' 'and the target data-set') parser.add_argument('-t', '--token', dest='token', type=_load_json_file, help='JSON file containing token ' 'for the collector', required=True) parser.add_argument('-b', '--performanceplatform', dest='performanceplatform', type=_load_json_file, help='JSON file containing the Performance Platform ' 'config for the collector', required=True) parser.add_argument('-s', '--start', dest='start_at', type=parse_date, help='Date to start collection from') parser.add_argument('-e', '--end', dest='end_at', type=parse_date, help='Date to end collection') parser.add_argument('--console-logging', dest='console_logging', action='store_true', help='Output logging to the console rather than file') parser.add_argument('--dry-run', dest='dry_run', action='store_true', help='Instead of pushing to the Performance Platform ' 'the collector will print out what would have ' 'been pushed') parser.set_defaults(console_logging=False, dry_run=False) args = parser.parse_args(args) return args
python
def parse_args(name="", args=None): """Parse command line argument for a collector Returns an argparse.Namespace with 'config' and 'query' options""" def _load_json_file(path): with open(path) as f: json_data = json.load(f) json_data['path_to_json_file'] = path return json_data parser = argparse.ArgumentParser(description="%s collector for sending" " data to the performance" " platform" % name) parser.add_argument('-c', '--credentials', dest='credentials', type=_load_json_file, help='JSON file containing credentials ' 'for the collector', required=True) group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-l', '--collector', dest='collector_slug', type=str, help='Collector slug to query the API for the ' 'collector config') group.add_argument('-q', '--query', dest='query', type=_load_json_file, help='JSON file containing details ' 'about the query to make ' 'against the source API ' 'and the target data-set') parser.add_argument('-t', '--token', dest='token', type=_load_json_file, help='JSON file containing token ' 'for the collector', required=True) parser.add_argument('-b', '--performanceplatform', dest='performanceplatform', type=_load_json_file, help='JSON file containing the Performance Platform ' 'config for the collector', required=True) parser.add_argument('-s', '--start', dest='start_at', type=parse_date, help='Date to start collection from') parser.add_argument('-e', '--end', dest='end_at', type=parse_date, help='Date to end collection') parser.add_argument('--console-logging', dest='console_logging', action='store_true', help='Output logging to the console rather than file') parser.add_argument('--dry-run', dest='dry_run', action='store_true', help='Instead of pushing to the Performance Platform ' 'the collector will print out what would have ' 'been pushed') parser.set_defaults(console_logging=False, dry_run=False) args = parser.parse_args(args) return args
[ "def", "parse_args", "(", "name", "=", "\"\"", ",", "args", "=", "None", ")", ":", "def", "_load_json_file", "(", "path", ")", ":", "with", "open", "(", "path", ")", "as", "f", ":", "json_data", "=", "json", ".", "load", "(", "f", ")", "json_data", "[", "'path_to_json_file'", "]", "=", "path", "return", "json_data", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"%s collector for sending\"", "\" data to the performance\"", "\" platform\"", "%", "name", ")", "parser", ".", "add_argument", "(", "'-c'", ",", "'--credentials'", ",", "dest", "=", "'credentials'", ",", "type", "=", "_load_json_file", ",", "help", "=", "'JSON file containing credentials '", "'for the collector'", ",", "required", "=", "True", ")", "group", "=", "parser", ".", "add_mutually_exclusive_group", "(", "required", "=", "True", ")", "group", ".", "add_argument", "(", "'-l'", ",", "'--collector'", ",", "dest", "=", "'collector_slug'", ",", "type", "=", "str", ",", "help", "=", "'Collector slug to query the API for the '", "'collector config'", ")", "group", ".", "add_argument", "(", "'-q'", ",", "'--query'", ",", "dest", "=", "'query'", ",", "type", "=", "_load_json_file", ",", "help", "=", "'JSON file containing details '", "'about the query to make '", "'against the source API '", "'and the target data-set'", ")", "parser", ".", "add_argument", "(", "'-t'", ",", "'--token'", ",", "dest", "=", "'token'", ",", "type", "=", "_load_json_file", ",", "help", "=", "'JSON file containing token '", "'for the collector'", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "'-b'", ",", "'--performanceplatform'", ",", "dest", "=", "'performanceplatform'", ",", "type", "=", "_load_json_file", ",", "help", "=", "'JSON file containing the Performance Platform '", "'config for the collector'", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--start'", ",", "dest", "=", "'start_at'", ",", "type", "=", "parse_date", ",", "help", "=", "'Date to start collection from'", ")", "parser", ".", "add_argument", "(", "'-e'", ",", "'--end'", ",", "dest", "=", "'end_at'", ",", "type", "=", "parse_date", ",", "help", "=", "'Date to end collection'", ")", "parser", ".", "add_argument", "(", "'--console-logging'", ",", "dest", "=", "'console_logging'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Output logging to the console rather than file'", ")", "parser", ".", "add_argument", "(", "'--dry-run'", ",", "dest", "=", "'dry_run'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Instead of pushing to the Performance Platform '", "'the collector will print out what would have '", "'been pushed'", ")", "parser", ".", "set_defaults", "(", "console_logging", "=", "False", ",", "dry_run", "=", "False", ")", "args", "=", "parser", ".", "parse_args", "(", "args", ")", "return", "args" ]
Parse command line arguments for a collector

Returns an argparse.Namespace with 'config' and 'query' options
[ "Parse", "command", "line", "argument", "for", "a", "collector" ]
de68ab4aa500c31e436e050fa1268fa928c522a5
https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/performanceplatform/collector/arguments.py#L6-L63
train
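A usage sketch grounded in the argparse definition above; the JSON file names are placeholders and must exist on disk for _load_json_file to open them.

args = parse_args(name='example', args=[
    '-c', 'credentials.json',
    '-q', 'query.json',
    '-t', 'token.json',
    '-b', 'performanceplatform.json',
])
# Each file is parsed by _load_json_file, so e.g. args.token is a dict with
# an extra 'path_to_json_file' key; args.dry_run defaults to False via
# parser.set_defaults(...).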
consbio/ncdjango
ncdjango/config.py
RenderConfiguration.hash
def hash(self): """ Returns a hash of this render configuration from the variable, renderer, and time_index parameters. Used for caching the full-extent, native projection render so that subsequent requests can be served by a warp operation only. """ renderer_str = "{}|{}|{}|{}".format( self.renderer.__class__.__name__, self.renderer.colormap, self.renderer.fill_value, self.renderer.background_color ) if isinstance(self.renderer, StretchedRenderer): renderer_str = "{}|{}|{}".format(renderer_str, self.renderer.method, self.renderer.colorspace) elif isinstance(self.renderer, UniqueValuesRenderer): renderer_str = "{}|{}".format(renderer_str, self.renderer.labels) return hash("{}/{}/{}".format(self.variable.pk, renderer_str, self.time_index))
python
def hash(self): """ Returns a hash of this render configuration from the variable, renderer, and time_index parameters. Used for caching the full-extent, native projection render so that subsequent requests can be served by a warp operation only. """ renderer_str = "{}|{}|{}|{}".format( self.renderer.__class__.__name__, self.renderer.colormap, self.renderer.fill_value, self.renderer.background_color ) if isinstance(self.renderer, StretchedRenderer): renderer_str = "{}|{}|{}".format(renderer_str, self.renderer.method, self.renderer.colorspace) elif isinstance(self.renderer, UniqueValuesRenderer): renderer_str = "{}|{}".format(renderer_str, self.renderer.labels) return hash("{}/{}/{}".format(self.variable.pk, renderer_str, self.time_index))
[ "def", "hash", "(", "self", ")", ":", "renderer_str", "=", "\"{}|{}|{}|{}\"", ".", "format", "(", "self", ".", "renderer", ".", "__class__", ".", "__name__", ",", "self", ".", "renderer", ".", "colormap", ",", "self", ".", "renderer", ".", "fill_value", ",", "self", ".", "renderer", ".", "background_color", ")", "if", "isinstance", "(", "self", ".", "renderer", ",", "StretchedRenderer", ")", ":", "renderer_str", "=", "\"{}|{}|{}\"", ".", "format", "(", "renderer_str", ",", "self", ".", "renderer", ".", "method", ",", "self", ".", "renderer", ".", "colorspace", ")", "elif", "isinstance", "(", "self", ".", "renderer", ",", "UniqueValuesRenderer", ")", ":", "renderer_str", "=", "\"{}|{}\"", ".", "format", "(", "renderer_str", ",", "self", ".", "renderer", ".", "labels", ")", "return", "hash", "(", "\"{}/{}/{}\"", ".", "format", "(", "self", ".", "variable", ".", "pk", ",", "renderer_str", ",", "self", ".", "time_index", ")", ")" ]
Returns a hash of this render configuration from the variable, renderer, and time_index parameters. Used for caching the full-extent, native projection render so that subsequent requests can be served by a warp operation only.
[ "Returns", "a", "hash", "of", "this", "render", "configuration", "from", "the", "variable", "renderer", "and", "time_index", "parameters", ".", "Used", "for", "caching", "the", "full", "-", "extent", "native", "projection", "render", "so", "that", "subsequent", "requests", "can", "be", "served", "by", "a", "warp", "operation", "only", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/config.py#L54-L70
train
DarkEnergySurvey/ugali
ugali/utils/factory.py
factory
def factory(type, module=None, **kwargs): """ Factory for creating objects. Arguments are passed directly to the constructor of the chosen class. """ cls = type if module is None: module = __name__ fn = lambda member: inspect.isclass(member) and member.__module__==module classes = odict(inspect.getmembers(sys.modules[module], fn)) members = odict([(k.lower(),v) for k,v in classes.items()]) lower = cls.lower() if lower not in list(members.keys()): msg = "Unrecognized class: %s.%s"%(module,cls) raise KeyError(msg) return members[lower](**kwargs)
python
def factory(type, module=None, **kwargs): """ Factory for creating objects. Arguments are passed directly to the constructor of the chosen class. """ cls = type if module is None: module = __name__ fn = lambda member: inspect.isclass(member) and member.__module__==module classes = odict(inspect.getmembers(sys.modules[module], fn)) members = odict([(k.lower(),v) for k,v in classes.items()]) lower = cls.lower() if lower not in list(members.keys()): msg = "Unrecognized class: %s.%s"%(module,cls) raise KeyError(msg) return members[lower](**kwargs)
[ "def", "factory", "(", "type", ",", "module", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cls", "=", "type", "if", "module", "is", "None", ":", "module", "=", "__name__", "fn", "=", "lambda", "member", ":", "inspect", ".", "isclass", "(", "member", ")", "and", "member", ".", "__module__", "==", "module", "classes", "=", "odict", "(", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "module", "]", ",", "fn", ")", ")", "members", "=", "odict", "(", "[", "(", "k", ".", "lower", "(", ")", ",", "v", ")", "for", "k", ",", "v", "in", "classes", ".", "items", "(", ")", "]", ")", "lower", "=", "cls", ".", "lower", "(", ")", "if", "lower", "not", "in", "list", "(", "members", ".", "keys", "(", ")", ")", ":", "msg", "=", "\"Unrecognized class: %s.%s\"", "%", "(", "module", ",", "cls", ")", "raise", "KeyError", "(", "msg", ")", "return", "members", "[", "lower", "]", "(", "*", "*", "kwargs", ")" ]
Factory for creating objects. Arguments are passed directly to the constructor of the chosen class.
[ "Factory", "for", "creating", "objects", ".", "Arguments", "are", "passed", "directly", "to", "the", "constructor", "of", "the", "chosen", "class", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/factory.py#L9-L23
train
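A self-contained sketch of the case-insensitive lookup that factory performs; ExampleKernel is illustrative and must be defined in the module named by the `module` argument.

class ExampleKernel(object):
    def __init__(self, width=1.0):
        self.width = width

# Matching is case-insensitive against classes defined in `module`:
obj = factory('ExampleKernel', module=__name__, width=2.0)
assert isinstance(obj, ExampleKernel) and obj.width == 2.0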
consbio/ncdjango
ncdjango/interfaces/arcgis_extended/utils.py
get_definition_from_renderer
def get_definition_from_renderer(renderer): """Returns a dictionary definition of the given renderer""" config = { 'colors': [[x[0], x[1].to_hex()] for x in renderer.colormap], 'options': {} } if renderer.fill_value: config['options']['fill_value'] = renderer.fill_value if isinstance(renderer, StretchedRenderer): config['type'] = 'stretched' config['options']['color_space'] = renderer.colorspace elif isinstance(renderer, UniqueValuesRenderer): config['type'] = 'unique' if renderer.labels: config['options']['labels'] = renderer.labels elif isinstance(renderer, ClassifiedRenderer): config['type'] = 'classified' else: raise ValueError('{0} is not a valid renderer type'.format(renderer.__class__.__name__)) return config
python
def get_definition_from_renderer(renderer): """Returns a dictionary definition of the given renderer""" config = { 'colors': [[x[0], x[1].to_hex()] for x in renderer.colormap], 'options': {} } if renderer.fill_value: config['options']['fill_value'] = renderer.fill_value if isinstance(renderer, StretchedRenderer): config['type'] = 'stretched' config['options']['color_space'] = renderer.colorspace elif isinstance(renderer, UniqueValuesRenderer): config['type'] = 'unique' if renderer.labels: config['options']['labels'] = renderer.labels elif isinstance(renderer, ClassifiedRenderer): config['type'] = 'classified' else: raise ValueError('{0} is not a valid renderer type'.format(renderer.__class__.__name__)) return config
[ "def", "get_definition_from_renderer", "(", "renderer", ")", ":", "config", "=", "{", "'colors'", ":", "[", "[", "x", "[", "0", "]", ",", "x", "[", "1", "]", ".", "to_hex", "(", ")", "]", "for", "x", "in", "renderer", ".", "colormap", "]", ",", "'options'", ":", "{", "}", "}", "if", "renderer", ".", "fill_value", ":", "config", "[", "'options'", "]", "[", "'fill_value'", "]", "=", "renderer", ".", "fill_value", "if", "isinstance", "(", "renderer", ",", "StretchedRenderer", ")", ":", "config", "[", "'type'", "]", "=", "'stretched'", "config", "[", "'options'", "]", "[", "'color_space'", "]", "=", "renderer", ".", "colorspace", "elif", "isinstance", "(", "renderer", ",", "UniqueValuesRenderer", ")", ":", "config", "[", "'type'", "]", "=", "'unique'", "if", "renderer", ".", "labels", ":", "config", "[", "'options'", "]", "[", "'labels'", "]", "=", "renderer", ".", "labels", "elif", "isinstance", "(", "renderer", ",", "ClassifiedRenderer", ")", ":", "config", "[", "'type'", "]", "=", "'classified'", "else", ":", "raise", "ValueError", "(", "'{0} is not a valid renderer type'", ".", "format", "(", "renderer", ".", "__class__", ".", "__name__", ")", ")", "return", "config" ]
Returns a dictionary definition of the given renderer
[ "Returns", "a", "dictionary", "definition", "of", "the", "given", "renderer" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis_extended/utils.py#L46-L69
train
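The renderer constructors are not shown in this record, so the following only sketches the dictionary shape produced for a stretched renderer; the colormap stops and color-space value are made up.

expected = {
    'colors': [[0, '#000000'], [100, '#ffffff']],  # [value, hex] pairs
    'options': {'color_space': 'hsv'},             # plus 'fill_value' when set
    'type': 'stretched',
}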
DarkEnergySurvey/ugali
ugali/analysis/source.py
Source.set_model
def set_model(self, name, model): """ Set a model. Parameters ---------- name : name of the model -- e.g., richness, kernel, isochrone, etc. model : the model instance Returns ------- None """ # Careful to not use `hasattr` # https://hynek.me/articles/hasattr/ try: self.__getattribute__('models') except AttributeError: object.__setattr__(self, 'models',odict()) self.models[name] = model
python
def set_model(self, name, model): """ Set a model. Parameters ---------- name : name of the model -- e.g., richness, kernel, isochrone, etc. model : the model instance Returns ------- None """ # Careful to not use `hasattr` # https://hynek.me/articles/hasattr/ try: self.__getattribute__('models') except AttributeError: object.__setattr__(self, 'models',odict()) self.models[name] = model
[ "def", "set_model", "(", "self", ",", "name", ",", "model", ")", ":", "# Careful to not use `hasattr`", "# https://hynek.me/articles/hasattr/", "try", ":", "self", ".", "__getattribute__", "(", "'models'", ")", "except", "AttributeError", ":", "object", ".", "__setattr__", "(", "self", ",", "'models'", ",", "odict", "(", ")", ")", "self", ".", "models", "[", "name", "]", "=", "model" ]
Set a model. Parameters ---------- name : name of the model -- e.g., richness, kernel, isochrone, etc. model : the model instance Returns ------- None
[ "Set", "a", "model", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/source.py#L170-L188
train
DarkEnergySurvey/ugali
ugali/analysis/source.py
Source.set_params
def set_params(self,**kwargs): """ Set the parameter values """ for key,value in list(kwargs.items()): setattr(self,key,value)
python
def set_params(self,**kwargs): """ Set the parameter values """ for key,value in list(kwargs.items()): setattr(self,key,value)
[ "def", "set_params", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "key", ",", "value", "in", "list", "(", "kwargs", ".", "items", "(", ")", ")", ":", "setattr", "(", "self", ",", "key", ",", "value", ")" ]
Set the parameter values
[ "Set", "the", "parameter", "values" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/source.py#L196-L199
train
DarkEnergySurvey/ugali
ugali/analysis/source.py
Source.get_params
def get_params(self): """ Get an odict of the parameter names and values """ return odict([(key,param.value) for key,param in self.params.items()])
python
def get_params(self): """ Get an odict of the parameter names and values """ return odict([(key,param.value) for key,param in self.params.items()])
[ "def", "get_params", "(", "self", ")", ":", "return", "odict", "(", "[", "(", "key", ",", "param", ".", "value", ")", "for", "key", ",", "param", "in", "self", ".", "params", ".", "items", "(", ")", "]", ")" ]
Get an odict of the parameter names and values
[ "Get", "an", "odict", "of", "the", "parameter", "names", "and", "values" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/source.py#L201-L203
train
DarkEnergySurvey/ugali
ugali/analysis/source.py
Source.get_free_params
def get_free_params(self): """ Get an odict of free parameter names and values """ return odict([(key,param.value) for key,param in self.params.items() if param.free])
python
def get_free_params(self): """ Get an odict of free parameter names and values """ return odict([(key,param.value) for key,param in self.params.items() if param.free])
[ "def", "get_free_params", "(", "self", ")", ":", "return", "odict", "(", "[", "(", "key", ",", "param", ".", "value", ")", "for", "key", ",", "param", "in", "self", ".", "params", ".", "items", "(", ")", "if", "param", ".", "free", "]", ")" ]
Get an odict of free parameter names and values
[ "Get", "an", "odict", "of", "free", "parameter", "names", "and", "values" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/source.py#L205-L207
train
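A hedged sketch tying together set_params, get_params, and get_free_params above; `source` stands in for a Source instance and the parameter names are illustrative, so the calls are left commented.

# source.set_params(lon=45.0, lat=-30.0)  # dispatches through setattr()
# every = source.get_params()             # odict of all parameter values
# free  = source.get_free_params()        # only parameters with .free == True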
totalgood/pugnlp
src/pugnlp/regexes.py
iter_finds
def iter_finds(regex_obj, s): """Generate all matches found within a string for a regex and yield each match as a string""" if isinstance(regex_obj, str): for m in re.finditer(regex_obj, s): yield m.group() else: for m in regex_obj.finditer(s): yield m.group()
python
def iter_finds(regex_obj, s): """Generate all matches found within a string for a regex and yield each match as a string""" if isinstance(regex_obj, str): for m in re.finditer(regex_obj, s): yield m.group() else: for m in regex_obj.finditer(s): yield m.group()
[ "def", "iter_finds", "(", "regex_obj", ",", "s", ")", ":", "if", "isinstance", "(", "regex_obj", ",", "str", ")", ":", "for", "m", "in", "re", ".", "finditer", "(", "regex_obj", ",", "s", ")", ":", "yield", "m", ".", "group", "(", ")", "else", ":", "for", "m", "in", "regex_obj", ".", "finditer", "(", "s", ")", ":", "yield", "m", ".", "group", "(", ")" ]
Generate all matches found within a string for a regex and yield each match as a string
[ "Generate", "all", "matches", "found", "within", "a", "string", "for", "a", "regex", "and", "yield", "each", "match", "as", "a", "string" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/regexes.py#L292-L299
train
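iter_finds is fully self-contained, so a concrete example runs as-is; both the pattern-string and precompiled-regex branches are exercised.

import re

print(list(iter_finds(r'\d+', 'a1b22c333')))            # ['1', '22', '333']
print(list(iter_finds(re.compile('[a-z]+'), 'a1b22')))  # ['a', 'b']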
DarkEnergySurvey/ugali
ugali/isochrone/composite.py
CompositeIsochrone.composite_decorator
def composite_decorator(func): """ Decorator for wrapping functions that calculate a weighted sum """ @wraps(func) def wrapper(self, *args, **kwargs): total = [] for weight,iso in zip(self.weights,self.isochrones): subfunc = getattr(iso,func.__name__) total.append(weight*subfunc(*args,**kwargs)) return np.sum(total,axis=0) return wrapper
python
def composite_decorator(func): """ Decorator for wrapping functions that calculate a weighted sum """ @wraps(func) def wrapper(self, *args, **kwargs): total = [] for weight,iso in zip(self.weights,self.isochrones): subfunc = getattr(iso,func.__name__) total.append(weight*subfunc(*args,**kwargs)) return np.sum(total,axis=0) return wrapper
[ "def", "composite_decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "total", "=", "[", "]", "for", "weight", ",", "iso", "in", "zip", "(", "self", ".", "weights", ",", "self", ".", "isochrones", ")", ":", "subfunc", "=", "getattr", "(", "iso", ",", "func", ".", "__name__", ")", "total", ".", "append", "(", "weight", "*", "subfunc", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "return", "np", ".", "sum", "(", "total", ",", "axis", "=", "0", ")", "return", "wrapper" ]
Decorator for wrapping functions that calculate a weighted sum
[ "Decorator", "for", "wrapping", "functions", "that", "calculate", "a", "weighted", "sum" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/composite.py#L83-L94
train
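A standalone sketch of the same weighted-sum dispatch pattern, with toy classes in place of isochrones; only the pattern, not the names, comes from the record above.

import numpy as np
from functools import wraps

def weighted(func):
    """Call the same-named method on each part and return the weighted sum."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        return np.sum([w * getattr(p, func.__name__)(*args, **kwargs)
                       for w, p in zip(self.weights, self.parts)], axis=0)
    return wrapper

class Gauss(object):
    def pdf(self, x):
        return np.exp(-0.5 * np.asarray(x)**2)

class Composite(object):
    def __init__(self, parts, weights):
        self.parts, self.weights = parts, weights

    @weighted
    def pdf(self, x):
        pass  # never executed; the decorator dispatches to self.parts

c = Composite([Gauss(), Gauss()], weights=[0.25, 0.75])
print(c.pdf([0.0]))  # [1.0] == 0.25*1 + 0.75*1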
DarkEnergySurvey/ugali
ugali/observation/catalog.py
mergeCatalogs
def mergeCatalogs(catalog_list): """ Merge a list of Catalogs. Parameters: ----------- catalog_list : List of Catalog objects. Returns: -------- catalog : Combined Catalog object """ # Check the columns for c in catalog_list: if c.data.dtype.names != catalog_list[0].data.dtype.names: msg = "Catalog data columns not the same." raise Exception(msg) data = np.concatenate([c.data for c in catalog_list]) config = catalog_list[0].config return Catalog(config,data=data)
python
def mergeCatalogs(catalog_list): """ Merge a list of Catalogs. Parameters: ----------- catalog_list : List of Catalog objects. Returns: -------- catalog : Combined Catalog object """ # Check the columns for c in catalog_list: if c.data.dtype.names != catalog_list[0].data.dtype.names: msg = "Catalog data columns not the same." raise Exception(msg) data = np.concatenate([c.data for c in catalog_list]) config = catalog_list[0].config return Catalog(config,data=data)
[ "def", "mergeCatalogs", "(", "catalog_list", ")", ":", "# Check the columns", "for", "c", "in", "catalog_list", ":", "if", "c", ".", "data", ".", "dtype", ".", "names", "!=", "catalog_list", "[", "0", "]", ".", "data", ".", "dtype", ".", "names", ":", "msg", "=", "\"Catalog data columns not the same.\"", "raise", "Exception", "(", "msg", ")", "data", "=", "np", ".", "concatenate", "(", "[", "c", ".", "data", "for", "c", "in", "catalog_list", "]", ")", "config", "=", "catalog_list", "[", "0", "]", ".", "config", "return", "Catalog", "(", "config", ",", "data", "=", "data", ")" ]
Merge a list of Catalogs. Parameters: ----------- catalog_list : List of Catalog objects. Returns: -------- catalog : Combined Catalog object
[ "Merge", "a", "list", "of", "Catalogs", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/catalog.py#L254-L273
train
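The heart of the merge is a structured-array concatenation, which only succeeds when the column layouts agree; a minimal numpy illustration with made-up columns.

import numpy as np

a = np.array([(1, 20.5)], dtype=[('id', 'i4'), ('mag', 'f4')])
b = np.array([(2, 21.0)], dtype=[('id', 'i4'), ('mag', 'f4')])
merged = np.concatenate([a, b])   # dtype.names match, so this is valid
assert len(merged) == len(a) + len(b)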
DarkEnergySurvey/ugali
ugali/observation/catalog.py
Catalog.applyCut
def applyCut(self, cut): """ Return a new catalog which is a subset of objects selected using the input cut array. NOTE: This is really a *selection* (i.e., objects are retained if the value of 'cut' is True) """ return Catalog(self.config, data=self.data[cut])
python
def applyCut(self, cut): """ Return a new catalog which is a subset of objects selected using the input cut array. NOTE: This is really a *selection* (i.e., objects are retained if the value of 'cut' is True) """ return Catalog(self.config, data=self.data[cut])
[ "def", "applyCut", "(", "self", ",", "cut", ")", ":", "return", "Catalog", "(", "self", ".", "config", ",", "data", "=", "self", ".", "data", "[", "cut", "]", ")" ]
Return a new catalog which is a subset of objects selected using the input cut array. NOTE: This is really a *selection* (i.e., objects are retained if the value of 'cut' is True)
[ "Return", "a", "new", "catalog", "which", "is", "a", "subset", "of", "objects", "selected", "using", "the", "input", "cut", "array", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/catalog.py#L54-L61
train
DarkEnergySurvey/ugali
ugali/observation/catalog.py
Catalog.bootstrap
def bootstrap(self, mc_bit=0x10, seed=None):
        """
        Return a random catalog by bootstrapping the colors of the objects
        in the current catalog.
        """
        if seed is not None: np.random.seed(seed)
        data = copy.deepcopy(self.data)
        idx = np.random.randint(0,len(data),len(data))
        data[self.config['catalog']['mag_1_field']][:] = self.mag_1[idx]
        data[self.config['catalog']['mag_err_1_field']][:] = self.mag_err_1[idx]
        data[self.config['catalog']['mag_2_field']][:] = self.mag_2[idx]
        data[self.config['catalog']['mag_err_2_field']][:] = self.mag_err_2[idx]
        data[self.config['catalog']['mc_source_id_field']][:] |= mc_bit
        return Catalog(self.config, data=data)
python
def bootstrap(self, mc_bit=0x10, seed=None):
        """
        Return a random catalog by bootstrapping the colors of the objects
        in the current catalog.
        """
        if seed is not None: np.random.seed(seed)
        data = copy.deepcopy(self.data)
        idx = np.random.randint(0,len(data),len(data))
        data[self.config['catalog']['mag_1_field']][:] = self.mag_1[idx]
        data[self.config['catalog']['mag_err_1_field']][:] = self.mag_err_1[idx]
        data[self.config['catalog']['mag_2_field']][:] = self.mag_2[idx]
        data[self.config['catalog']['mag_err_2_field']][:] = self.mag_err_2[idx]
        data[self.config['catalog']['mc_source_id_field']][:] |= mc_bit
        return Catalog(self.config, data=data)
[ "def", "bootstrap", "(", "self", ",", "mc_bit", "=", "0x10", ",", "seed", "=", "None", ")", ":", "if", "seed", "is", "not", "None", ":", "np", ".", "random", ".", "seed", "(", "seed", ")", "data", "=", "copy", ".", "deepcopy", "(", "self", ".", "data", ")", "idx", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "len", "(", "data", ")", ",", "len", "(", "data", ")", ")", "data", "[", "self", ".", "config", "[", "'catalog'", "]", "[", "'mag_1_field'", "]", "]", "[", ":", "]", "=", "self", ".", "mag_1", "[", "idx", "]", "data", "[", "self", ".", "config", "[", "'catalog'", "]", "[", "'mag_err_1_field'", "]", "]", "[", ":", "]", "=", "self", ".", "mag_err_1", "[", "idx", "]", "data", "[", "self", ".", "config", "[", "'catalog'", "]", "[", "'mag_2_field'", "]", "]", "[", ":", "]", "=", "self", ".", "mag_2", "[", "idx", "]", "data", "[", "self", ".", "config", "[", "'catalog'", "]", "[", "'mag_err_2_field'", "]", "]", "[", ":", "]", "=", "self", ".", "mag_err_2", "[", "idx", "]", "data", "[", "self", ".", "config", "[", "'catalog'", "]", "[", "'mc_source_id_field'", "]", "]", "[", ":", "]", "|=", "mc_bit", "return", "Catalog", "(", "self", ".", "config", ",", "data", "=", "data", ")" ]
Return a random catalog by bootstrapping the colors of the objects
in the current catalog.
[ "Return", "a", "random", "catalog", "by", "boostrapping", "the", "colors", "of", "the", "objects", "in", "the", "current", "catalog", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/catalog.py#L63-L75
train
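The resampling core of bootstrap above, reduced to a runnable numpy snippet: draw N indices with replacement, then index the column with them.

import numpy as np

np.random.seed(0)
mags = np.array([20.1, 20.5, 21.0, 19.8])
idx = np.random.randint(0, len(mags), len(mags))  # indices drawn with replacement
resampled = mags[idx]                             # same length, duplicates allowed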
DarkEnergySurvey/ugali
ugali/observation/catalog.py
Catalog.project
def project(self, projector = None):
        """
        Project coordinates on sphere to image plane using Projector class.
        """
        msg = "'%s.project': ADW 2018-05-05"%self.__class__.__name__
        # Emit the warning instead of just instantiating it; the original
        # `DeprecationWarning(msg)` built the exception object and silently
        # discarded it (assumes `warnings` is imported at module level).
        warnings.warn(msg, DeprecationWarning)
        if projector is None:
            try:
                self.projector = ugali.utils.projector.Projector(self.config['coords']['reference'][0],
                                                                 self.config['coords']['reference'][1])
            except KeyError:
                logger.warning('Projection reference point is median (lon, lat) of catalog objects')
                self.projector = ugali.utils.projector.Projector(np.median(self.lon), np.median(self.lat))
        else:
            self.projector = projector

        self.x, self.y = self.projector.sphereToImage(self.lon, self.lat)
python
def project(self, projector = None):
        """
        Project coordinates on sphere to image plane using Projector class.
        """
        msg = "'%s.project': ADW 2018-05-05"%self.__class__.__name__
        # Emit the warning instead of just instantiating it; the original
        # `DeprecationWarning(msg)` built the exception object and silently
        # discarded it (assumes `warnings` is imported at module level).
        warnings.warn(msg, DeprecationWarning)
        if projector is None:
            try:
                self.projector = ugali.utils.projector.Projector(self.config['coords']['reference'][0],
                                                                 self.config['coords']['reference'][1])
            except KeyError:
                logger.warning('Projection reference point is median (lon, lat) of catalog objects')
                self.projector = ugali.utils.projector.Projector(np.median(self.lon), np.median(self.lat))
        else:
            self.projector = projector

        self.x, self.y = self.projector.sphereToImage(self.lon, self.lat)
[ "def", "project", "(", "self", ",", "projector", "=", "None", ")", ":", "msg", "=", "\"'%s.project': ADW 2018-05-05\"", "%", "self", ".", "__class__", ".", "__name__", "DeprecationWarning", "(", "msg", ")", "if", "projector", "is", "None", ":", "try", ":", "self", ".", "projector", "=", "ugali", ".", "utils", ".", "projector", ".", "Projector", "(", "self", ".", "config", "[", "'coords'", "]", "[", "'reference'", "]", "[", "0", "]", ",", "self", ".", "config", "[", "'coords'", "]", "[", "'reference'", "]", "[", "1", "]", ")", "except", "KeyError", ":", "logger", ".", "warning", "(", "'Projection reference point is median (lon, lat) of catalog objects'", ")", "self", ".", "projector", "=", "ugali", ".", "utils", ".", "projector", ".", "Projector", "(", "np", ".", "median", "(", "self", ".", "lon", ")", ",", "np", ".", "median", "(", "self", ".", "lat", ")", ")", "else", ":", "self", ".", "projector", "=", "projector", "self", ".", "x", ",", "self", ".", "y", "=", "self", ".", "projector", ".", "sphereToImage", "(", "self", ".", "lon", ",", "self", ".", "lat", ")" ]
Project coordinates on sphere to image plane using Projector class.
[ "Project", "coordinates", "on", "sphere", "to", "image", "plane", "using", "Projector", "class", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/catalog.py#L77-L93
train
DarkEnergySurvey/ugali
ugali/observation/catalog.py
Catalog.spatialBin
def spatialBin(self, roi):
        """
        Calculate indices of ROI pixels corresponding to object locations.
        """
        if hasattr(self,'pixel_roi_index') and hasattr(self,'pixel'):
            logger.warning('Catalog already spatially binned')
            return

        # ADW: Not safe to set index = -1 (since it will access last entry);
        # np.inf would be better...
        self.pixel = ang2pix(self.config['coords']['nside_pixel'],self.lon,self.lat)
        self.pixel_roi_index = roi.indexROI(self.lon,self.lat)

        logger.info("Found %i objects outside ROI"%(self.pixel_roi_index < 0).sum())
python
def spatialBin(self, roi):
        """
        Calculate indices of ROI pixels corresponding to object locations.
        """
        if hasattr(self,'pixel_roi_index') and hasattr(self,'pixel'):
            logger.warning('Catalog already spatially binned')
            return

        # ADW: Not safe to set index = -1 (since it will access last entry);
        # np.inf would be better...
        self.pixel = ang2pix(self.config['coords']['nside_pixel'],self.lon,self.lat)
        self.pixel_roi_index = roi.indexROI(self.lon,self.lat)

        logger.info("Found %i objects outside ROI"%(self.pixel_roi_index < 0).sum())
[ "def", "spatialBin", "(", "self", ",", "roi", ")", ":", "if", "hasattr", "(", "self", ",", "'pixel_roi_index'", ")", "and", "hasattr", "(", "self", ",", "'pixel'", ")", ":", "logger", ".", "warning", "(", "'Catalog alread spatially binned'", ")", "return", "# ADW: Not safe to set index = -1 (since it will access last entry); ", "# np.inf would be better...", "self", ".", "pixel", "=", "ang2pix", "(", "self", ".", "config", "[", "'coords'", "]", "[", "'nside_pixel'", "]", ",", "self", ".", "lon", ",", "self", ".", "lat", ")", "self", ".", "pixel_roi_index", "=", "roi", ".", "indexROI", "(", "self", ".", "lon", ",", "self", ".", "lat", ")", "logger", ".", "info", "(", "\"Found %i objects outside ROI\"", "%", "(", "self", ".", "pixel_roi_index", "<", "0", ")", ".", "sum", "(", ")", ")" ]
Calculate indices of ROI pixels corresponding to object locations.
[ "Calculate", "indices", "of", "ROI", "pixels", "corresponding", "to", "object", "locations", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/catalog.py#L95-L108
train
DarkEnergySurvey/ugali
ugali/observation/catalog.py
Catalog.write
def write(self, outfile, clobber=True, **kwargs):
        """
        Write the current object catalog to FITS file.

        Parameters:
        -----------
        outfile : the FITS file to write.
        clobber : remove existing file
        kwargs  : passed to fitsio.write

        Returns:
        --------
        None
        """
        # Pass the caller's `clobber` through; the original hardcoded
        # clobber=True, silently ignoring the keyword argument.
        fitsio.write(outfile,self.data,clobber=clobber,**kwargs)
python
def write(self, outfile, clobber=True, **kwargs):
        """
        Write the current object catalog to FITS file.

        Parameters:
        -----------
        outfile : the FITS file to write.
        clobber : remove existing file
        kwargs  : passed to fitsio.write

        Returns:
        --------
        None
        """
        # Pass the caller's `clobber` through; the original hardcoded
        # clobber=True, silently ignoring the keyword argument.
        fitsio.write(outfile,self.data,clobber=clobber,**kwargs)
[ "def", "write", "(", "self", ",", "outfile", ",", "clobber", "=", "True", ",", "*", "*", "kwargs", ")", ":", "fitsio", ".", "write", "(", "outfile", ",", "self", ".", "data", ",", "clobber", "=", "True", ",", "*", "*", "kwargs", ")" ]
Write the current object catalog to FITS file.

Parameters:
-----------
outfile : the FITS file to write.
clobber : remove existing file
kwargs  : passed to fitsio.write

Returns:
--------
None
[ "Write", "the", "current", "object", "catalog", "to", "FITS", "file", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/catalog.py#L110-L124
train
DarkEnergySurvey/ugali
ugali/observation/catalog.py
Catalog._parse
def _parse(self, roi=None, filenames=None): """ Parse catalog FITS files into recarray. Parameters: ----------- roi : The region of interest; if 'roi=None', read all catalog files Returns: -------- None """ if (roi is not None) and (filenames is not None): msg = "Cannot take both roi and filenames" raise Exception(msg) if roi is not None: pixels = roi.getCatalogPixels() filenames = self.config.getFilenames()['catalog'][pixels] elif filenames is None: filenames = self.config.getFilenames()['catalog'].compressed() else: filenames = np.atleast_1d(filenames) if len(filenames) == 0: msg = "No catalog files found." raise Exception(msg) # Load the data self.data = load_infiles(filenames) # Apply a selection cut self._applySelection() # Cast data to recarray (historical reasons) self.data = self.data.view(np.recarray)
python
def _parse(self, roi=None, filenames=None): """ Parse catalog FITS files into recarray. Parameters: ----------- roi : The region of interest; if 'roi=None', read all catalog files Returns: -------- None """ if (roi is not None) and (filenames is not None): msg = "Cannot take both roi and filenames" raise Exception(msg) if roi is not None: pixels = roi.getCatalogPixels() filenames = self.config.getFilenames()['catalog'][pixels] elif filenames is None: filenames = self.config.getFilenames()['catalog'].compressed() else: filenames = np.atleast_1d(filenames) if len(filenames) == 0: msg = "No catalog files found." raise Exception(msg) # Load the data self.data = load_infiles(filenames) # Apply a selection cut self._applySelection() # Cast data to recarray (historical reasons) self.data = self.data.view(np.recarray)
[ "def", "_parse", "(", "self", ",", "roi", "=", "None", ",", "filenames", "=", "None", ")", ":", "if", "(", "roi", "is", "not", "None", ")", "and", "(", "filenames", "is", "not", "None", ")", ":", "msg", "=", "\"Cannot take both roi and filenames\"", "raise", "Exception", "(", "msg", ")", "if", "roi", "is", "not", "None", ":", "pixels", "=", "roi", ".", "getCatalogPixels", "(", ")", "filenames", "=", "self", ".", "config", ".", "getFilenames", "(", ")", "[", "'catalog'", "]", "[", "pixels", "]", "elif", "filenames", "is", "None", ":", "filenames", "=", "self", ".", "config", ".", "getFilenames", "(", ")", "[", "'catalog'", "]", ".", "compressed", "(", ")", "else", ":", "filenames", "=", "np", ".", "atleast_1d", "(", "filenames", ")", "if", "len", "(", "filenames", ")", "==", "0", ":", "msg", "=", "\"No catalog files found.\"", "raise", "Exception", "(", "msg", ")", "# Load the data", "self", ".", "data", "=", "load_infiles", "(", "filenames", ")", "# Apply a selection cut", "self", ".", "_applySelection", "(", ")", "# Cast data to recarray (historical reasons)", "self", ".", "data", "=", "self", ".", "data", ".", "view", "(", "np", ".", "recarray", ")" ]
Parse catalog FITS files into recarray. Parameters: ----------- roi : The region of interest; if 'roi=None', read all catalog files Returns: -------- None
[ "Parse", "catalog", "FITS", "files", "into", "recarray", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/catalog.py#L126-L161
train
DarkEnergySurvey/ugali
ugali/observation/catalog.py
Catalog._defineVariables
def _defineVariables(self):
        """
        Helper function to define pertinent variables from catalog data.

        ADW (20170627): This has largely been replaced by properties.
        """
        logger.info('Catalog contains %i objects'%(len(self.data)))

        mc_source_id_field = self.config['catalog']['mc_source_id_field']
        if mc_source_id_field is not None:
            if mc_source_id_field not in self.data.dtype.names:
                array = np.zeros(len(self.data),dtype='>i8') # FITS byte-order convention
                self.data = mlab.rec_append_fields(self.data,
                                                   names=mc_source_id_field,
                                                   arrs=array)
            logger.info('Found %i simulated objects'%(np.sum(self.mc_source_id>0)))
python
def _defineVariables(self):
        """
        Helper function to define pertinent variables from catalog data.

        ADW (20170627): This has largely been replaced by properties.
        """
        logger.info('Catalog contains %i objects'%(len(self.data)))

        mc_source_id_field = self.config['catalog']['mc_source_id_field']
        if mc_source_id_field is not None:
            if mc_source_id_field not in self.data.dtype.names:
                array = np.zeros(len(self.data),dtype='>i8') # FITS byte-order convention
                self.data = mlab.rec_append_fields(self.data,
                                                   names=mc_source_id_field,
                                                   arrs=array)
            logger.info('Found %i simulated objects'%(np.sum(self.mc_source_id>0)))
[ "def", "_defineVariables", "(", "self", ")", ":", "logger", ".", "info", "(", "'Catalog contains %i objects'", "%", "(", "len", "(", "self", ".", "data", ")", ")", ")", "mc_source_id_field", "=", "self", ".", "config", "[", "'catalog'", "]", "[", "'mc_source_id_field'", "]", "if", "mc_source_id_field", "is", "not", "None", ":", "if", "mc_source_id_field", "not", "in", "self", ".", "data", ".", "dtype", ".", "names", ":", "array", "=", "np", ".", "zeros", "(", "len", "(", "self", ".", "data", ")", ",", "dtype", "=", "'>i8'", ")", "# FITS byte-order convention", "self", ".", "data", "=", "mlab", ".", "rec_append_fields", "(", "self", ".", "data", ",", "names", "=", "mc_source_id_field", ",", "arrs", "=", "array", ")", "logger", ".", "info", "(", "'Found %i simulated objects'", "%", "(", "np", ".", "sum", "(", "self", ".", "mc_source_id", ">", "0", ")", ")", ")" ]
Helper function to define pertinent variables from catalog data.

ADW (20170627): This has largely been replaced by properties.
[ "Helper", "funtion", "to", "define", "pertinent", "variables", "from", "catalog", "data", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/catalog.py#L178-L193
train
consbio/ncdjango
ncdjango/geoprocessing/workflow.py
Workflow.add_node
def add_node(self, node_id, task, inputs): """ Adds a node to the workflow. :param node_id: A unique identifier for the new node. :param task: The task to run. :param inputs: A mapping of inputs from workflow inputs, or outputs from other nodes. The format should be `{input_name: (source, value), ...}` where `input_name` is the parameter name for the task input, source is "input" or "dependency" and `value` is either the workflow input name (if source is "input") or a 2-tuple with a node id and an output parameter name from that node's task to map to the input. """ if node_id in self.nodes_by_id: raise ValueError('The node {0} already exists in this workflow.'.format(node_id)) node = WorkflowNode(node_id, task, inputs) self.nodes_by_id[node_id] = node for source, value in six.itervalues(inputs): if source == 'dependency': dependents = self.dependents_by_node_id.get(value[0], set()) dependents.add(node_id) self.dependents_by_node_id[value[0]] = dependents
python
def add_node(self, node_id, task, inputs): """ Adds a node to the workflow. :param node_id: A unique identifier for the new node. :param task: The task to run. :param inputs: A mapping of inputs from workflow inputs, or outputs from other nodes. The format should be `{input_name: (source, value), ...}` where `input_name` is the parameter name for the task input, source is "input" or "dependency" and `value` is either the workflow input name (if source is "input") or a 2-tuple with a node id and an output parameter name from that node's task to map to the input. """ if node_id in self.nodes_by_id: raise ValueError('The node {0} already exists in this workflow.'.format(node_id)) node = WorkflowNode(node_id, task, inputs) self.nodes_by_id[node_id] = node for source, value in six.itervalues(inputs): if source == 'dependency': dependents = self.dependents_by_node_id.get(value[0], set()) dependents.add(node_id) self.dependents_by_node_id[value[0]] = dependents
[ "def", "add_node", "(", "self", ",", "node_id", ",", "task", ",", "inputs", ")", ":", "if", "node_id", "in", "self", ".", "nodes_by_id", ":", "raise", "ValueError", "(", "'The node {0} already exists in this workflow.'", ".", "format", "(", "node_id", ")", ")", "node", "=", "WorkflowNode", "(", "node_id", ",", "task", ",", "inputs", ")", "self", ".", "nodes_by_id", "[", "node_id", "]", "=", "node", "for", "source", ",", "value", "in", "six", ".", "itervalues", "(", "inputs", ")", ":", "if", "source", "==", "'dependency'", ":", "dependents", "=", "self", ".", "dependents_by_node_id", ".", "get", "(", "value", "[", "0", "]", ",", "set", "(", ")", ")", "dependents", ".", "add", "(", "node_id", ")", "self", ".", "dependents_by_node_id", "[", "value", "[", "0", "]", "]", "=", "dependents" ]
Adds a node to the workflow. :param node_id: A unique identifier for the new node. :param task: The task to run. :param inputs: A mapping of inputs from workflow inputs, or outputs from other nodes. The format should be `{input_name: (source, value), ...}` where `input_name` is the parameter name for the task input, source is "input" or "dependency" and `value` is either the workflow input name (if source is "input") or a 2-tuple with a node id and an output parameter name from that node's task to map to the input.
[ "Adds", "a", "node", "to", "the", "workflow", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/workflow.py#L179-L201
train
consbio/ncdjango
ncdjango/geoprocessing/workflow.py
Workflow.map_output
def map_output(self, node_id, node_output_name, parameter_name): """ Maps the output from a node to a workflow output. :param node_id: The id of the node to map from. :param node_output_name: The output parameter name for the node task to map to the workflow output. :param parameter_name: The workflow output parameter name. """ self.output_mapping[parameter_name] = (node_id, node_output_name) dependents = self.dependents_by_node_id.get(node_id, set()) dependents.add('output_{}'.format(parameter_name)) self.dependents_by_node_id[node_id] = dependents
python
def map_output(self, node_id, node_output_name, parameter_name): """ Maps the output from a node to a workflow output. :param node_id: The id of the node to map from. :param node_output_name: The output parameter name for the node task to map to the workflow output. :param parameter_name: The workflow output parameter name. """ self.output_mapping[parameter_name] = (node_id, node_output_name) dependents = self.dependents_by_node_id.get(node_id, set()) dependents.add('output_{}'.format(parameter_name)) self.dependents_by_node_id[node_id] = dependents
[ "def", "map_output", "(", "self", ",", "node_id", ",", "node_output_name", ",", "parameter_name", ")", ":", "self", ".", "output_mapping", "[", "parameter_name", "]", "=", "(", "node_id", ",", "node_output_name", ")", "dependents", "=", "self", ".", "dependents_by_node_id", ".", "get", "(", "node_id", ",", "set", "(", ")", ")", "dependents", ".", "add", "(", "'output_{}'", ".", "format", "(", "parameter_name", ")", ")", "self", ".", "dependents_by_node_id", "[", "node_id", "]", "=", "dependents" ]
Maps the output from a node to a workflow output. :param node_id: The id of the node to map from. :param node_output_name: The output parameter name for the node task to map to the workflow output. :param parameter_name: The workflow output parameter name.
[ "Maps", "the", "output", "from", "a", "node", "to", "a", "workflow", "output", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/workflow.py#L203-L216
train
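A hedged wiring sketch for add_node and map_output above; ExampleTask and its parameter names value_in/value_out are hypothetical stand-ins for a registered Task subclass.

# Hypothetical: ExampleTask has input 'value_in' and output 'value_out'.
wf = Workflow(name='demo')
wf.add_node('n1', ExampleTask(), {'value_in': ('input', 'x')})
wf.add_node('n2', ExampleTask(),
            {'value_in': ('dependency', ('n1', 'value_out'))})
wf.map_output('n2', 'value_out', 'result')  # expose n2's output as 'result'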
consbio/ncdjango
ncdjango/geoprocessing/workflow.py
Workflow.to_json
def to_json(self, indent=None): """Serialize this workflow to JSON""" inputs = ParameterCollection(self.inputs) d = { 'meta': { 'name': self.name, 'description': self.description }, 'inputs': [], 'workflow': [], 'outputs': [{'name': k, 'node': v} for k, v in six.iteritems(self.output_mapping)] } for parameter in self.inputs: input_info = { 'name': parameter.name, 'type': parameter.id } args, kwargs = parameter.serialize_args() args = list(args) args.pop(0) # 'name' is already taken care of kwargs.pop('required', None) # 'required' is assumed True for workflow inputs if args or kwargs: input_info['args'] = [args, kwargs] d['inputs'].append(input_info) for node in sorted(six.itervalues(self.nodes_by_id), key=lambda x: x.id): task_name = node.task.name if not task_name: raise ValueError('The task {0} does not have a name and therefore cannot be serialized.'.format( node.task.__class__.__name__) ) node_inputs = {} for input_name, (source, value) in six.iteritems(node.inputs): input_info = {'source': source} if source == 'input': input_info['input'] = inputs.by_name[value].name else: input_info['node'] = value node_inputs[input_name] = input_info d['workflow'].append({ 'id': node.id, 'task': task_name, 'inputs': node_inputs }) return json.dumps(d, indent=indent)
python
def to_json(self, indent=None): """Serialize this workflow to JSON""" inputs = ParameterCollection(self.inputs) d = { 'meta': { 'name': self.name, 'description': self.description }, 'inputs': [], 'workflow': [], 'outputs': [{'name': k, 'node': v} for k, v in six.iteritems(self.output_mapping)] } for parameter in self.inputs: input_info = { 'name': parameter.name, 'type': parameter.id } args, kwargs = parameter.serialize_args() args = list(args) args.pop(0) # 'name' is already taken care of kwargs.pop('required', None) # 'required' is assumed True for workflow inputs if args or kwargs: input_info['args'] = [args, kwargs] d['inputs'].append(input_info) for node in sorted(six.itervalues(self.nodes_by_id), key=lambda x: x.id): task_name = node.task.name if not task_name: raise ValueError('The task {0} does not have a name and therefore cannot be serialized.'.format( node.task.__class__.__name__) ) node_inputs = {} for input_name, (source, value) in six.iteritems(node.inputs): input_info = {'source': source} if source == 'input': input_info['input'] = inputs.by_name[value].name else: input_info['node'] = value node_inputs[input_name] = input_info d['workflow'].append({ 'id': node.id, 'task': task_name, 'inputs': node_inputs }) return json.dumps(d, indent=indent)
[ "def", "to_json", "(", "self", ",", "indent", "=", "None", ")", ":", "inputs", "=", "ParameterCollection", "(", "self", ".", "inputs", ")", "d", "=", "{", "'meta'", ":", "{", "'name'", ":", "self", ".", "name", ",", "'description'", ":", "self", ".", "description", "}", ",", "'inputs'", ":", "[", "]", ",", "'workflow'", ":", "[", "]", ",", "'outputs'", ":", "[", "{", "'name'", ":", "k", ",", "'node'", ":", "v", "}", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "self", ".", "output_mapping", ")", "]", "}", "for", "parameter", "in", "self", ".", "inputs", ":", "input_info", "=", "{", "'name'", ":", "parameter", ".", "name", ",", "'type'", ":", "parameter", ".", "id", "}", "args", ",", "kwargs", "=", "parameter", ".", "serialize_args", "(", ")", "args", "=", "list", "(", "args", ")", "args", ".", "pop", "(", "0", ")", "# 'name' is already taken care of", "kwargs", ".", "pop", "(", "'required'", ",", "None", ")", "# 'required' is assumed True for workflow inputs", "if", "args", "or", "kwargs", ":", "input_info", "[", "'args'", "]", "=", "[", "args", ",", "kwargs", "]", "d", "[", "'inputs'", "]", ".", "append", "(", "input_info", ")", "for", "node", "in", "sorted", "(", "six", ".", "itervalues", "(", "self", ".", "nodes_by_id", ")", ",", "key", "=", "lambda", "x", ":", "x", ".", "id", ")", ":", "task_name", "=", "node", ".", "task", ".", "name", "if", "not", "task_name", ":", "raise", "ValueError", "(", "'The task {0} does not have a name and therefore cannot be serialized.'", ".", "format", "(", "node", ".", "task", ".", "__class__", ".", "__name__", ")", ")", "node_inputs", "=", "{", "}", "for", "input_name", ",", "(", "source", ",", "value", ")", "in", "six", ".", "iteritems", "(", "node", ".", "inputs", ")", ":", "input_info", "=", "{", "'source'", ":", "source", "}", "if", "source", "==", "'input'", ":", "input_info", "[", "'input'", "]", "=", "inputs", ".", "by_name", "[", "value", "]", ".", "name", "else", ":", "input_info", "[", "'node'", "]", "=", "value", "node_inputs", "[", "input_name", "]", "=", "input_info", "d", "[", "'workflow'", "]", ".", "append", "(", "{", "'id'", ":", "node", ".", "id", ",", "'task'", ":", "task_name", ",", "'inputs'", ":", "node_inputs", "}", ")", "return", "json", ".", "dumps", "(", "d", ",", "indent", "=", "indent", ")" ]
Serialize this workflow to JSON
[ "Serialize", "this", "workflow", "to", "JSON" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/workflow.py#L218-L272
train
consbio/ncdjango
ncdjango/geoprocessing/workflow.py
Workflow.from_json
def from_json(cls, text): """Return a new workflow, deserialized from a JSON string""" d = json.loads(text) meta = d.get('meta', {}) workflow = cls(name=meta.get('name'), description=meta.get('description')) for workflow_input in d.get('inputs', []): parameter_cls = Parameter.by_id(workflow_input['type']) args = [workflow_input['name']] kwargs = {'required': True} if workflow_input.get('args'): args = workflow_input['args'][0] + args kwargs.update(workflow_input['args'][1]) args, kwargs = parameter_cls.deserialize_args(args, kwargs) workflow.inputs.append(parameter_cls(*args, **kwargs)) for node in d.get('workflow', []): node_inputs = {} for k, v in six.iteritems(node.get('inputs', {})): node_inputs[k] = (v['source'], v.get('input') or v.get('node')) workflow.add_node(node['id'], Task.by_name(node['task'])(), node_inputs) for output in d.get('outputs', []): node = output['node'] node_parameters = ParameterCollection(workflow.nodes_by_id[node[0]].task.outputs) # Add parameter to workflow output output_param = copy.copy(node_parameters.by_name[node[1]]) output_param.name = output['name'] workflow.outputs.append(output_param) workflow.map_output(node[0], node[1], output['name']) return workflow
python
def from_json(cls, text): """Return a new workflow, deserialized from a JSON string""" d = json.loads(text) meta = d.get('meta', {}) workflow = cls(name=meta.get('name'), description=meta.get('description')) for workflow_input in d.get('inputs', []): parameter_cls = Parameter.by_id(workflow_input['type']) args = [workflow_input['name']] kwargs = {'required': True} if workflow_input.get('args'): args = workflow_input['args'][0] + args kwargs.update(workflow_input['args'][1]) args, kwargs = parameter_cls.deserialize_args(args, kwargs) workflow.inputs.append(parameter_cls(*args, **kwargs)) for node in d.get('workflow', []): node_inputs = {} for k, v in six.iteritems(node.get('inputs', {})): node_inputs[k] = (v['source'], v.get('input') or v.get('node')) workflow.add_node(node['id'], Task.by_name(node['task'])(), node_inputs) for output in d.get('outputs', []): node = output['node'] node_parameters = ParameterCollection(workflow.nodes_by_id[node[0]].task.outputs) # Add parameter to workflow output output_param = copy.copy(node_parameters.by_name[node[1]]) output_param.name = output['name'] workflow.outputs.append(output_param) workflow.map_output(node[0], node[1], output['name']) return workflow
[ "def", "from_json", "(", "cls", ",", "text", ")", ":", "d", "=", "json", ".", "loads", "(", "text", ")", "meta", "=", "d", ".", "get", "(", "'meta'", ",", "{", "}", ")", "workflow", "=", "cls", "(", "name", "=", "meta", ".", "get", "(", "'name'", ")", ",", "description", "=", "meta", ".", "get", "(", "'description'", ")", ")", "for", "workflow_input", "in", "d", ".", "get", "(", "'inputs'", ",", "[", "]", ")", ":", "parameter_cls", "=", "Parameter", ".", "by_id", "(", "workflow_input", "[", "'type'", "]", ")", "args", "=", "[", "workflow_input", "[", "'name'", "]", "]", "kwargs", "=", "{", "'required'", ":", "True", "}", "if", "workflow_input", ".", "get", "(", "'args'", ")", ":", "args", "=", "workflow_input", "[", "'args'", "]", "[", "0", "]", "+", "args", "kwargs", ".", "update", "(", "workflow_input", "[", "'args'", "]", "[", "1", "]", ")", "args", ",", "kwargs", "=", "parameter_cls", ".", "deserialize_args", "(", "args", ",", "kwargs", ")", "workflow", ".", "inputs", ".", "append", "(", "parameter_cls", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "for", "node", "in", "d", ".", "get", "(", "'workflow'", ",", "[", "]", ")", ":", "node_inputs", "=", "{", "}", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "node", ".", "get", "(", "'inputs'", ",", "{", "}", ")", ")", ":", "node_inputs", "[", "k", "]", "=", "(", "v", "[", "'source'", "]", ",", "v", ".", "get", "(", "'input'", ")", "or", "v", ".", "get", "(", "'node'", ")", ")", "workflow", ".", "add_node", "(", "node", "[", "'id'", "]", ",", "Task", ".", "by_name", "(", "node", "[", "'task'", "]", ")", "(", ")", ",", "node_inputs", ")", "for", "output", "in", "d", ".", "get", "(", "'outputs'", ",", "[", "]", ")", ":", "node", "=", "output", "[", "'node'", "]", "node_parameters", "=", "ParameterCollection", "(", "workflow", ".", "nodes_by_id", "[", "node", "[", "0", "]", "]", ".", "task", ".", "outputs", ")", "# Add parameter to workflow output", "output_param", "=", "copy", ".", "copy", "(", "node_parameters", ".", "by_name", "[", "node", "[", "1", "]", "]", ")", "output_param", ".", "name", "=", "output", "[", "'name'", "]", "workflow", ".", "outputs", ".", "append", "(", "output_param", ")", "workflow", ".", "map_output", "(", "node", "[", "0", "]", ",", "node", "[", "1", "]", ",", "output", "[", "'name'", "]", ")", "return", "workflow" ]
Return a new workflow, deserialized from a JSON string
[ "Return", "a", "new", "workflow", "deserialized", "from", "a", "JSON", "string" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/workflow.py#L275-L313
train
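A minimal usage sketch for the from_json record above, assuming a file whose contents match the schema sketched earlier; the file name and assertions are illustrative only:

import json

from ncdjango.geoprocessing.workflow import Workflow  # module path per the record

# 'workflow.json' is a hypothetical file holding a document in the schema
# that Workflow.to_json produces.
with open('workflow.json') as f:
    workflow = Workflow.from_json(f.read())

# from_json and to_json are intended as JSON-level inverses, so a round
# trip should preserve at least the metadata.
assert json.loads(workflow.to_json())['meta']['name'] == workflow.name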
MasterKale/django-cra-helper
cra_helper/management/commands/runserver.py
Command.get_handler
def get_handler(self, *args, **options):
        '''
        Return the static files serving handler wrapping the default handler,
        if static files should be served. Otherwise return the default handler.
        '''
        handler = super().get_handler(*args, **options)
        use_static_handler = options['use_static_handler']
        insecure_serving = options['insecure_serving']

        if use_static_handler and (settings.DEBUG or insecure_serving):
            return CRAStaticFilesHandler(handler)

        return handler
python
def get_handler(self, *args, **options):
        '''
        Return the static files serving handler wrapping the default handler,
        if static files should be served. Otherwise return the default handler.
        '''
        handler = super().get_handler(*args, **options)
        use_static_handler = options['use_static_handler']
        insecure_serving = options['insecure_serving']

        if use_static_handler and (settings.DEBUG or insecure_serving):
            return CRAStaticFilesHandler(handler)

        return handler
[ "def", "get_handler", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "handler", "=", "super", "(", ")", ".", "get_handler", "(", "*", "args", ",", "*", "*", "options", ")", "use_static_handler", "=", "options", "[", "'use_static_handler'", "]", "insecure_serving", "=", "options", "[", "'insecure_serving'", "]", "if", "use_static_handler", "and", "(", "settings", ".", "DEBUG", "or", "insecure_serving", ")", ":", "return", "CRAStaticFilesHandler", "(", "handler", ")", "return", "handler" ]
Return the static files serving handler wrapping the default handler, if static files should be served. Otherwise return the default handler.
[ "Return", "the", "static", "files", "serving", "handler", "wrapping", "the", "default", "handler", "if", "static", "files", "should", "be", "served", ".", "Otherwise", "return", "the", "default", "handler", "." ]
ba50c643c181a18b80ee9bbdbea74b58abd6daad
https://github.com/MasterKale/django-cra-helper/blob/ba50c643c181a18b80ee9bbdbea74b58abd6daad/cra_helper/management/commands/runserver.py#L11-L21
train
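The get_handler record above reduces to a single boolean condition. The standalone mirror below restates it so the interplay of DEBUG and the runserver options is explicit; the helper name is invented, and the option names are assumed to correspond to the --nostatic / --insecure flags defined by the parent staticfiles runserver command:

# Hedged sketch: a pure-function mirror of the condition in Command.get_handler.
def serves_cra_static(debug, use_static_handler=True, insecure_serving=False):
    """Return True when the command would wrap the handler in CRAStaticFilesHandler."""
    return use_static_handler and (debug or insecure_serving)

assert serves_cra_static(debug=True)                                # plain dev server
assert serves_cra_static(debug=False, insecure_serving=True)        # runserver --insecure
assert not serves_cra_static(debug=True, use_static_handler=False)  # runserver --nostatic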