Dataset schema (one record per Python function; field lengths from the source dump):

    repo       string, 7-55 chars
    path       string, 4-127 chars
    func_name  string, 1-88 chars
    code       string, 75-19.8k chars (full function source, docstring included)
    language   1 value ("python")
    sha        40-char commit hash
    url        string, 87-242 chars (GitHub blob link with line range)
    partition  1 value ("train")
markfinger/assembla
assembla/api.py
WikiPage.write
def write(self):
    """
    Create or update a Wiki Page on Assembla
    """
    if not hasattr(self, 'space'):
        raise AttributeError("A WikiPage must have a 'space' attribute before you can write it to Assembla.")
    self.api = self.space.api
    if self.get('id'):
        # We are modifying an existing wiki page
        return self.api._put_json(
            self,
            space=self.space,
            rel_path=self.space._build_rel_path('wiki_pages'),
            id_field='id'
        )
    else:
        # Creating a new wiki page
        return self.api._post_json(
            self,
            space=self.space,
            rel_path=self.space._build_rel_path('wiki_pages'),
        )
python
967a77a5ba718df94f60e832b6e0cf14c72426aa
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L573-L594
train
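A hedged usage sketch for the method above: the POST-vs-PUT branch choice comes from the record, while the API constructor arguments and the space/page lookup helpers are assumptions about the package's client-style API, and the credentials are placeholders.

# Hypothetical usage; key/secret are placeholders, and the lookup helpers
# (spaces, wiki_pages) are assumed from the client API, not the record.
from assembla import API

api = API(key='your-key', secret='your-secret')
space = api.spaces(name='My Space')[0]

page = space.wiki_pages(page_name='Home')[0]
page['contents'] = 'Updated via the API'
page.write()  # page has an 'id', so this takes the PUT (update) branch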
Genida/dependenpy
src/dependenpy/finder.py
PackageSpec.add
def add(self, spec):
    """
    Add limitations of given spec to self's.

    Args:
        spec (PackageSpec): another spec.
    """
    for limit in spec.limit_to:
        if limit not in self.limit_to:
            self.limit_to.append(limit)
python
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/finder.py#L33-L42
train
Genida/dependenpy
src/dependenpy/finder.py
PackageSpec.combine
def combine(specs):
    """
    Combine package specifications' limitations.

    Args:
        specs (list of PackageSpec): the package specifications.

    Returns:
        list of PackageSpec: the new, merged list of PackageSpec.
    """
    new_specs = {}
    for spec in specs:
        if new_specs.get(spec, None) is None:
            new_specs[spec] = spec
        else:
            new_specs[spec].add(spec)
    return list(new_specs.values())
python
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/finder.py#L45-L61
train
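A short sketch of how add and combine interact: specs that compare equal collapse into one dict entry, whose limitations are merged. The PackageSpec constructor arguments below are assumptions; only limit_to, add, and combine come from the records.

# Hypothetical constructor signature; add()/combine() are from the records.
from dependenpy.finder import PackageSpec

a = PackageSpec('django', '/site-packages/django', limit_to=['db'])
b = PackageSpec('django', '/site-packages/django', limit_to=['http', 'db'])

merged = PackageSpec.combine([a, b])
print(len(merged))         # 1: equal specs collapse into one entry
print(merged[0].limit_to)  # ['db', 'http']: limitations merged, no repeats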
Genida/dependenpy
src/dependenpy/finder.py
Finder.find
def find(self, package, **kwargs):
    """
    Find a package using package finders.

    Return the first package found.

    Args:
        package (str): package to find.
        **kwargs (): additional keyword arguments used by finders.

    Returns:
        PackageSpec: if package found, else None
    """
    for finder in self.finders:
        package_spec = finder.find(package, **kwargs)
        if package_spec:
            return package_spec
    return None
python
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/finder.py#L163-L180
train
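find is a simple chain-of-responsibility over self.finders. A hedged sketch: LocalPackageFinder and InstalledPackageFinder are real class names in this module, but how a Finder gets its finders list here is an assumption.

# Hedged sketch: Finder construction details are assumed, find() is not.
from dependenpy.finder import Finder, InstalledPackageFinder, LocalPackageFinder

finder = Finder()
finder.finders = [LocalPackageFinder(), InstalledPackageFinder()]  # assumed wiring

spec = finder.find('dependenpy')  # first finder that returns a spec wins
if spec is not None:
    print(spec.name, spec.path)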
noahbenson/pimms
pimms/util.py
_lazy_turbo_mapping
def _lazy_turbo_mapping(initial, pre_size):
    '''
    _lazy_turbo_mapping is a blatant copy of the pyrsistent._pmap._turbo_mapping function,
    except it works for lazy maps; this seems like the only way to fully overload PMap.
    '''
    size = pre_size or (2 * len(initial)) or 8
    buckets = size * [None]
    if not isinstance(initial, colls.Mapping):
        initial = dict(initial)
    for k, v in six.iteritems(initial):
        h = hash(k)
        index = h % size
        bucket = buckets[index]
        if bucket:
            bucket.append((k, v))
        else:
            buckets[index] = [(k, v)]
    return LazyPMap(len(initial), ps.pvector().extend(buckets))
python
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/util.py#L686-L700
train
noahbenson/pimms
pimms/util.py
lazy_map
def lazy_map(initial={}, pre_size=0):
    '''
    lazy_map is a blatant copy of the pyrsistent.pmap function, and is used to create lazy maps.
    '''
    if is_lazy_map(initial): return initial
    if not initial: return _EMPTY_LMAP
    return _lazy_turbo_mapping(initial, pre_size)
python
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/util.py#L702-L708
train
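A hedged sketch of what these constructors buy you, assuming lazy_map is re-exported at the package level: a zero-argument function stored as a value is not called until its key is first read (see _examine_val in the next record for the access-time machinery).

# Assumes pimms re-exports lazy_map from pimms.util.
import pimms

def costly():
    print('computing...')
    return 42

m = pimms.lazy_map({'eager': 1, 'lazy': costly})
print(m['eager'])  # 1, no computation triggered
print(m['lazy'])   # prints 'computing...' then 42
print(m['lazy'])   # 42 again, memoized rather than recomputed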
noahbenson/pimms
pimms/util.py
LazyPMap._examine_val
def _examine_val(self, k, val):
    'should only be called internally'
    if not isinstance(val, (types.FunctionType, partial)): return val
    vid = id(val)
    if vid in self._memoized:
        return self._memoized[vid]
    elif [] != getargspec_py27like(val)[0]:
        return val
    else:
        val = val()
        object.__setattr__(self, '_memoized', self._memoized.set(vid, val))
        return val
python
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/util.py#L582-L593
train
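The heart of _examine_val, reduced to a self-contained stand-in: values that are zero-argument callables are evaluated once on first access and cached, keyed by the thunk's id. This sketch uses a plain dict subclass rather than pimms' persistent-map machinery.

import inspect

class LazyAccessDict(dict):
    """Stand-in sketch: zero-arg callables are computed once on access."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._memoized = {}

    def __getitem__(self, key):
        val = super().__getitem__(key)
        # Plain values, and callables that require arguments, pass through.
        if not callable(val) or inspect.signature(val).parameters:
            return val
        vid = id(val)
        if vid not in self._memoized:
            self._memoized[vid] = val()  # compute once, cache by thunk id
        return self._memoized[vid]

d = LazyAccessDict(answer=lambda: 6 * 7, name='pimms')
assert d['answer'] == 42 and d['name'] == 'pimms'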
praekeltfoundation/seed-message-sender
message_sender/signals.py
psh_fire_msg_action_if_new
def psh_fire_msg_action_if_new(sender, instance, created, **kwargs):
    """
    Post save hook to fire message send task
    """
    if created:
        from message_sender.tasks import send_message

        send_message.apply_async(kwargs={"message_id": str(instance.id)})
python
257b01635171b9dbe1f5f13baa810c971bb2620e
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/signals.py#L4-L10
train
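A hook with this signature is attached through Django's post_save signal. A hedged wiring sketch; OutboundMessage is a placeholder model name, not taken from the record.

# Hypothetical wiring; the model name is a placeholder.
from django.db.models.signals import post_save

from message_sender.models import OutboundMessage  # assumed model
from message_sender.signals import psh_fire_msg_action_if_new

post_save.connect(
    psh_fire_msg_action_if_new,
    sender=OutboundMessage,
    dispatch_uid="psh_fire_msg_action_if_new",
)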
praekeltfoundation/seed-message-sender
message_sender/signals.py
update_default_channels
def update_default_channels(sender, instance, created, **kwargs):
    """
    Post save hook to ensure that there is only one default
    """
    if instance.default:
        Channel.objects.filter(default=True).exclude(
            channel_id=instance.channel_id
        ).update(default=False)
python
257b01635171b9dbe1f5f13baa810c971bb2620e
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/signals.py#L13-L19
train
romanorac/discomll
discomll/classification/linear_svm.py
map_fit
def map_fit(interface, state, label, inp):
    """
    Function calculates matrices ete and etde for every sample, aggregates and outputs them.
    """
    import numpy as np

    ete, etde = 0, 0
    out = interface.output(0)
    for row in inp:
        row = row.strip().split(state["delimiter"])  # split row
        if len(row) > 1:  # check if row is empty
            # intercept term is added to every sample
            x = np.array([(0 if v in state["missing_vals"] else float(v))
                          for i, v in enumerate(row) if i in state["X_indices"]] + [-1])
            # map label value to 1 or -1; if the label matches neither, mark it as an error
            y = (1 if state["y_map"][0] == row[state["y_index"]]
                 else -1 if state["y_map"][1] == row[state["y_index"]]
                 else "Error")
            ete += np.outer(x, x)
            etde += x * y

    out.add("etde", etde)
    for i, row in enumerate(ete):
        out.add(i, row)
python
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/classification/linear_svm.py#L15-L36
train
romanorac/discomll
discomll/classification/linear_svm.py
reduce_fit
def reduce_fit(interface, state, label, inp):
    """
    Function joins all partially calculated matrices ETE and ETDe, aggregates them and
    calculates the final parameters.
    """
    import numpy as np

    out = interface.output(0)
    sum_etde = 0
    sum_ete = [0 for _ in range(len(state["X_indices"]) + 1)]
    for key, value in inp:
        if key == "etde":
            sum_etde += value
        else:
            sum_ete[key] += value

    sum_ete += np.true_divide(np.eye(len(sum_ete)), state["nu"])
    out.add("params", np.linalg.lstsq(sum_ete, sum_etde)[0])
python
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/classification/linear_svm.py#L39-L55
train
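Together, map_fit and reduce_fit assemble the linear proximal SVM normal equations: with design matrix E (features plus a trailing -1 intercept column) and labels y in {-1, 1}, the weights solve (I/nu + E'E) w = E'De, where D = diag(y) and e is the all-ones vector. A self-contained numpy sketch of the same computation on in-memory data (no Disco pipeline):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = np.where(X @ np.array([1.0, -2.0, 0.5]) > 0, 1, -1)

E = np.hstack([X, -np.ones((len(X), 1))])  # trailing -1 intercept column
ete = E.T @ E                              # what map_fit accumulates as np.outer(x, x)
etde = E.T @ y                             # E'De with D = diag(y), e = ones
nu = 0.1

w = np.linalg.lstsq(ete + np.eye(E.shape[1]) / nu, etde, rcond=None)[0]
accuracy = (np.sign(E @ w) == y).mean()
print(accuracy)  # close to 1.0 on this separable toy data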
romanorac/discomll
discomll/classification/linear_svm.py
fit
def fit(dataset, nu=0.1, save_results=True, show=False):
    """
    Function starts a job for calculation of model parameters.

    Parameters
    ----------
    dataset - dataset object with input urls and other parameters
    nu - parameter to adjust the classifier
    save_results - save results to ddfs
    show - show info about job execution

    Returns
    -------
    Urls of fit model results on ddfs
    """
    from disco.worker.pipeline.worker import Worker, Stage
    from disco.core import Job

    if dataset.params["y_map"] == []:
        raise Exception("Linear proximal SVM requires a target label mapping parameter.")
    try:
        nu = float(nu)
        if nu <= 0:
            raise Exception("Parameter nu should be greater than 0")
    except ValueError:
        raise Exception("Parameter should be numerical.")

    job = Job(worker=Worker(save_results=save_results))
    # job parallelizes mappers and joins them with one reducer
    job.pipeline = [
        ("split", Stage("map", input_chain=dataset.params["input_chain"],
                        init=simple_init, process=map_fit)),
        ('group_all', Stage("reduce", init=simple_init, process=reduce_fit, combine=True))]

    job.params = dataset.params
    job.params["nu"] = nu
    job.run(name="linearsvm_fit", input=dataset.params["data_tag"])

    fitmodel_url = job.wait(show=show)
    return {"linsvm_fitmodel": fitmodel_url}
python
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/classification/linear_svm.py#L77-L115
train
romanorac/discomll
discomll/classification/linear_svm.py
predict
def predict(dataset, fitmodel_url, save_results=True, show=False):
    """
    Function starts a job that makes predictions on input data with a given model.

    Parameters
    ----------
    dataset - dataset object with input urls and other parameters
    fitmodel_url - model created in fit phase
    save_results - save results to ddfs
    show - show info about job execution

    Returns
    -------
    Urls with predictions on ddfs
    """
    from disco.worker.pipeline.worker import Worker, Stage
    from disco.core import Job, result_iterator

    if "linsvm_fitmodel" not in fitmodel_url:
        raise Exception("Incorrect fit model.")

    job = Job(worker=Worker(save_results=save_results))
    # job parallelizes execution of mappers
    job.pipeline = [
        ("split", Stage("map", input_chain=dataset.params["input_chain"],
                        init=simple_init, process=map_predict))]

    job.params = dataset.params
    job.params["fit_params"] = [v for _, v in result_iterator(fitmodel_url["linsvm_fitmodel"])][0]
    job.run(name="linsvm_predict", input=dataset.params["data_tag"])

    return job.wait(show=show)
python
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/classification/linear_svm.py#L118-L148
train
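A hedged end-to-end sketch of the two entry points. It needs a running Disco cluster and tagged data in DDFS; the dataset.Data construction below is an assumption about discomll's loader, while the fit/predict signatures come from the records above.

# Hedged sketch: requires a Disco cluster; dataset.Data arguments are assumed.
from discomll import dataset
from discomll.classification import linear_svm

train = dataset.Data(data_tag=["tag://train_data"],
                     X_indices=range(0, 4),
                     y_index=4,
                     y_map=["setosa", "versicolor"])

fitmodel_url = linear_svm.fit(train, nu=0.1)  # {'linsvm_fitmodel': <urls>}
predictions = linear_svm.predict(train, fitmodel_url)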
djaodjin/djaodjin-deployutils
deployutils/apps/django/mockup/views.py
RedirectFormMixin.validate_redirect_url
def validate_redirect_url(next_url):
    """
    Returns the next_url path if next_url matches allowed hosts.
    """
    if not next_url:
        return None
    parts = urlparse(next_url)
    if parts.netloc:
        domain, _ = split_domain_port(parts.netloc)
        allowed_hosts = (['*'] if django_settings.DEBUG
                         else django_settings.ALLOWED_HOSTS)
        if not (domain and validate_host(domain, allowed_hosts)):
            return None
    return urlunparse(("", "", parts.path,
                       parts.params, parts.query, parts.fragment))
python
a0fe3cf3030dbbf09025c69ce75a69b326565dd8
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/apps/django/mockup/views.py#L51-L65
train
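The same allow-list idea in a fully standalone form, with Django's split_domain_port/validate_host replaced by a plain hostname check; this is a sketch of the pattern, not the deployutils code:

from urllib.parse import urlparse, urlunparse

ALLOWED_HOSTS = {'example.com', 'www.example.com'}

def safe_redirect_path(next_url):
    """Strip scheme and host, rejecting URLs that point at foreign hosts."""
    if not next_url:
        return None
    parts = urlparse(next_url)
    if parts.netloc and parts.hostname not in ALLOWED_HOSTS:
        return None
    return urlunparse(("", "", parts.path, parts.params, parts.query, parts.fragment))

assert safe_redirect_path('https://example.com/app/?page=2') == '/app/?page=2'
assert safe_redirect_path('https://evil.test/phish') is None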
bitlabstudio/django-currency-history
currency_history/templatetags/currency_history_tags.py
convert_currency
def convert_currency(amount, from_currency, to_currency):
    '''
    Converts currencies.

    Example:

        {% convert_currency 2 'EUR' 'SGD' as amount %}

    '''
    try:
        rate = CurrencyRate.objects.get(
            from_currency__iso_code=from_currency,
            to_currency__iso_code=to_currency)
    except CurrencyRate.DoesNotExist:
        return _('n/a')
    try:
        history = rate.history.all()[0]
    except IndexError:
        return _('n/a')
    return amount * history.value
python
65fd0fd79c925cd2e87db59762a4ee7066e47c9f
https://github.com/bitlabstudio/django-currency-history/blob/65fd0fd79c925cd2e87db59762a4ee7066e47c9f/currency_history/templatetags/currency_history_tags.py#L11-L29
train
djaodjin/djaodjin-deployutils
deployutils/apps/django/urlbuilders.py
url_prefixed
def url_prefixed(regex, view, name=None):
    """
    Returns a urlpattern prefixed with the APP_NAME in debug mode.
    """
    return url(r'^%(app_prefix)s%(regex)s' % {
        'app_prefix': APP_PREFIX, 'regex': regex}, view, name=name)
python
a0fe3cf3030dbbf09025c69ce75a69b326565dd8
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/apps/django/urlbuilders.py#L37-L42
train
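A hedged urls.py sketch for the builder above; the view import and the route are placeholders, not taken from the record.

# Hypothetical usage; DashboardView and the route are placeholders.
from deployutils.apps.django.urlbuilders import url_prefixed
from myapp.views import DashboardView

urlpatterns = [
    url_prefixed(r'dashboard/$', DashboardView.as_view(), name='dashboard'),
]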
mgoral/subconvert
src/subconvert/gui/DataModel.py
DataController.createDataFromFile
def createDataFromFile(self, filePath, inputEncoding=None, defaultFps=None):
    """Fetch a given filePath and parse its contents.

    May raise the following exceptions:
    * RuntimeError - generic exception telling that parsing was unsuccessful
    * IOError - failed to open a file at given filePath

    @return SubtitleData filled with non-empty, default datafields. Client should
    modify them and then perform an add/update operation"""
    file_ = File(filePath)
    if inputEncoding is None:
        inputEncoding = file_.detectEncoding()
    inputEncoding = inputEncoding.lower()

    videoInfo = VideoInfo(defaultFps) if defaultFps is not None else file_.detectFps()
    subtitles = self._parseFile(file_, inputEncoding, videoInfo.fps)

    data = SubtitleData()
    data.subtitles = subtitles
    data.fps = videoInfo.fps
    data.inputEncoding = inputEncoding
    data.outputEncoding = inputEncoding
    data.outputFormat = self._parser.parsedFormat()
    data.videoPath = videoInfo.videoPath
    return data
python
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/gui/DataModel.py#L62-L88
train
mkoura/dump2polarion
dump2polarion/properties.py
_set_property
def _set_property(xml_root, name, value, properties=None):
    """Sets property to specified value."""
    if properties is None:
        properties = xml_root.find("properties")

    for prop in properties:
        if prop.get("name") == name:
            prop.set("value", utils.get_unicode_str(value))
            break
    else:
        etree.SubElement(
            properties, "property", {"name": name, "value": utils.get_unicode_str(value)}
        )
python
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/properties.py#L19-L31
train
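The for/else set-or-create idiom above, restated as a self-contained sketch with the standard library's ElementTree (dump2polarion's etree binding and unicode helper are dropped):

import xml.etree.ElementTree as ET

def set_property(properties, name, value):
    """Update an existing <property>, or append one (for/else idiom)."""
    for prop in properties:
        if prop.get("name") == name:
            prop.set("value", value)
            break
    else:  # loop finished without break: the property was missing
        ET.SubElement(properties, "property", {"name": name, "value": value})

root = ET.fromstring("<testsuites><properties/></testsuites>")
props = root.find("properties")
set_property(props, "polarion-project-id", "PROJ1")
set_property(props, "polarion-project-id", "PROJ2")  # updates in place
print(ET.tostring(root).decode())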
mkoura/dump2polarion
dump2polarion/properties.py
generate_response_property
def generate_response_property(name=None, value=None):
    """Generates response property."""
    name = name or "dump2polarion"
    value = value or "".join(random.sample(string.ascii_lowercase, 12))
    return (name, value)
python
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/properties.py#L75-L79
train
mkoura/dump2polarion
dump2polarion/properties.py
fill_response_property
def fill_response_property(xml_root, name=None, value=None):
    """Returns response property and fills it if missing."""
    name, value = generate_response_property(name, value)
    response_property = None

    if xml_root.tag == "testsuites":
        response_property = _fill_testsuites_response_property(xml_root, name, value)
    elif xml_root.tag in ("testcases", "requirements"):
        response_property = _fill_non_testsuites_response_property(xml_root, name, value)
    else:
        raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG)

    return response_property
python
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/properties.py#L122-L134
train
mkoura/dump2polarion
dump2polarion/properties.py
remove_response_property
def remove_response_property(xml_root):
    """Removes response properties if they exist."""
    if xml_root.tag == "testsuites":
        properties = xml_root.find("properties")
        resp_properties = []
        for prop in properties:
            prop_name = prop.get("name", "")
            if "polarion-response-" in prop_name:
                resp_properties.append(prop)
        for resp_property in resp_properties:
            properties.remove(resp_property)
    elif xml_root.tag in ("testcases", "requirements"):
        resp_properties = xml_root.find("response-properties")
        if resp_properties is not None:
            xml_root.remove(resp_properties)
    else:
        raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG)
python
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/properties.py#L137-L153
train
mkoura/dump2polarion
dump2polarion/properties.py
remove_property
def remove_property(xml_root, partial_name):
    """Removes properties if they exist."""
    if xml_root.tag in ("testsuites", "testcases", "requirements"):
        properties = xml_root.find("properties")
        remove_properties = []
        for prop in properties:
            prop_name = prop.get("name", "")
            if partial_name in prop_name:
                remove_properties.append(prop)
        for rem_prop in remove_properties:
            properties.remove(rem_prop)
    else:
        raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG)
python
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/properties.py#L156-L168
train
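Both removal helpers collect matching elements first and delete them in a second pass; removing children while iterating over a parent skips siblings. A standalone ElementTree sketch of that design choice:

import xml.etree.ElementTree as ET

root = ET.fromstring(
    '<properties>'
    '<property name="polarion-response-a" value="x"/>'
    '<property name="keep-me" value="y"/>'
    '<property name="polarion-response-b" value="z"/>'
    '</properties>'
)

# Collect first, remove second: deleting during iteration skips elements.
doomed = [prop for prop in root if "polarion-response-" in prop.get("name", "")]
for prop in doomed:
    root.remove(prop)

print([prop.get("name") for prop in root])  # ['keep-me']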
mkoura/dump2polarion
dump2polarion/properties.py
set_lookup_method
def set_lookup_method(xml_root, value):
    """Changes lookup method."""
    if xml_root.tag == "testsuites":
        _set_property(xml_root, "polarion-lookup-method", value)
    elif xml_root.tag in ("testcases", "requirements"):
        _set_property(xml_root, "lookup-method", value)
    else:
        raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG)
python
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/properties.py#L171-L178
train
mkoura/dump2polarion
dump2polarion/properties.py
set_dry_run
def set_dry_run(xml_root, value=True):
    """Sets dry-run so records are not updated, only log file is produced."""
    value_str = str(value).lower()
    assert value_str in ("true", "false")

    if xml_root.tag == "testsuites":
        _set_property(xml_root, "polarion-dry-run", value_str)
    elif xml_root.tag in ("testcases", "requirements"):
        _set_property(xml_root, "dry-run", value_str)
    else:
        raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG)
python
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/properties.py#L181-L190
train
darvid/biome
src/biome/__init__.py
Habitat.get_environ
def get_environ(cls, prefix):
    """Retrieves environment variables from a namespace.

    Args:
        prefix (str): The prefix, without a trailing underscore.

    Returns:
        generator: Environment variable keys (with the prefix
            stripped) and values.

    """
    return ((key[len(prefix) + 1:], value)
            for key, value in os.environ.items()
            if key.startswith('%s_' % prefix))
python
e1f1945165df9def31af42e5e13b623e1de97f01
https://github.com/darvid/biome/blob/e1f1945165df9def31af42e5e13b623e1de97f01/src/biome/__init__.py#L63-L75
train
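A standalone illustration of the namespacing get_environ performs (pure stdlib, not the biome API):

import os

os.environ.update({'MYAPP_HOST': 'localhost', 'MYAPP_PORT': '8080', 'OTHER': 'x'})

prefix = 'MYAPP'
namespaced = {key[len(prefix) + 1:]: value
              for key, value in os.environ.items()
              if key.startswith('%s_' % prefix)}

print(namespaced)  # {'HOST': 'localhost', 'PORT': '8080'}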
darvid/biome
src/biome/__init__.py
Habitat.get_bool
def get_bool(self, name, default=None):
    """Retrieves an environment variable value as ``bool``.

    Integer values are converted as expected: zero evaluates to
    ``False``, and non-zero to ``True``. String values of ``'true'``
    and ``'false'`` are evaluated case-insensitively.

    Args:
        name (str): The case-insensitive, unprefixed variable name.
        default: If provided, a default value will be returned
            instead of throwing ``EnvironmentError``.

    Returns:
        bool: The environment variable's value as a ``bool``.

    Raises:
        EnvironmentError: If the environment variable does not
            exist, and ``default`` was not provided.
        ValueError: If the environment variable value could not
            be interpreted as a ``bool``.

    """
    if name not in self:
        if default is not None:
            return default
        raise EnvironmentError.not_found(self._prefix, name)
    return bool(self.get_int(name))
python
e1f1945165df9def31af42e5e13b623e1de97f01
https://github.com/darvid/biome/blob/e1f1945165df9def31af42e5e13b623e1de97f01/src/biome/__init__.py#L102-L128
train
darvid/biome
src/biome/__init__.py
Habitat.get_dict
def get_dict(self, name, default=None):
    """Retrieves an environment variable value as a dictionary.

    Args:
        name (str): The case-insensitive, unprefixed variable name.
        default: If provided, a default value will be returned
            instead of throwing ``EnvironmentError``.

    Returns:
        dict: The environment variable's value as a ``dict``.

    Raises:
        EnvironmentError: If the environment variable does not
            exist, and ``default`` was not provided.

    """
    if name not in self:
        if default is not None:
            return default
        raise EnvironmentError.not_found(self._prefix, name)
    return dict(**self.get(name))
python
e1f1945165df9def31af42e5e13b623e1de97f01
https://github.com/darvid/biome/blob/e1f1945165df9def31af42e5e13b623e1de97f01/src/biome/__init__.py#L130-L150
train
darvid/biome
src/biome/__init__.py
Habitat.get_int
def get_int(self, name, default=None): """Retrieves an environment variable as an integer. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: int: The environment variable's value as an integer. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided. ValueError: If the environment variable value is not an integer with base 10. """ if name not in self: if default is not None: return default raise EnvironmentError.not_found(self._prefix, name) return int(self[name])
python
def get_int(self, name, default=None): """Retrieves an environment variable as an integer. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: int: The environment variable's value as an integer. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided. ValueError: If the environment variable value is not an integer with base 10. """ if name not in self: if default is not None: return default raise EnvironmentError.not_found(self._prefix, name) return int(self[name])
[ "def", "get_int", "(", "self", ",", "name", ",", "default", "=", "None", ")", ":", "if", "name", "not", "in", "self", ":", "if", "default", "is", "not", "None", ":", "return", "default", "raise", "EnvironmentError", ".", "not_found", "(", "self", ".", "_prefix", ",", "name", ")", "return", "int", "(", "self", "[", "name", "]", ")" ]
Retrieves an environment variable as an integer. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: int: The environment variable's value as an integer. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided. ValueError: If the environment variable value is not an integer with base 10.
[ "Retrieves", "an", "environment", "variable", "as", "an", "integer", "." ]
e1f1945165df9def31af42e5e13b623e1de97f01
https://github.com/darvid/biome/blob/e1f1945165df9def31af42e5e13b623e1de97f01/src/biome/__init__.py#L152-L174
train
darvid/biome
src/biome/__init__.py
Habitat.get_list
def get_list(self, name, default=None): """Retrieves an environment variable as a list. Note that while implicit access of environment variables containing tuples will return tuples, using this method will coerce tuples to lists. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: list: The environment variable's value as a list. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided. """ if name not in self: if default is not None: return default raise EnvironmentError.not_found(self._prefix, name) return list(self[name])
python
def get_list(self, name, default=None): """Retrieves an environment variable as a list. Note that while implicit access of environment variables containing tuples will return tuples, using this method will coerce tuples to lists. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: list: The environment variable's value as a list. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided. """ if name not in self: if default is not None: return default raise EnvironmentError.not_found(self._prefix, name) return list(self[name])
[ "def", "get_list", "(", "self", ",", "name", ",", "default", "=", "None", ")", ":", "if", "name", "not", "in", "self", ":", "if", "default", "is", "not", "None", ":", "return", "default", "raise", "EnvironmentError", ".", "not_found", "(", "self", ".", "_prefix", ",", "name", ")", "return", "list", "(", "self", "[", "name", "]", ")" ]
Retrieves an environment variable as a list. Note that while implicit access of environment variables containing tuples will return tuples, using this method will coerce tuples to lists. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: list: The environment variable's value as a list. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided.
[ "Retrieves", "an", "environment", "variable", "as", "a", "list", "." ]
e1f1945165df9def31af42e5e13b623e1de97f01
https://github.com/darvid/biome/blob/e1f1945165df9def31af42e5e13b623e1de97f01/src/biome/__init__.py#L176-L202
train
darvid/biome
src/biome/__init__.py
Habitat.get_path
def get_path(self, name, default=None): """Retrieves an environment variable as a filesystem path. Requires the `pathlib`_ library if using Python <= 3.4. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: pathlib.Path: The environment variable as a ``pathlib.Path`` object. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided. .. _pathlib: https://pypi.python.org/pypi/pathlib/ """ if name not in self: if default is not None: return default raise EnvironmentError.not_found(self._prefix, name) return pathlib.Path(self[name])
python
def get_path(self, name, default=None): """Retrieves an environment variable as a filesystem path. Requires the `pathlib`_ library if using Python <= 3.4. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: pathlib.Path: The environment variable as a ``pathlib.Path`` object. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided. .. _pathlib: https://pypi.python.org/pypi/pathlib/ """ if name not in self: if default is not None: return default raise EnvironmentError.not_found(self._prefix, name) return pathlib.Path(self[name])
[ "def", "get_path", "(", "self", ",", "name", ",", "default", "=", "None", ")", ":", "if", "name", "not", "in", "self", ":", "if", "default", "is", "not", "None", ":", "return", "default", "raise", "EnvironmentError", ".", "not_found", "(", "self", ".", "_prefix", ",", "name", ")", "return", "pathlib", ".", "Path", "(", "self", "[", "name", "]", ")" ]
Retrieves an environment variable as a filesystem path. Requires the `pathlib`_ library if using Python <= 3.4. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: pathlib.Path: The environment variable as a ``pathlib.Path`` object. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided. .. _pathlib: https://pypi.python.org/pypi/pathlib/
[ "Retrieves", "an", "environment", "variable", "as", "a", "filesystem", "path", "." ]
e1f1945165df9def31af42e5e13b623e1de97f01
https://github.com/darvid/biome/blob/e1f1945165df9def31af42e5e13b623e1de97f01/src/biome/__init__.py#L204-L230
train
darvid/biome
src/biome/__init__.py
Habitat.refresh
def refresh(self): """Update all environment variables from ``os.environ``. Use if ``os.environ`` was modified dynamically *after* you accessed an environment namespace with ``biome``. """ super(Habitat, self).update(self.get_environ(self._prefix))
python
def refresh(self): """Update all environment variables from ``os.environ``. Use if ``os.environ`` was modified dynamically *after* you accessed an environment namespace with ``biome``. """ super(Habitat, self).update(self.get_environ(self._prefix))
[ "def", "refresh", "(", "self", ")", ":", "super", "(", "Habitat", ",", "self", ")", ".", "update", "(", "self", ".", "get_environ", "(", "self", ".", "_prefix", ")", ")" ]
Update all environment variables from ``os.environ``. Use if ``os.environ`` was modified dynamically *after* you accessed an environment namespace with ``biome``.
[ "Update", "all", "environment", "variables", "from", "os", ".", "environ", "." ]
e1f1945165df9def31af42e5e13b623e1de97f01
https://github.com/darvid/biome/blob/e1f1945165df9def31af42e5e13b623e1de97f01/src/biome/__init__.py#L232-L239
train
mkoura/dump2polarion
dump2polarion/exporters/xunit_exporter.py
XunitExport._transform_result
def _transform_result(self, result): """Calls transform function on result.""" if self._transform_func: result = self._transform_func(result) return result or None
python
def _transform_result(self, result): """Calls transform function on result.""" if self._transform_func: result = self._transform_func(result) return result or None
[ "def", "_transform_result", "(", "self", ",", "result", ")", ":", "if", "self", ".", "_transform_func", ":", "result", "=", "self", ".", "_transform_func", "(", "result", ")", "return", "result", "or", "None" ]
Calls transform function on result.
[ "Calls", "transform", "function", "on", "result", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/xunit_exporter.py#L144-L148
train
mkoura/dump2polarion
dump2polarion/exporters/xunit_exporter.py
XunitExport._get_verdict
def _get_verdict(result): """Gets verdict of the testcase.""" verdict = result.get("verdict") if not verdict: return None verdict = verdict.strip().lower() if verdict not in Verdicts.PASS + Verdicts.FAIL + Verdicts.SKIP + Verdicts.WAIT: return None return verdict
python
def _get_verdict(result): """Gets verdict of the testcase.""" verdict = result.get("verdict") if not verdict: return None verdict = verdict.strip().lower() if verdict not in Verdicts.PASS + Verdicts.FAIL + Verdicts.SKIP + Verdicts.WAIT: return None return verdict
[ "def", "_get_verdict", "(", "result", ")", ":", "verdict", "=", "result", ".", "get", "(", "\"verdict\"", ")", "if", "not", "verdict", ":", "return", "None", "verdict", "=", "verdict", ".", "strip", "(", ")", ".", "lower", "(", ")", "if", "verdict", "not", "in", "Verdicts", ".", "PASS", "+", "Verdicts", ".", "FAIL", "+", "Verdicts", ".", "SKIP", "+", "Verdicts", ".", "WAIT", ":", "return", "None", "return", "verdict" ]
Gets verdict of the testcase.
[ "Gets", "verdict", "of", "the", "testcase", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/xunit_exporter.py#L151-L159
train
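The normalization above is easy to check in isolation. In this sketch the verdict groups are assumptions standing in for dump2polarion's Verdicts constants, which this record does not show:

PASS, FAIL, SKIP, WAIT = ("passed",), ("failed",), ("skipped",), ("waiting",)

def get_verdict(result):
    # Same steps as XunitExport._get_verdict: fetch, trim, lowercase, validate.
    verdict = result.get("verdict")
    if not verdict:
        return None
    verdict = verdict.strip().lower()
    if verdict not in PASS + FAIL + SKIP + WAIT:
        return None
    return verdict

assert get_verdict({"verdict": "  PASSED "}) == "passed"  # whitespace and case normalized
assert get_verdict({"verdict": "bogus"}) is None          # unknown verdicts are rejected
assert get_verdict({}) is None                            # missing verdict key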
mkoura/dump2polarion
dump2polarion/exporters/xunit_exporter.py
XunitExport._set_lookup_prop
def _set_lookup_prop(self, result_data): """Set lookup property based on processed testcases if not configured.""" if self._lookup_prop: return if result_data.get("id"): self._lookup_prop = "id" elif result_data.get("title"): self._lookup_prop = "name" else: return logger.debug("Setting lookup method for xunit to `%s`", self._lookup_prop)
python
def _set_lookup_prop(self, result_data): """Set lookup property based on processed testcases if not configured.""" if self._lookup_prop: return if result_data.get("id"): self._lookup_prop = "id" elif result_data.get("title"): self._lookup_prop = "name" else: return logger.debug("Setting lookup method for xunit to `%s`", self._lookup_prop)
[ "def", "_set_lookup_prop", "(", "self", ",", "result_data", ")", ":", "if", "self", ".", "_lookup_prop", ":", "return", "if", "result_data", ".", "get", "(", "\"id\"", ")", ":", "self", ".", "_lookup_prop", "=", "\"id\"", "elif", "result_data", ".", "get", "(", "\"title\"", ")", ":", "self", ".", "_lookup_prop", "=", "\"name\"", "else", ":", "return", "logger", ".", "debug", "(", "\"Setting lookup method for xunit to `%s`\"", ",", "self", ".", "_lookup_prop", ")" ]
Set lookup property based on processed testcases if not configured.
[ "Set", "lookup", "property", "based", "on", "processed", "testcases", "if", "not", "configured", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/xunit_exporter.py#L161-L173
train
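The selection rule reads clearly as a pure function; this sketch returns the chosen lookup property instead of mutating exporter state:

def pick_lookup_prop(result_data, current=None):
    # "id" wins over "title"; an already-configured value is kept as-is.
    if current:
        return current
    if result_data.get("id"):
        return "id"
    if result_data.get("title"):
        return "name"
    return None

assert pick_lookup_prop({"id": "RHCF3-1"}) == "id"
assert pick_lookup_prop({"title": "test_x"}) == "name"
assert pick_lookup_prop({"title": "test_x"}, current="id") == "id"  # unchanged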
mkoura/dump2polarion
dump2polarion/exporters/xunit_exporter.py
XunitExport._fill_out_err
def _fill_out_err(result, testcase): """Adds stdout and stderr if present.""" if result.get("stdout"): system_out = etree.SubElement(testcase, "system-out") system_out.text = utils.get_unicode_str(result["stdout"]) if result.get("stderr"): system_err = etree.SubElement(testcase, "system-err") system_err.text = utils.get_unicode_str(result["stderr"])
python
def _fill_out_err(result, testcase): """Adds stdout and stderr if present.""" if result.get("stdout"): system_out = etree.SubElement(testcase, "system-out") system_out.text = utils.get_unicode_str(result["stdout"]) if result.get("stderr"): system_err = etree.SubElement(testcase, "system-err") system_err.text = utils.get_unicode_str(result["stderr"])
[ "def", "_fill_out_err", "(", "result", ",", "testcase", ")", ":", "if", "result", ".", "get", "(", "\"stdout\"", ")", ":", "system_out", "=", "etree", ".", "SubElement", "(", "testcase", ",", "\"system-out\"", ")", "system_out", ".", "text", "=", "utils", ".", "get_unicode_str", "(", "result", "[", "\"stdout\"", "]", ")", "if", "result", ".", "get", "(", "\"stderr\"", ")", ":", "system_err", "=", "etree", ".", "SubElement", "(", "testcase", ",", "\"system-err\"", ")", "system_err", ".", "text", "=", "utils", ".", "get_unicode_str", "(", "result", "[", "\"stderr\"", "]", ")" ]
Adds stdout and stderr if present.
[ "Adds", "stdout", "and", "stderr", "if", "present", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/xunit_exporter.py#L199-L207
train
mkoura/dump2polarion
dump2polarion/exporters/xunit_exporter.py
XunitExport._fill_properties
def _fill_properties(verdict, result, testcase, testcase_id, testcase_title): """Adds properties into testcase element.""" properties = etree.SubElement(testcase, "properties") etree.SubElement( properties, "property", {"name": "polarion-testcase-id", "value": testcase_id or testcase_title}, ) if verdict in Verdicts.PASS and result.get("comment"): etree.SubElement( properties, "property", { "name": "polarion-testcase-comment", "value": utils.get_unicode_str(result["comment"]), }, ) for param, value in six.iteritems(result.get("params") or {}): etree.SubElement( properties, "property", { "name": "polarion-parameter-{}".format(param), "value": utils.get_unicode_str(value), }, )
python
def _fill_properties(verdict, result, testcase, testcase_id, testcase_title): """Adds properties into testcase element.""" properties = etree.SubElement(testcase, "properties") etree.SubElement( properties, "property", {"name": "polarion-testcase-id", "value": testcase_id or testcase_title}, ) if verdict in Verdicts.PASS and result.get("comment"): etree.SubElement( properties, "property", { "name": "polarion-testcase-comment", "value": utils.get_unicode_str(result["comment"]), }, ) for param, value in six.iteritems(result.get("params") or {}): etree.SubElement( properties, "property", { "name": "polarion-parameter-{}".format(param), "value": utils.get_unicode_str(value), }, )
[ "def", "_fill_properties", "(", "verdict", ",", "result", ",", "testcase", ",", "testcase_id", ",", "testcase_title", ")", ":", "properties", "=", "etree", ".", "SubElement", "(", "testcase", ",", "\"properties\"", ")", "etree", ".", "SubElement", "(", "properties", ",", "\"property\"", ",", "{", "\"name\"", ":", "\"polarion-testcase-id\"", ",", "\"value\"", ":", "testcase_id", "or", "testcase_title", "}", ",", ")", "if", "verdict", "in", "Verdicts", ".", "PASS", "and", "result", ".", "get", "(", "\"comment\"", ")", ":", "etree", ".", "SubElement", "(", "properties", ",", "\"property\"", ",", "{", "\"name\"", ":", "\"polarion-testcase-comment\"", ",", "\"value\"", ":", "utils", ".", "get_unicode_str", "(", "result", "[", "\"comment\"", "]", ")", ",", "}", ",", ")", "for", "param", ",", "value", "in", "six", ".", "iteritems", "(", "result", ".", "get", "(", "\"params\"", ")", "or", "{", "}", ")", ":", "etree", ".", "SubElement", "(", "properties", ",", "\"property\"", ",", "{", "\"name\"", ":", "\"polarion-parameter-{}\"", ".", "format", "(", "param", ")", ",", "\"value\"", ":", "utils", ".", "get_unicode_str", "(", "value", ")", ",", "}", ",", ")" ]
Adds properties into testcase element.
[ "Adds", "properties", "into", "testcase", "element", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/xunit_exporter.py#L210-L236
train
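The resulting properties structure can be previewed with lxml directly. The testcase id and parameter below are made-up sample values; only the element shapes follow the code above:

from lxml import etree

testcase = etree.Element("testcase", name="test_login")
properties = etree.SubElement(testcase, "properties")
etree.SubElement(
    properties, "property",
    {"name": "polarion-testcase-id", "value": "RHCF3-1234"})
for param, value in {"browser": "firefox"}.items():
    etree.SubElement(
        properties, "property",
        {"name": "polarion-parameter-{}".format(param), "value": value})

print(etree.tostring(testcase, pretty_print=True).decode())
# <testcase name="test_login">
#   <properties>
#     <property name="polarion-testcase-id" value="RHCF3-1234"/>
#     <property name="polarion-parameter-browser" value="firefox"/>
#   </properties>
# </testcase>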
mkoura/dump2polarion
dump2polarion/exporters/xunit_exporter.py
XunitExport.export
def export(self): """Returns XUnit XML.""" top = self._top_element() properties = self._properties_element(top) testsuite = self._testsuite_element(top) self._fill_tests_results(testsuite) self._fill_lookup_prop(properties) return utils.prettify_xml(top)
python
def export(self): """Returns XUnit XML.""" top = self._top_element() properties = self._properties_element(top) testsuite = self._testsuite_element(top) self._fill_tests_results(testsuite) self._fill_lookup_prop(properties) return utils.prettify_xml(top)
[ "def", "export", "(", "self", ")", ":", "top", "=", "self", ".", "_top_element", "(", ")", "properties", "=", "self", ".", "_properties_element", "(", "top", ")", "testsuite", "=", "self", ".", "_testsuite_element", "(", "top", ")", "self", ".", "_fill_tests_results", "(", "testsuite", ")", "self", ".", "_fill_lookup_prop", "(", "properties", ")", "return", "utils", ".", "prettify_xml", "(", "top", ")" ]
Returns XUnit XML.
[ "Returns", "XUnit", "XML", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/xunit_exporter.py#L290-L297
train
mkoura/dump2polarion
dump2polarion/parselogs.py
parse
def parse(log_file): """Parse log file.""" with io.open(os.path.expanduser(log_file), encoding="utf-8") as input_file: for line in input_file: if "Starting import of XUnit results" in line: obj = XUnitParser break elif "Starting import of test cases" in line: obj = TestcasesParser break elif "Starting import of requirements" in line: obj = RequirementsParser break else: raise Dump2PolarionException( "No valid data found in the log file '{}'".format(log_file) ) return obj(input_file, log_file).parse()
python
def parse(log_file): """Parse log file.""" with io.open(os.path.expanduser(log_file), encoding="utf-8") as input_file: for line in input_file: if "Starting import of XUnit results" in line: obj = XUnitParser break elif "Starting import of test cases" in line: obj = TestcasesParser break elif "Starting import of requirements" in line: obj = RequirementsParser break else: raise Dump2PolarionException( "No valid data found in the log file '{}'".format(log_file) ) return obj(input_file, log_file).parse()
[ "def", "parse", "(", "log_file", ")", ":", "with", "io", ".", "open", "(", "os", ".", "path", ".", "expanduser", "(", "log_file", ")", ",", "encoding", "=", "\"utf-8\"", ")", "as", "input_file", ":", "for", "line", "in", "input_file", ":", "if", "\"Starting import of XUnit results\"", "in", "line", ":", "obj", "=", "XUnitParser", "break", "elif", "\"Starting import of test cases\"", "in", "line", ":", "obj", "=", "TestcasesParser", "break", "elif", "\"Starting import of requirements\"", "in", "line", ":", "obj", "=", "RequirementsParser", "break", "else", ":", "raise", "Dump2PolarionException", "(", "\"No valid data found in the log file '{}'\"", ".", "format", "(", "log_file", ")", ")", "return", "obj", "(", "input_file", ",", "log_file", ")", ".", "parse", "(", ")" ]
Parse log file.
[ "Parse", "log", "file", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/parselogs.py#L246-L264
train
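The dispatch keys off marker lines near the top of the log. A standalone sketch of the same selection, with the parser classes replaced by labels:

MARKERS = (
    ("Starting import of XUnit results", "XUnitParser"),
    ("Starting import of test cases", "TestcasesParser"),
    ("Starting import of requirements", "RequirementsParser"),
)

def pick_parser(log_text):
    # First marker found wins, mirroring the for/else in parse() above.
    for line in log_text.splitlines():
        for marker, parser in MARKERS:
            if marker in line:
                return parser
    raise ValueError("No valid data found in the log file")

assert pick_parser("...\nStarting import of test cases\n...") == "TestcasesParser"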
mkoura/dump2polarion
dump2polarion/parselogs.py
XUnitParser.get_result
def get_result(self, line): """Gets work item name and id.""" res = self.RESULT_SEARCH.search(line) try: name, ids = res.group(1), res.group(2) except (AttributeError, IndexError): return None ids = ids.split("/") tc_id = ids[0] try: custom_id = ids[1] except IndexError: custom_id = None return LogItem(name, tc_id, custom_id)
python
def get_result(self, line): """Gets work item name and id.""" res = self.RESULT_SEARCH.search(line) try: name, ids = res.group(1), res.group(2) except (AttributeError, IndexError): return None ids = ids.split("/") tc_id = ids[0] try: custom_id = ids[1] except IndexError: custom_id = None return LogItem(name, tc_id, custom_id)
[ "def", "get_result", "(", "self", ",", "line", ")", ":", "res", "=", "self", ".", "RESULT_SEARCH", ".", "search", "(", "line", ")", "try", ":", "name", ",", "ids", "=", "res", ".", "group", "(", "1", ")", ",", "res", ".", "group", "(", "2", ")", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "return", "None", "ids", "=", "ids", ".", "split", "(", "\"/\"", ")", "tc_id", "=", "ids", "[", "0", "]", "try", ":", "custom_id", "=", "ids", "[", "1", "]", "except", "IndexError", ":", "custom_id", "=", "None", "return", "LogItem", "(", "name", ",", "tc_id", ",", "custom_id", ")" ]
Gets work item name and id.
[ "Gets", "work", "item", "name", "and", "id", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/parselogs.py#L63-L77
train
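RESULT_SEARCH itself is not shown in this record; the part worth isolating is the split of the matched id field into a work item id and an optional custom id:

def split_ids(ids):
    # "RHCF3-1234/c42" -> ("RHCF3-1234", "c42"); no slash -> no custom id.
    parts = ids.split("/")
    return parts[0], parts[1] if len(parts) > 1 else None

assert split_ids("RHCF3-1234") == ("RHCF3-1234", None)
assert split_ids("RHCF3-1234/c42") == ("RHCF3-1234", "c42")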
mkoura/dump2polarion
dump2polarion/parselogs.py
XUnitParser.get_result_warn
def get_result_warn(self, line): """Gets work item name of item that was not successfully imported.""" res = self.RESULT_WARN_SEARCH.search(line) try: return LogItem(res.group(1), None, None) except (AttributeError, IndexError): pass # try again with custom ID res = self.RESULT_WARN_SEARCH_CUSTOM.search(line) try: return LogItem(res.group(1), None, res.group(2)) except (AttributeError, IndexError): return None
python
def get_result_warn(self, line): """Gets work item name of item that was not successfully imported.""" res = self.RESULT_WARN_SEARCH.search(line) try: return LogItem(res.group(1), None, None) except (AttributeError, IndexError): pass # try again with custom ID res = self.RESULT_WARN_SEARCH_CUSTOM.search(line) try: return LogItem(res.group(1), None, res.group(2)) except (AttributeError, IndexError): return None
[ "def", "get_result_warn", "(", "self", ",", "line", ")", ":", "res", "=", "self", ".", "RESULT_WARN_SEARCH", ".", "search", "(", "line", ")", "try", ":", "return", "LogItem", "(", "res", ".", "group", "(", "1", ")", ",", "None", ",", "None", ")", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "pass", "# try again with custom ID", "res", "=", "self", ".", "RESULT_WARN_SEARCH_CUSTOM", ".", "search", "(", "line", ")", "try", ":", "return", "LogItem", "(", "res", ".", "group", "(", "1", ")", ",", "None", ",", "res", ".", "group", "(", "2", ")", ")", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "return", "None" ]
Gets work item name of item that was not successfully imported.
[ "Gets", "work", "item", "name", "of", "item", "that", "was", "not", "successfully", "imported", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/parselogs.py#L79-L92
train
mkoura/dump2polarion
dump2polarion/parselogs.py
RequirementsParser.get_requirement
def get_requirement(self, line): """Gets requirement name and id.""" res = self.REQ_SEARCH.search(line) try: name, tc_id = res.group(1), res.group(2) except (AttributeError, IndexError): return None return LogItem(name, tc_id, None)
python
def get_requirement(self, line): """Gets requirement name and id.""" res = self.REQ_SEARCH.search(line) try: name, tc_id = res.group(1), res.group(2) except (AttributeError, IndexError): return None return LogItem(name, tc_id, None)
[ "def", "get_requirement", "(", "self", ",", "line", ")", ":", "res", "=", "self", ".", "REQ_SEARCH", ".", "search", "(", "line", ")", "try", ":", "name", ",", "tc_id", "=", "res", ".", "group", "(", "1", ")", ",", "res", ".", "group", "(", "2", ")", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "return", "None", "return", "LogItem", "(", "name", ",", "tc_id", ",", "None", ")" ]
Gets requirement name and id.
[ "Gets", "requirement", "name", "and", "id", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/parselogs.py#L198-L206
train
mkoura/dump2polarion
dump2polarion/parselogs.py
RequirementsParser.get_requirement_warn
def get_requirement_warn(self, line): """Gets name of requirement that was not successfully imported.""" res = self.REQ_WARN_SEARCH.search(line) try: return LogItem(res.group(1), None, None) except (AttributeError, IndexError): return None
python
def get_requirement_warn(self, line): """Gets name of requirement that was not successfully imported.""" res = self.REQ_WARN_SEARCH.search(line) try: return LogItem(res.group(1), None, None) except (AttributeError, IndexError): return None
[ "def", "get_requirement_warn", "(", "self", ",", "line", ")", ":", "res", "=", "self", ".", "REQ_WARN_SEARCH", ".", "search", "(", "line", ")", "try", ":", "return", "LogItem", "(", "res", ".", "group", "(", "1", ")", ",", "None", ",", "None", ")", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "return", "None" ]
Gets name of requirement that was not successfully imported.
[ "Gets", "name", "of", "test", "case", "that", "was", "not", "successfully", "imported", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/parselogs.py#L208-L214
train
djaodjin/djaodjin-deployutils
deployutils/configs.py
load_config
def load_config(app_name, *args, **kwargs): """ Given a path to a file, parse its lines in ini-like format, and then set them in the current namespace. Quiet by default. Set verbose to True to see the absolute path to the config files printed on stderr. """ configure_logging() # compatible with Python 2 and 3. prefix = kwargs.get('prefix', 'etc') verbose = kwargs.get('verbose', False) location = kwargs.get('location', None) passphrase = kwargs.get('passphrase', os.getenv("%s_SETTINGS_CRYPT_KEY" % app_name.upper(), os.getenv("SETTINGS_CRYPT_KEY", None))) confnames = args if not location: location = os.getenv("%s_SETTINGS_LOCATION" % app_name.upper(), None) if not location: location = os.getenv("SETTINGS_LOCATION", None) if location: location = "%s/%s" % (location, app_name) config = {} for confname in confnames: content = None if location and location.startswith('s3://'): try: import boto _, bucket_name, prefix = urlparse(location)[:3] try: conn = boto.connect_s3() bucket = conn.get_bucket(bucket_name) key_name = '%s/%s' % (prefix, confname) key = bucket.get_key(key_name) content = key.get_contents_as_string() if verbose: LOGGER.info("config loaded from 's3://%s/%s'", bucket_name, key_name) except (boto.exception.NoAuthHandlerFound, boto.exception.S3ResponseError) as _: pass except ImportError: pass # We cannot find a deployutils S3 bucket. Let's look on the filesystem. if not content: confpath = locate_config( confname, app_name, location=location, prefix=prefix, verbose=verbose) if confpath: with open(confpath, 'rb') as conffile: content = conffile.read() if content: if passphrase: content = crypt.decrypt(content, passphrase) if hasattr(content, 'decode'): content = content.decode('utf-8') for line in content.split('\n'): if not line.startswith('#'): look = re.match(r'(\w+)\s*=\s*(.*)', line) if look: try: # We used to parse the file line by line. # Once Django 1.5 introduced ALLOWED_HOSTS # (a tuple that definitely belongs to the site.conf # set), we had no choice other than resort # to eval(value, {}, {}). # We are not resorting to import conf module yet # but that might be necessary once we use # dictionary configs for some of the apps... # TODO: consider using something like ConfigObj # for this: # http://www.voidspace.org.uk/python/configobj.html #pylint:disable=eval-used config.update({look.group(1).upper(): eval(look.group(2), {}, {})}) except Exception: raise return config
python
def load_config(app_name, *args, **kwargs): """ Given a path to a file, parse its lines in ini-like format, and then set them in the current namespace. Quiet by default. Set verbose to True to see the absolute path to the config files printed on stderr. """ configure_logging() # compatible with Python 2 and 3. prefix = kwargs.get('prefix', 'etc') verbose = kwargs.get('verbose', False) location = kwargs.get('location', None) passphrase = kwargs.get('passphrase', os.getenv("%s_SETTINGS_CRYPT_KEY" % app_name.upper(), os.getenv("SETTINGS_CRYPT_KEY", None))) confnames = args if not location: location = os.getenv("%s_SETTINGS_LOCATION" % app_name.upper(), None) if not location: location = os.getenv("SETTINGS_LOCATION", None) if location: location = "%s/%s" % (location, app_name) config = {} for confname in confnames: content = None if location and location.startswith('s3://'): try: import boto _, bucket_name, prefix = urlparse(location)[:3] try: conn = boto.connect_s3() bucket = conn.get_bucket(bucket_name) key_name = '%s/%s' % (prefix, confname) key = bucket.get_key(key_name) content = key.get_contents_as_string() if verbose: LOGGER.info("config loaded from 's3://%s/%s'", bucket_name, key_name) except (boto.exception.NoAuthHandlerFound, boto.exception.S3ResponseError) as _: pass except ImportError: pass # We cannot find a deployutils S3 bucket. Let's look on the filesystem. if not content: confpath = locate_config( confname, app_name, location=location, prefix=prefix, verbose=verbose) if confpath: with open(confpath, 'rb') as conffile: content = conffile.read() if content: if passphrase: content = crypt.decrypt(content, passphrase) if hasattr(content, 'decode'): content = content.decode('utf-8') for line in content.split('\n'): if not line.startswith('#'): look = re.match(r'(\w+)\s*=\s*(.*)', line) if look: try: # We used to parse the file line by line. # Once Django 1.5 introduced ALLOWED_HOSTS # (a tuple that definitely belongs to the site.conf # set), we had no choice other than resort # to eval(value, {}, {}). # We are not resorting to import conf module yet # but that might be necessary once we use # dictionary configs for some of the apps... # TODO: consider using something like ConfigObj # for this: # http://www.voidspace.org.uk/python/configobj.html #pylint:disable=eval-used config.update({look.group(1).upper(): eval(look.group(2), {}, {})}) except Exception: raise return config
[ "def", "load_config", "(", "app_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "configure_logging", "(", ")", "# compatible with Python 2 and 3.", "prefix", "=", "kwargs", ".", "get", "(", "'prefix'", ",", "'etc'", ")", "verbose", "=", "kwargs", ".", "get", "(", "'verbose'", ",", "False", ")", "location", "=", "kwargs", ".", "get", "(", "'location'", ",", "None", ")", "passphrase", "=", "kwargs", ".", "get", "(", "'passphrase'", ",", "os", ".", "getenv", "(", "\"%s_SETTINGS_CRYPT_KEY\"", "%", "app_name", ".", "upper", "(", ")", ",", "os", ".", "getenv", "(", "\"SETTINGS_CRYPT_KEY\"", ",", "None", ")", ")", ")", "confnames", "=", "args", "if", "not", "location", ":", "location", "=", "os", ".", "getenv", "(", "\"%s_SETTINGS_LOCATION\"", "%", "app_name", ".", "upper", "(", ")", ",", "None", ")", "if", "not", "location", ":", "location", "=", "os", ".", "getenv", "(", "\"SETTINGS_LOCATION\"", ",", "None", ")", "if", "location", ":", "location", "=", "\"%s/%s\"", "%", "(", "location", ",", "app_name", ")", "config", "=", "{", "}", "for", "confname", "in", "confnames", ":", "content", "=", "None", "if", "location", "and", "location", ".", "startswith", "(", "'s3://'", ")", ":", "try", ":", "import", "boto", "_", ",", "bucket_name", ",", "prefix", "=", "urlparse", "(", "location", ")", "[", ":", "3", "]", "try", ":", "conn", "=", "boto", ".", "connect_s3", "(", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "bucket_name", ")", "key_name", "=", "'%s/%s'", "%", "(", "prefix", ",", "confname", ")", "key", "=", "bucket", ".", "get_key", "(", "key_name", ")", "content", "=", "key", ".", "get_contents_as_string", "(", ")", "if", "verbose", ":", "LOGGER", ".", "info", "(", "\"config loaded from 's3://%s/%s'\"", ",", "bucket_name", ",", "key_name", ")", "except", "(", "boto", ".", "exception", ".", "NoAuthHandlerFound", ",", "boto", ".", "exception", ".", "S3ResponseError", ")", "as", "_", ":", "pass", "except", "ImportError", ":", "pass", "# We cannot find a deployutils S3 bucket. 
Let's look on the filesystem.", "if", "not", "content", ":", "confpath", "=", "locate_config", "(", "confname", ",", "app_name", ",", "location", "=", "location", ",", "prefix", "=", "prefix", ",", "verbose", "=", "verbose", ")", "if", "confpath", ":", "with", "open", "(", "confpath", ",", "'rb'", ")", "as", "conffile", ":", "content", "=", "conffile", ".", "read", "(", ")", "if", "content", ":", "if", "passphrase", ":", "content", "=", "crypt", ".", "decrypt", "(", "content", ",", "passphrase", ")", "if", "hasattr", "(", "content", ",", "'decode'", ")", ":", "content", "=", "content", ".", "decode", "(", "'utf-8'", ")", "for", "line", "in", "content", ".", "split", "(", "'\\n'", ")", ":", "if", "not", "line", ".", "startswith", "(", "'#'", ")", ":", "look", "=", "re", ".", "match", "(", "r'(\\w+)\\s*=\\s*(.*)'", ",", "line", ")", "if", "look", ":", "try", ":", "# We used to parse the file line by line.", "# Once Django 1.5 introduced ALLOWED_HOSTS", "# (a tuple that definitely belongs to the site.conf", "# set), we had no choice other than resort", "# to eval(value, {}, {}).", "# We are not resorting to import conf module yet", "# but that might be necessary once we use", "# dictionary configs for some of the apps...", "# TODO: consider using something like ConfigObj", "# for this:", "# http://www.voidspace.org.uk/python/configobj.html", "#pylint:disable=eval-used", "config", ".", "update", "(", "{", "look", ".", "group", "(", "1", ")", ".", "upper", "(", ")", ":", "eval", "(", "look", ".", "group", "(", "2", ")", ",", "{", "}", ",", "{", "}", ")", "}", ")", "except", "Exception", ":", "raise", "return", "config" ]
Given a path to a file, parse its lines in ini-like format, and then set them in the current namespace. Quiet by default. Set verbose to True to see the absolute path to the config files printed on stderr.
[ "Given", "a", "path", "to", "a", "file", "parse", "its", "lines", "in", "ini", "-", "like", "format", "and", "then", "set", "them", "in", "the", "current", "namespace", "." ]
a0fe3cf3030dbbf09025c69ce75a69b326565dd8
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/configs.py#L82-L166
train
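The line-parsing loop at the heart of load_config can be run on its own. This sketch uses the same regex and the same eval(value, {}, {}) call, which is why right-hand sides must be Python literals (the ALLOWED_HOSTS tuple mentioned in the comments is the canonical example):

import re

content = "\n".join([
    "# comment lines are skipped",
    "DEBUG = True",
    "ALLOWED_HOSTS = ('example.com', 'www.example.com')",
    "DB_NAME = 'appdb'",
])

config = {}
for line in content.split("\n"):
    if not line.startswith("#"):
        look = re.match(r"(\w+)\s*=\s*(.*)", line)
        if look:
            # Same trusted-input eval as above; keys are upper-cased.
            config[look.group(1).upper()] = eval(look.group(2), {}, {})

assert config["DEBUG"] is True
assert config["ALLOWED_HOSTS"] == ("example.com", "www.example.com")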
skioo/django-customer-billing
billing/actions/accounts.py
close
def close(account_id: str) -> None: """ Closes the account. :param account_id: the account to close :return: Nothing """ logger.info('closing-account', account_id=account_id) with transaction.atomic(): account = Account.objects.get(pk=account_id) account.close() account.save()
python
def close(account_id: str) -> None: """ Closes the account. :param account_id: the account to close :return: Nothing """ logger.info('closing-account', account_id=account_id) with transaction.atomic(): account = Account.objects.get(pk=account_id) account.close() account.save()
[ "def", "close", "(", "account_id", ":", "str", ")", "->", "None", ":", "logger", ".", "info", "(", "'closing-account'", ",", "account_id", "=", "account_id", ")", "with", "transaction", ".", "atomic", "(", ")", ":", "account", "=", "Account", ".", "objects", ".", "get", "(", "pk", "=", "account_id", ")", "account", ".", "close", "(", ")", "account", ".", "save", "(", ")" ]
Closes the account. :param account_id: the account to close :return: Nothing
[ "Closes", "the", "account", "." ]
6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d
https://github.com/skioo/django-customer-billing/blob/6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d/billing/actions/accounts.py#L22-L33
train
skioo/django-customer-billing
billing/actions/accounts.py
create_invoices
def create_invoices(account_id: str, due_date: date) -> Sequence[Invoice]: """ Creates the invoices for any due positive charges in the account. If there are due positive charges in different currencies, one invoice is created for each currency. :param account_id: The account to invoice. :param due_date: The due date for any invoice that gets created. :return: A possibly-empty list of Invoices. """ invoices = [] with transaction.atomic(): due_charges = Charge.objects \ .uninvoiced(account_id=account_id) \ .charges() total = total_amount(due_charges) for amount_due in total.monies(): if amount_due.amount > 0: invoice = Invoice.objects.create(account_id=account_id, due_date=due_date) Charge.objects \ .uninvoiced(account_id=account_id) \ .charges() \ .in_currency(currency=amount_due.currency) \ .update(invoice=invoice) invoices.append(invoice) logger.info('created-invoices', account_id=str(account_id), invoice_ids=[i.pk for i in invoices]) for invoice in invoices: invoice_ready.send(sender=create_invoices, invoice=invoice) return invoices
python
def create_invoices(account_id: str, due_date: date) -> Sequence[Invoice]: """ Creates the invoices for any due positive charges in the account. If there are due positive charges in different currencies, one invoice is created for each currency. :param account_id: The account to invoice. :param due_date: The due date for any invoice that gets created. :return: A possibly-empty list of Invoices. """ invoices = [] with transaction.atomic(): due_charges = Charge.objects \ .uninvoiced(account_id=account_id) \ .charges() total = total_amount(due_charges) for amount_due in total.monies(): if amount_due.amount > 0: invoice = Invoice.objects.create(account_id=account_id, due_date=due_date) Charge.objects \ .uninvoiced(account_id=account_id) \ .charges() \ .in_currency(currency=amount_due.currency) \ .update(invoice=invoice) invoices.append(invoice) logger.info('created-invoices', account_id=str(account_id), invoice_ids=[i.pk for i in invoices]) for invoice in invoices: invoice_ready.send(sender=create_invoices, invoice=invoice) return invoices
[ "def", "create_invoices", "(", "account_id", ":", "str", ",", "due_date", ":", "date", ")", "->", "Sequence", "[", "Invoice", "]", ":", "invoices", "=", "[", "]", "with", "transaction", ".", "atomic", "(", ")", ":", "due_charges", "=", "Charge", ".", "objects", ".", "uninvoiced", "(", "account_id", "=", "account_id", ")", ".", "charges", "(", ")", "total", "=", "total_amount", "(", "due_charges", ")", "for", "amount_due", "in", "total", ".", "monies", "(", ")", ":", "if", "amount_due", ".", "amount", ">", "0", ":", "invoice", "=", "Invoice", ".", "objects", ".", "create", "(", "account_id", "=", "account_id", ",", "due_date", "=", "due_date", ")", "Charge", ".", "objects", ".", "uninvoiced", "(", "account_id", "=", "account_id", ")", ".", "charges", "(", ")", ".", "in_currency", "(", "currency", "=", "amount_due", ".", "currency", ")", ".", "update", "(", "invoice", "=", "invoice", ")", "invoices", ".", "append", "(", "invoice", ")", "logger", ".", "info", "(", "'created-invoices'", ",", "account_id", "=", "str", "(", "account_id", ")", ",", "invoice_ids", "=", "[", "i", ".", "pk", "for", "i", "in", "invoices", "]", ")", "for", "invoice", "in", "invoices", ":", "invoice_ready", ".", "send", "(", "sender", "=", "create_invoices", ",", "invoice", "=", "invoice", ")", "return", "invoices" ]
Creates the invoices for any due positive charges in the account. If there are due positive charges in different currencies, one invoice is created for each currency. :param account_id: The account to invoice. :param due_date: The due date for any invoice that gets created. :return: A possibly-empty list of Invoices.
[ "Creates", "the", "invoices", "for", "any", "due", "positive", "charges", "in", "the", "account", ".", "If", "there", "are", "due", "positive", "charges", "in", "different", "currencies", "one", "invoice", "is", "created", "for", "each", "currency", "." ]
6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d
https://github.com/skioo/django-customer-billing/blob/6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d/billing/actions/accounts.py#L50-L77
train
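The one-invoice-per-currency rule reduces to grouping due amounts by currency and keeping the positive totals. Money and the ORM are replaced here by (currency, amount) tuples; this sketches the rule, not the Django models:

from collections import defaultdict

due_charges = [("CHF", 30), ("CHF", -10), ("EUR", 5), ("USD", -2)]

totals = defaultdict(int)
for currency, amount in due_charges:
    totals[currency] += amount

# One invoice per currency whose due total is strictly positive.
invoiced_currencies = sorted(c for c, total in totals.items() if total > 0)
assert invoiced_currencies == ["CHF", "EUR"]   # USD nets negative, so no invoice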
skioo/django-customer-billing
billing/actions/accounts.py
add_charge
def add_charge(account_id: str, amount: Money, reverses_id: Optional[str] = None, product_code: Optional[str] = None, product_properties: Optional[Dict[str, str]] = None) -> Charge: """ Add a charge to the account. :param account_id: The account on which to add the charge :param amount: The amount of the charge :param reverses_id: Set this if this charge reverses another one :param product_code: A code identifying the type of product charged :param product_properties: A dict of names and values. :return: The newly created charge. """ logger.info('adding-charge', account_id=account_id, amount=amount, product_code=product_code, product_properties=product_properties) with transaction.atomic(): charge = Charge(account_id=account_id, amount=amount) if reverses_id: charge.reverses_id = reverses_id if product_code: charge.product_code = product_code charge.full_clean(exclude=['id', 'account']) # Exclude to avoid unnecessary db queries charge.save(force_insert=True) if product_properties: objs = [ProductProperty(charge=charge, name=k, value=v) for k, v in product_properties.items()] for o in objs: o.full_clean(exclude=['id', 'charge']) # Exclude to avoid unnecessary db queries ProductProperty.objects.bulk_create(objs) return charge
python
def add_charge(account_id: str, amount: Money, reverses_id: Optional[str] = None, product_code: Optional[str] = None, product_properties: Optional[Dict[str, str]] = None) -> Charge: """ Add a charge to the account. :param account_id: The account on which to add the charge :param amount: The amount of the charge :param reverses_id: Set this if this charge reverses another one :param product_code: A code identifying the type of product charged :param product_properties: A dict of names and values. :return: The newly created charge. """ logger.info('adding-charge', account_id=account_id, amount=amount, product_code=product_code, product_properties=product_properties) with transaction.atomic(): charge = Charge(account_id=account_id, amount=amount) if reverses_id: charge.reverses_id = reverses_id if product_code: charge.product_code = product_code charge.full_clean(exclude=['id', 'account']) # Exclude to avoid unnecessary db queries charge.save(force_insert=True) if product_properties: objs = [ProductProperty(charge=charge, name=k, value=v) for k, v in product_properties.items()] for o in objs: o.full_clean(exclude=['id', 'charge']) # Exclude to avoid unnecessary db queries ProductProperty.objects.bulk_create(objs) return charge
[ "def", "add_charge", "(", "account_id", ":", "str", ",", "amount", ":", "Money", ",", "reverses_id", ":", "Optional", "[", "str", "]", "=", "None", ",", "product_code", ":", "Optional", "[", "str", "]", "=", "None", ",", "product_properties", ":", "Optional", "[", "Dict", "[", "str", ",", "str", "]", "]", "=", "None", ")", "->", "Charge", ":", "logger", ".", "info", "(", "'adding-charge'", ",", "account_id", "=", "account_id", ",", "amount", "=", "amount", ",", "product_code", "=", "product_code", ",", "product_properties", "=", "product_properties", ")", "with", "transaction", ".", "atomic", "(", ")", ":", "charge", "=", "Charge", "(", "account_id", "=", "account_id", ",", "amount", "=", "amount", ")", "if", "reverses_id", ":", "charge", ".", "reverses_id", "=", "reverses_id", "if", "product_code", ":", "charge", ".", "product_code", "=", "product_code", "charge", ".", "full_clean", "(", "exclude", "=", "[", "'id'", ",", "'account'", "]", ")", "# Exclude to avoid unnecessary db queries", "charge", ".", "save", "(", "force_insert", "=", "True", ")", "if", "product_properties", ":", "objs", "=", "[", "ProductProperty", "(", "charge", "=", "charge", ",", "name", "=", "k", ",", "value", "=", "v", ")", "for", "k", ",", "v", "in", "product_properties", ".", "items", "(", ")", "]", "for", "o", "in", "objs", ":", "o", ".", "full_clean", "(", "exclude", "=", "[", "'id'", ",", "'charge'", "]", ")", "# Exclude to avoid unnecessary db queries", "ProductProperty", ".", "objects", ".", "bulk_create", "(", "objs", ")", "return", "charge" ]
Add a charge to the account. :param account_id: The account on which to add the charge :param amount: The amount of the charge :param reverses_id: Set this if this charge reverses another one :param product_code: A code identifying the type of product charged :param product_properties: A dict of names and values. :return: The newly created charge.
[ "Add", "a", "charge", "to", "the", "account", "." ]
6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d
https://github.com/skioo/django-customer-billing/blob/6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d/billing/actions/accounts.py#L80-L114
train
Genida/dependenpy
src/dependenpy/dsm.py
DSM.build_tree
def build_tree(self): """Build the Python packages tree.""" for spec in self.specs: if spec.ismodule: self.modules.append(Module(spec.name, spec.path, dsm=self)) else: self.packages.append(Package( spec.name, spec.path, dsm=self, limit_to=spec.limit_to, build_tree=True, build_dependencies=False, enforce_init=self.enforce_init))
python
def build_tree(self): """Build the Python packages tree.""" for spec in self.specs: if spec.ismodule: self.modules.append(Module(spec.name, spec.path, dsm=self)) else: self.packages.append(Package( spec.name, spec.path, dsm=self, limit_to=spec.limit_to, build_tree=True, build_dependencies=False, enforce_init=self.enforce_init))
[ "def", "build_tree", "(", "self", ")", ":", "for", "spec", "in", "self", ".", "specs", ":", "if", "spec", ".", "ismodule", ":", "self", ".", "modules", ".", "append", "(", "Module", "(", "spec", ".", "name", ",", "spec", ".", "path", ",", "dsm", "=", "self", ")", ")", "else", ":", "self", ".", "packages", ".", "append", "(", "Package", "(", "spec", ".", "name", ",", "spec", ".", "path", ",", "dsm", "=", "self", ",", "limit_to", "=", "spec", ".", "limit_to", ",", "build_tree", "=", "True", ",", "build_dependencies", "=", "False", ",", "enforce_init", "=", "self", ".", "enforce_init", ")", ")" ]
Build the Python packages tree.
[ "Build", "the", "Python", "packages", "tree", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/dsm.py#L86-L97
train
Genida/dependenpy
src/dependenpy/dsm.py
Package.split_limits_heads
def split_limits_heads(self): """ Return first parts of dot-separated strings, and rest of strings. Returns: (list of str, list of str): the heads and rest of the strings. """ heads = [] new_limit_to = [] for limit in self.limit_to: if '.' in limit: name, limit = limit.split('.', 1) heads.append(name) new_limit_to.append(limit) else: heads.append(limit) return heads, new_limit_to
python
def split_limits_heads(self): """ Return first parts of dot-separated strings, and rest of strings. Returns: (list of str, list of str): the heads and rest of the strings. """ heads = [] new_limit_to = [] for limit in self.limit_to: if '.' in limit: name, limit = limit.split('.', 1) heads.append(name) new_limit_to.append(limit) else: heads.append(limit) return heads, new_limit_to
[ "def", "split_limits_heads", "(", "self", ")", ":", "heads", "=", "[", "]", "new_limit_to", "=", "[", "]", "for", "limit", "in", "self", ".", "limit_to", ":", "if", "'.'", "in", "limit", ":", "name", ",", "limit", "=", "limit", ".", "split", "(", "'.'", ",", "1", ")", "heads", ".", "append", "(", "name", ")", "new_limit_to", ".", "append", "(", "limit", ")", "else", ":", "heads", ".", "append", "(", "limit", ")", "return", "heads", ",", "new_limit_to" ]
Return first parts of dot-separated strings, and rest of strings. Returns: (list of str, list of str): the heads and rest of the strings.
[ "Return", "first", "parts", "of", "dot", "-", "separated", "strings", "and", "rest", "of", "strings", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/dsm.py#L171-L187
train
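As a pure function over plain strings, the splitting rule is easy to exercise:

def split_limits_heads(limit_to):
    # "pkg.sub.mod" contributes head "pkg" and remainder "sub.mod";
    # a plain name contributes only a head.
    heads, rest = [], []
    for limit in limit_to:
        if "." in limit:
            name, tail = limit.split(".", 1)
            heads.append(name)
            rest.append(tail)
        else:
            heads.append(limit)
    return heads, rest

assert split_limits_heads(["pkg.sub.mod", "single"]) == (["pkg", "single"], ["sub.mod"])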
Genida/dependenpy
src/dependenpy/dsm.py
Package.build_tree
def build_tree(self): """Build the tree for this package.""" for m in listdir(self.path): abs_m = join(self.path, m) if isfile(abs_m) and m.endswith('.py'): name = splitext(m)[0] if not self.limit_to or name in self.limit_to: self.modules.append(Module(name, abs_m, self.dsm, self)) elif isdir(abs_m): if isfile(join(abs_m, '__init__.py')) or not self.enforce_init: heads, new_limit_to = self.split_limits_heads() if not heads or m in heads: self.packages.append( Package(m, abs_m, self.dsm, self, new_limit_to, build_tree=True, build_dependencies=False, enforce_init=self.enforce_init))
python
def build_tree(self): """Build the tree for this package.""" for m in listdir(self.path): abs_m = join(self.path, m) if isfile(abs_m) and m.endswith('.py'): name = splitext(m)[0] if not self.limit_to or name in self.limit_to: self.modules.append(Module(name, abs_m, self.dsm, self)) elif isdir(abs_m): if isfile(join(abs_m, '__init__.py')) or not self.enforce_init: heads, new_limit_to = self.split_limits_heads() if not heads or m in heads: self.packages.append( Package(m, abs_m, self.dsm, self, new_limit_to, build_tree=True, build_dependencies=False, enforce_init=self.enforce_init))
[ "def", "build_tree", "(", "self", ")", ":", "for", "m", "in", "listdir", "(", "self", ".", "path", ")", ":", "abs_m", "=", "join", "(", "self", ".", "path", ",", "m", ")", "if", "isfile", "(", "abs_m", ")", "and", "m", ".", "endswith", "(", "'.py'", ")", ":", "name", "=", "splitext", "(", "m", ")", "[", "0", "]", "if", "not", "self", ".", "limit_to", "or", "name", "in", "self", ".", "limit_to", ":", "self", ".", "modules", ".", "append", "(", "Module", "(", "name", ",", "abs_m", ",", "self", ".", "dsm", ",", "self", ")", ")", "elif", "isdir", "(", "abs_m", ")", ":", "if", "isfile", "(", "join", "(", "abs_m", ",", "'__init__.py'", ")", ")", "or", "not", "self", ".", "enforce_init", ":", "heads", ",", "new_limit_to", "=", "self", ".", "split_limits_heads", "(", ")", "if", "not", "heads", "or", "m", "in", "heads", ":", "self", ".", "packages", ".", "append", "(", "Package", "(", "m", ",", "abs_m", ",", "self", ".", "dsm", ",", "self", ",", "new_limit_to", ",", "build_tree", "=", "True", ",", "build_dependencies", "=", "False", ",", "enforce_init", "=", "self", ".", "enforce_init", ")", ")" ]
Build the tree for this package.
[ "Build", "the", "tree", "for", "this", "package", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/dsm.py#L189-L205
train
Genida/dependenpy
src/dependenpy/dsm.py
Package.cardinal
def cardinal(self, to): """ Return the number of dependencies of this package to the given node. Args: to (Package/Module): target node. Returns: int: number of dependencies. """ return sum(m.cardinal(to) for m in self.submodules)
python
def cardinal(self, to): """ Return the number of dependencies of this package to the given node. Args: to (Package/Module): target node. Returns: int: number of dependencies. """ return sum(m.cardinal(to) for m in self.submodules)
[ "def", "cardinal", "(", "self", ",", "to", ")", ":", "return", "sum", "(", "m", ".", "cardinal", "(", "to", ")", "for", "m", "in", "self", ".", "submodules", ")" ]
Return the number of dependencies of this package to the given node. Args: to (Package/Module): target node. Returns: int: number of dependencies.
[ "Return", "the", "number", "of", "dependencies", "of", "this", "package", "to", "the", "given", "node", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/dsm.py#L207-L217
train
Genida/dependenpy
src/dependenpy/dsm.py
Module.build_dependencies
def build_dependencies(self): """ Build the dependencies for this module. Parse the code with ast, find all the import statements, convert them into Dependency objects. """ highest = self.dsm or self.root if self is highest: highest = LeafNode() for _import in self.parse_code(): target = highest.get_target(_import['target']) if target: what = _import['target'].split('.')[-1] if what != target.name: _import['what'] = what _import['target'] = target self.dependencies.append(Dependency(source=self, **_import))
python
def build_dependencies(self): """ Build the dependencies for this module. Parse the code with ast, find all the import statements, convert them into Dependency objects. """ highest = self.dsm or self.root if self is highest: highest = LeafNode() for _import in self.parse_code(): target = highest.get_target(_import['target']) if target: what = _import['target'].split('.')[-1] if what != target.name: _import['what'] = what _import['target'] = target self.dependencies.append(Dependency(source=self, **_import))
[ "def", "build_dependencies", "(", "self", ")", ":", "highest", "=", "self", ".", "dsm", "or", "self", ".", "root", "if", "self", "is", "highest", ":", "highest", "=", "LeafNode", "(", ")", "for", "_import", "in", "self", ".", "parse_code", "(", ")", ":", "target", "=", "highest", ".", "get_target", "(", "_import", "[", "'target'", "]", ")", "if", "target", ":", "what", "=", "_import", "[", "'target'", "]", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "if", "what", "!=", "target", ".", "name", ":", "_import", "[", "'what'", "]", "=", "what", "_import", "[", "'target'", "]", "=", "target", "self", ".", "dependencies", ".", "append", "(", "Dependency", "(", "source", "=", "self", ",", "*", "*", "_import", ")", ")" ]
Build the dependencies for this module. Parse the code with ast, find all the import statements, convert them into Dependency objects.
[ "Build", "the", "dependencies", "for", "this", "module", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/dsm.py#L319-L336
train
Genida/dependenpy
src/dependenpy/dsm.py
Module.parse_code
def parse_code(self): """ Read the source code and return all the import statements. Returns: list of dict: the import statements. """ code = open(self.path, encoding='utf-8').read() try: body = ast.parse(code).body except SyntaxError: try: code = code.encode('utf-8') body = ast.parse(code).body except SyntaxError: return [] return self.get_imports(body)
python
def parse_code(self): """ Read the source code and return all the import statements. Returns: list of dict: the import statements. """ code = open(self.path, encoding='utf-8').read() try: body = ast.parse(code).body except SyntaxError: try: code = code.encode('utf-8') body = ast.parse(code).body except SyntaxError: return [] return self.get_imports(body)
[ "def", "parse_code", "(", "self", ")", ":", "code", "=", "open", "(", "self", ".", "path", ",", "encoding", "=", "'utf-8'", ")", ".", "read", "(", ")", "try", ":", "body", "=", "ast", ".", "parse", "(", "code", ")", ".", "body", "except", "SyntaxError", ":", "try", ":", "code", "=", "code", ".", "encode", "(", "'utf-8'", ")", "body", "=", "ast", ".", "parse", "(", "code", ")", ".", "body", "except", "SyntaxError", ":", "return", "[", "]", "return", "self", ".", "get_imports", "(", "body", ")" ]
Read the source code and return all the import statements. Returns: list of dict: the import statements.
[ "Read", "the", "source", "code", "and", "return", "all", "the", "import", "statements", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/dsm.py#L338-L354
train
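get_imports is not shown in this record; the sketch below extracts Import/ImportFrom targets from a source string with ast, which is the raw material that method works from:

import ast

source = "import os\nfrom dependenpy.dsm import Module as M\n"

targets = []
for node in ast.parse(source).body:
    if isinstance(node, ast.Import):
        targets.extend(alias.name for alias in node.names)
    elif isinstance(node, ast.ImportFrom):
        # node.module is None for relative imports; absolute imports assumed here.
        targets.extend("{}.{}".format(node.module, alias.name)
                       for alias in node.names)

assert targets == ["os", "dependenpy.dsm.Module"]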
Genida/dependenpy
src/dependenpy/dsm.py
Module.cardinal
def cardinal(self, to):
        """
        Return the number of dependencies of this module to the given node.

        Args:
            to (Package/Module): the target node.

        Returns:
            int: number of dependencies.
        """
        return sum(1 for _ in filter(
            lambda d: not d.external and d.target in to,
            self.dependencies))
python
def cardinal(self, to):
        """
        Return the number of dependencies of this module to the given node.

        Args:
            to (Package/Module): the target node.

        Returns:
            int: number of dependencies.
        """
        return sum(1 for _ in filter(
            lambda d: not d.external and d.target in to,
            self.dependencies))
[ "def", "cardinal", "(", "self", ",", "to", ")", ":", "return", "sum", "(", "1", "for", "_", "in", "filter", "(", "lambda", "d", ":", "not", "d", ".", "external", "and", "d", ".", "target", "in", "to", ",", "self", ".", "dependencies", ")", ")" ]
Return the number of dependencies of this module to the given node.

        Args:
            to (Package/Module): the target node.

        Returns:
            int: number of dependencies.
[ "Return", "the", "number", "of", "dependencies", "of", "this", "module", "to", "the", "given", "node", "." ]
df099c17cbe735c990eca9197e39cfc5eb8a4c8e
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/dsm.py#L386-L397
train
romanorac/discomll
discomll/dataset.py
Data.generate_urls
def generate_urls(self, first_url, last_url):
        """
        Function generates URLs in split command fashion. If first_url is
        xaaaaa and last_url is xaaaac, it will automatically generate xaaaab.
        """
        first_url = first_url.split("/")
        last_url = last_url.split("/")
        if first_url[0].lower() != "http:" or last_url[0].lower() != "http:":
            raise Exception("URLs should be accessible via HTTP.")

        url_base = "/".join(first_url[:-1])
        start_index = first_url[-1].index("a")
        file_name = first_url[-1][0:start_index]
        url_base += "/" + file_name

        start = first_url[-1][start_index:]
        finish = last_url[-1][start_index:]

        if start.count(".") == 1 and finish.count(".") == 1:
            start, file_extension = start.split(".")
            finish, _ = finish.split(".")
            if len(start) != len(finish):
                raise Exception("Filenames in url should have the same length.")
            file_extension = "." + file_extension
        else:
            raise Exception("URLs does not have the same pattern.")

        alphabet = "abcdefghijklmnopqrstuvwxyz"
        product = itertools.product(alphabet, repeat=len(start))

        urls = []
        for p in product:
            urls.append([url_base + "".join(p) + file_extension])
            if "".join(p) == finish:
                break
        return urls
python
def generate_urls(self, first_url, last_url):
        """
        Function generates URLs in split command fashion. If first_url is
        xaaaaa and last_url is xaaaac, it will automatically generate xaaaab.
        """
        first_url = first_url.split("/")
        last_url = last_url.split("/")
        if first_url[0].lower() != "http:" or last_url[0].lower() != "http:":
            raise Exception("URLs should be accessible via HTTP.")

        url_base = "/".join(first_url[:-1])
        start_index = first_url[-1].index("a")
        file_name = first_url[-1][0:start_index]
        url_base += "/" + file_name

        start = first_url[-1][start_index:]
        finish = last_url[-1][start_index:]

        if start.count(".") == 1 and finish.count(".") == 1:
            start, file_extension = start.split(".")
            finish, _ = finish.split(".")
            if len(start) != len(finish):
                raise Exception("Filenames in url should have the same length.")
            file_extension = "." + file_extension
        else:
            raise Exception("URLs does not have the same pattern.")

        alphabet = "abcdefghijklmnopqrstuvwxyz"
        product = itertools.product(alphabet, repeat=len(start))

        urls = []
        for p in product:
            urls.append([url_base + "".join(p) + file_extension])
            if "".join(p) == finish:
                break
        return urls
[ "def", "generate_urls", "(", "self", ",", "first_url", ",", "last_url", ")", ":", "first_url", "=", "first_url", ".", "split", "(", "\"/\"", ")", "last_url", "=", "last_url", ".", "split", "(", "\"/\"", ")", "if", "first_url", "[", "0", "]", ".", "lower", "(", ")", "!=", "\"http:\"", "or", "last_url", "[", "0", "]", ".", "lower", "(", ")", "!=", "\"http:\"", ":", "raise", "Exception", "(", "\"URLs should be accessible via HTTP.\"", ")", "url_base", "=", "\"/\"", ".", "join", "(", "first_url", "[", ":", "-", "1", "]", ")", "start_index", "=", "first_url", "[", "-", "1", "]", ".", "index", "(", "\"a\"", ")", "file_name", "=", "first_url", "[", "-", "1", "]", "[", "0", ":", "start_index", "]", "url_base", "+=", "\"/\"", "+", "file_name", "start", "=", "first_url", "[", "-", "1", "]", "[", "start_index", ":", "]", "finish", "=", "last_url", "[", "-", "1", "]", "[", "start_index", ":", "]", "if", "start", ".", "count", "(", "\".\"", ")", "==", "1", "and", "finish", ".", "count", "(", "\".\"", ")", "==", "1", ":", "start", ",", "file_extension", "=", "start", ".", "split", "(", "\".\"", ")", "finish", ",", "_", "=", "finish", ".", "split", "(", "\".\"", ")", "if", "len", "(", "start", ")", "!=", "len", "(", "finish", ")", ":", "raise", "Exception", "(", "\"Filenames in url should have the same length.\"", ")", "file_extension", "=", "\".\"", "+", "file_extension", "else", ":", "raise", "Exception", "(", "\"URLs does not have the same pattern.\"", ")", "alphabet", "=", "\"abcdefghijklmnopqrstuvwxyz\"", "product", "=", "itertools", ".", "product", "(", "alphabet", ",", "repeat", "=", "len", "(", "start", ")", ")", "urls", "=", "[", "]", "for", "p", "in", "product", ":", "urls", ".", "append", "(", "[", "url_base", "+", "\"\"", ".", "join", "(", "p", ")", "+", "file_extension", "]", ")", "if", "\"\"", ".", "join", "(", "p", ")", "==", "finish", ":", "break", "return", "urls" ]
Function generates URLs in split command fashion. If first_url is xaaaaa and last_url is xaaaac, it will automatically generate xaaaab.
[ "Function", "generates", "URLs", "in", "split", "command", "fashion", ".", "If", "first_url", "is", "xaaaaa", "and", "last_url", "is", "xaaaac", "it", "will", "automatically", "generate", "xaaaab", "." ]
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/dataset.py#L160-L194
train
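A worked example of the expansion implemented above; the host and file names are made up, and `data` stands for a Data instance.

urls = data.generate_urls("http://example.com/xaa.txt",
                          "http://example.com/xac.txt")
# -> [['http://example.com/xaa.txt'],
#     ['http://example.com/xab.txt'],
#     ['http://example.com/xac.txt']]
# Note the method returns a list of single-item lists, one URL per entry.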
oemof/oemof.db
oemof/db/coastdat.py
fetch_raw_data
def fetch_raw_data(sql, connection, geometry):
    """
    Fetch the coastdat2 from the database, adapt it to the local time zone
    and create a time index.
    """
    tmp_dc = {}
    weather_df = pd.DataFrame(
        connection.execute(sql).fetchall(), columns=[
            'gid', 'geom_point', 'geom_polygon', 'data_id', 'time_series',
            'dat_id', 'type_id', 'type', 'height', 'year', 'leap_year']).drop(
        'dat_id', 1)

    # Get the timezone of the geometry
    tz = tools.tz_from_geom(connection, geometry)

    for ix in weather_df.index:
        # Convert the point of the weather location to a shapely object
        weather_df.loc[ix, 'geom_point'] = wkt_loads(
            weather_df['geom_point'][ix])

        # Roll the dataset forward according to the timezone, because the
        # dataset is based on utc (Berlin +1, Kiev +2, London +0)
        utc = timezone('utc')
        offset = int(utc.localize(datetime(2002, 1, 1)).astimezone(
            timezone(tz)).strftime("%z")[:-2])

        # Get the year and the length of the data array
        db_year = weather_df.loc[ix, 'year']
        db_len = len(weather_df['time_series'][ix])

        # Set absolute time index for the data sets to avoid errors.
        tmp_dc[ix] = pd.Series(
            np.roll(np.array(weather_df['time_series'][ix]), offset),
            index=pd.date_range(pd.datetime(db_year, 1, 1, 0),
                                periods=db_len, freq='H', tz=tz))
    weather_df['time_series'] = pd.Series(tmp_dc)
    return weather_df
python
def fetch_raw_data(sql, connection, geometry):
    """
    Fetch the coastdat2 from the database, adapt it to the local time zone
    and create a time index.
    """
    tmp_dc = {}
    weather_df = pd.DataFrame(
        connection.execute(sql).fetchall(), columns=[
            'gid', 'geom_point', 'geom_polygon', 'data_id', 'time_series',
            'dat_id', 'type_id', 'type', 'height', 'year', 'leap_year']).drop(
        'dat_id', 1)

    # Get the timezone of the geometry
    tz = tools.tz_from_geom(connection, geometry)

    for ix in weather_df.index:
        # Convert the point of the weather location to a shapely object
        weather_df.loc[ix, 'geom_point'] = wkt_loads(
            weather_df['geom_point'][ix])

        # Roll the dataset forward according to the timezone, because the
        # dataset is based on utc (Berlin +1, Kiev +2, London +0)
        utc = timezone('utc')
        offset = int(utc.localize(datetime(2002, 1, 1)).astimezone(
            timezone(tz)).strftime("%z")[:-2])

        # Get the year and the length of the data array
        db_year = weather_df.loc[ix, 'year']
        db_len = len(weather_df['time_series'][ix])

        # Set absolute time index for the data sets to avoid errors.
        tmp_dc[ix] = pd.Series(
            np.roll(np.array(weather_df['time_series'][ix]), offset),
            index=pd.date_range(pd.datetime(db_year, 1, 1, 0),
                                periods=db_len, freq='H', tz=tz))
    weather_df['time_series'] = pd.Series(tmp_dc)
    return weather_df
[ "def", "fetch_raw_data", "(", "sql", ",", "connection", ",", "geometry", ")", ":", "tmp_dc", "=", "{", "}", "weather_df", "=", "pd", ".", "DataFrame", "(", "connection", ".", "execute", "(", "sql", ")", ".", "fetchall", "(", ")", ",", "columns", "=", "[", "'gid'", ",", "'geom_point'", ",", "'geom_polygon'", ",", "'data_id'", ",", "'time_series'", ",", "'dat_id'", ",", "'type_id'", ",", "'type'", ",", "'height'", ",", "'year'", ",", "'leap_year'", "]", ")", ".", "drop", "(", "'dat_id'", ",", "1", ")", "# Get the timezone of the geometry", "tz", "=", "tools", ".", "tz_from_geom", "(", "connection", ",", "geometry", ")", "for", "ix", "in", "weather_df", ".", "index", ":", "# Convert the point of the weather location to a shapely object", "weather_df", ".", "loc", "[", "ix", ",", "'geom_point'", "]", "=", "wkt_loads", "(", "weather_df", "[", "'geom_point'", "]", "[", "ix", "]", ")", "# Roll the dataset forward according to the timezone, because the", "# dataset is based on utc (Berlin +1, Kiev +2, London +0)", "utc", "=", "timezone", "(", "'utc'", ")", "offset", "=", "int", "(", "utc", ".", "localize", "(", "datetime", "(", "2002", ",", "1", ",", "1", ")", ")", ".", "astimezone", "(", "timezone", "(", "tz", ")", ")", ".", "strftime", "(", "\"%z\"", ")", "[", ":", "-", "2", "]", ")", "# Get the year and the length of the data array", "db_year", "=", "weather_df", ".", "loc", "[", "ix", ",", "'year'", "]", "db_len", "=", "len", "(", "weather_df", "[", "'time_series'", "]", "[", "ix", "]", ")", "# Set absolute time index for the data sets to avoid errors.", "tmp_dc", "[", "ix", "]", "=", "pd", ".", "Series", "(", "np", ".", "roll", "(", "np", ".", "array", "(", "weather_df", "[", "'time_series'", "]", "[", "ix", "]", ")", ",", "offset", ")", ",", "index", "=", "pd", ".", "date_range", "(", "pd", ".", "datetime", "(", "db_year", ",", "1", ",", "1", ",", "0", ")", ",", "periods", "=", "db_len", ",", "freq", "=", "'H'", ",", "tz", "=", "tz", ")", ")", "weather_df", "[", "'time_series'", "]", "=", "pd", ".", "Series", "(", "tmp_dc", ")", "return", "weather_df" ]
Fetch the coastdat2 from the database, adapt it to the local time zone and create a time index.
[ "Fetch", "the", "coastdat2", "from", "the", "database", "adapt", "it", "to", "the", "local", "time", "zone", "and", "create", "a", "time", "index", "." ]
d51ac50187f03a875bd7ce5991ed4772e8b77b93
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/coastdat.py#L92-L128
train
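The timezone roll in the record above hinges on one trick: formatting a localized datetime with %z and keeping only the hour digits. A standalone sketch (requires pytz, which the record already relies on):

from datetime import datetime
from pytz import timezone

utc = timezone('utc')
offset = int(utc.localize(datetime(2002, 1, 1)).astimezone(
    timezone('Europe/Berlin')).strftime("%z")[:-2])
print(offset)  # 1 -- "+0100" truncated to "+01", i.e. hours east of UTC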
oemof/oemof.db
oemof/db/coastdat.py
create_single_weather
def create_single_weather(df, rename_dc):
    """Create an oemof weather object for the given geometry"""
    my_weather = weather.FeedinWeather()
    data_height = {}
    name = None
    # Create a pandas.DataFrame with the time series of the weather data set
    weather_df = pd.DataFrame(index=df.time_series.iloc[0].index)
    for row in df.iterrows():
        key = rename_dc[row[1].type]
        weather_df[key] = row[1].time_series
        data_height[key] = row[1].height if not np.isnan(row[1].height) else 0
        name = row[1].gid
    my_weather.data = weather_df
    my_weather.timezone = weather_df.index.tz
    my_weather.longitude = df.geom_point.iloc[0].x
    my_weather.latitude = df.geom_point.iloc[0].y
    my_weather.geometry = df.geom_point.iloc[0]
    my_weather.data_height = data_height
    my_weather.name = name
    return my_weather
python
def create_single_weather(df, rename_dc):
    """Create an oemof weather object for the given geometry"""
    my_weather = weather.FeedinWeather()
    data_height = {}
    name = None
    # Create a pandas.DataFrame with the time series of the weather data set
    weather_df = pd.DataFrame(index=df.time_series.iloc[0].index)
    for row in df.iterrows():
        key = rename_dc[row[1].type]
        weather_df[key] = row[1].time_series
        data_height[key] = row[1].height if not np.isnan(row[1].height) else 0
        name = row[1].gid
    my_weather.data = weather_df
    my_weather.timezone = weather_df.index.tz
    my_weather.longitude = df.geom_point.iloc[0].x
    my_weather.latitude = df.geom_point.iloc[0].y
    my_weather.geometry = df.geom_point.iloc[0]
    my_weather.data_height = data_height
    my_weather.name = name
    return my_weather
[ "def", "create_single_weather", "(", "df", ",", "rename_dc", ")", ":", "my_weather", "=", "weather", ".", "FeedinWeather", "(", ")", "data_height", "=", "{", "}", "name", "=", "None", "# Create a pandas.DataFrame with the time series of the weather data set", "weather_df", "=", "pd", ".", "DataFrame", "(", "index", "=", "df", ".", "time_series", ".", "iloc", "[", "0", "]", ".", "index", ")", "for", "row", "in", "df", ".", "iterrows", "(", ")", ":", "key", "=", "rename_dc", "[", "row", "[", "1", "]", ".", "type", "]", "weather_df", "[", "key", "]", "=", "row", "[", "1", "]", ".", "time_series", "data_height", "[", "key", "]", "=", "row", "[", "1", "]", ".", "height", "if", "not", "np", ".", "isnan", "(", "row", "[", "1", "]", ".", "height", ")", "else", "0", "name", "=", "row", "[", "1", "]", ".", "gid", "my_weather", ".", "data", "=", "weather_df", "my_weather", ".", "timezone", "=", "weather_df", ".", "index", ".", "tz", "my_weather", ".", "longitude", "=", "df", ".", "geom_point", ".", "iloc", "[", "0", "]", ".", "x", "my_weather", ".", "latitude", "=", "df", ".", "geom_point", ".", "iloc", "[", "0", "]", ".", "y", "my_weather", ".", "geometry", "=", "df", ".", "geom_point", ".", "iloc", "[", "0", "]", "my_weather", ".", "data_height", "=", "data_height", "my_weather", ".", "name", "=", "name", "return", "my_weather" ]
Create an oemof weather object for the given geometry
[ "Create", "an", "oemof", "weather", "object", "for", "the", "given", "geometry" ]
d51ac50187f03a875bd7ce5991ed4772e8b77b93
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/coastdat.py#L131-L150
train
oemof/oemof.db
oemof/db/coastdat.py
create_multi_weather
def create_multi_weather(df, rename_dc):
    """Create a list of oemof weather objects if the given geometry is a
    polygon
    """
    weather_list = []

    # Create a pandas.DataFrame with the time series of the weather data set
    # for each data set and append them to a list.
    for gid in df.gid.unique():
        gid_df = df[df.gid == gid]
        obj = create_single_weather(gid_df, rename_dc)
        weather_list.append(obj)
    return weather_list
python
def create_multi_weather(df, rename_dc):
    """Create a list of oemof weather objects if the given geometry is a
    polygon
    """
    weather_list = []

    # Create a pandas.DataFrame with the time series of the weather data set
    # for each data set and append them to a list.
    for gid in df.gid.unique():
        gid_df = df[df.gid == gid]
        obj = create_single_weather(gid_df, rename_dc)
        weather_list.append(obj)
    return weather_list
[ "def", "create_multi_weather", "(", "df", ",", "rename_dc", ")", ":", "weather_list", "=", "[", "]", "# Create a pandas.DataFrame with the time series of the weather data set", "# for each data set and append them to a list.", "for", "gid", "in", "df", ".", "gid", ".", "unique", "(", ")", ":", "gid_df", "=", "df", "[", "df", ".", "gid", "==", "gid", "]", "obj", "=", "create_single_weather", "(", "gid_df", ",", "rename_dc", ")", "weather_list", ".", "append", "(", "obj", ")", "return", "weather_list" ]
Create a list of oemof weather objects if the given geometry is a polygon
[ "Create", "a", "list", "of", "oemof", "weather", "objects", "if", "the", "given", "geometry", "is", "a", "polygon" ]
d51ac50187f03a875bd7ce5991ed4772e8b77b93
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/coastdat.py#L153-L163
train
romanorac/discomll
discomll/ensemble/core/decision_tree.py
predict
def predict(tree, x, y=[], dist=False):
    """
    Function makes a prediction of one sample with a tree model.
    If y label is defined it returns node identifier and margin.

    tree: dictionary - tree model
    x: numpy array - one sample from the dataset
    y: string, integer or float - sample label
    """
    # conditions of continuous and discrete features
    node_id = 1  # initialize node identifier as first node under the root
    while 1:
        nodes = tree[node_id]
        if nodes[0][5] == "c":
            if x[nodes[0][1]] <= nodes[0][2]:
                index, node_id = 0, nodes[0][0]  # set identifier of child node
            else:
                index, node_id = 1, nodes[1][0]  # set identifier of child node
        else:
            if x[nodes[0][1]] in nodes[0][2]:
                index, node_id = 0, nodes[0][0]  # set identifier of child node
            elif x[nodes[1][1]] in nodes[1][2]:
                index, node_id = 1, nodes[1][0]  # set identifier of child node
            else:
                # value is not in left or right branch. Get label distributions
                # of left and right child
                # sum labels distribution to get parent label distribution
                node_id = str(nodes[0][0]) + "," + str(nodes[1][0])
                index, nodes = 0, [[0, 0, 0,
                                    {k: nodes[0][3].get(k, 0) + nodes[1][3].get(k, 0)
                                     for k in set(nodes[0][3]) | set(nodes[1][3])}]]

        if node_id in tree.keys():  # check if tree can be traversed further
            continue

        if dist:
            suma = sum(nodes[index][3].values())
            return Counter({k: v / float(suma)
                            for k, v in nodes[index][3].iteritems()})

        prediction = max(nodes[index][3], key=nodes[index][3].get)
        if y == []:
            return prediction

        probs = sorted(
            zip(nodes[index][3].keys(),
                np.true_divide(nodes[index][3].values(),
                               np.sum(nodes[index][3].values()))),
            key=itemgetter(1), reverse=True)

        if prediction == y:
            margin = probs[0][1] - probs[1][1] if len(probs) > 1 else 1
        else:
            margin = dict(probs).get(y, 0) - probs[0][1]
        return node_id, margin
python
def predict(tree, x, y=[], dist=False):
    """
    Function makes a prediction of one sample with a tree model.
    If y label is defined it returns node identifier and margin.

    tree: dictionary - tree model
    x: numpy array - one sample from the dataset
    y: string, integer or float - sample label
    """
    # conditions of continuous and discrete features
    node_id = 1  # initialize node identifier as first node under the root
    while 1:
        nodes = tree[node_id]
        if nodes[0][5] == "c":
            if x[nodes[0][1]] <= nodes[0][2]:
                index, node_id = 0, nodes[0][0]  # set identifier of child node
            else:
                index, node_id = 1, nodes[1][0]  # set identifier of child node
        else:
            if x[nodes[0][1]] in nodes[0][2]:
                index, node_id = 0, nodes[0][0]  # set identifier of child node
            elif x[nodes[1][1]] in nodes[1][2]:
                index, node_id = 1, nodes[1][0]  # set identifier of child node
            else:
                # value is not in left or right branch. Get label distributions
                # of left and right child
                # sum labels distribution to get parent label distribution
                node_id = str(nodes[0][0]) + "," + str(nodes[1][0])
                index, nodes = 0, [[0, 0, 0,
                                    {k: nodes[0][3].get(k, 0) + nodes[1][3].get(k, 0)
                                     for k in set(nodes[0][3]) | set(nodes[1][3])}]]

        if node_id in tree.keys():  # check if tree can be traversed further
            continue

        if dist:
            suma = sum(nodes[index][3].values())
            return Counter({k: v / float(suma)
                            for k, v in nodes[index][3].iteritems()})

        prediction = max(nodes[index][3], key=nodes[index][3].get)
        if y == []:
            return prediction

        probs = sorted(
            zip(nodes[index][3].keys(),
                np.true_divide(nodes[index][3].values(),
                               np.sum(nodes[index][3].values()))),
            key=itemgetter(1), reverse=True)

        if prediction == y:
            margin = probs[0][1] - probs[1][1] if len(probs) > 1 else 1
        else:
            margin = dict(probs).get(y, 0) - probs[0][1]
        return node_id, margin
[ "def", "predict", "(", "tree", ",", "x", ",", "y", "=", "[", "]", ",", "dist", "=", "False", ")", ":", "# conditions of continuous and discrete features", "node_id", "=", "1", "# initialize node identifier as first node under the root", "while", "1", ":", "nodes", "=", "tree", "[", "node_id", "]", "if", "nodes", "[", "0", "]", "[", "5", "]", "==", "\"c\"", ":", "if", "x", "[", "nodes", "[", "0", "]", "[", "1", "]", "]", "<=", "nodes", "[", "0", "]", "[", "2", "]", ":", "index", ",", "node_id", "=", "0", ",", "nodes", "[", "0", "]", "[", "0", "]", "# set identifier of child node", "else", ":", "index", ",", "node_id", "=", "1", ",", "nodes", "[", "1", "]", "[", "0", "]", "# set identifier of child node", "else", ":", "if", "x", "[", "nodes", "[", "0", "]", "[", "1", "]", "]", "in", "nodes", "[", "0", "]", "[", "2", "]", ":", "index", ",", "node_id", "=", "0", ",", "nodes", "[", "0", "]", "[", "0", "]", "# set identifier of child node", "elif", "x", "[", "nodes", "[", "1", "]", "[", "1", "]", "]", "in", "nodes", "[", "1", "]", "[", "2", "]", ":", "index", ",", "node_id", "=", "1", ",", "nodes", "[", "1", "]", "[", "0", "]", "# set identifier of child node", "else", ":", "# value is not in left or right branch. Get label distributions of left and right child", "# sum labels distribution to get parent label distribution", "node_id", "=", "str", "(", "nodes", "[", "0", "]", "[", "0", "]", ")", "+", "\",\"", "+", "str", "(", "nodes", "[", "1", "]", "[", "0", "]", ")", "index", ",", "nodes", "=", "0", ",", "[", "[", "0", ",", "0", ",", "0", ",", "{", "k", ":", "nodes", "[", "0", "]", "[", "3", "]", ".", "get", "(", "k", ",", "0", ")", "+", "nodes", "[", "1", "]", "[", "3", "]", ".", "get", "(", "k", ",", "0", ")", "for", "k", "in", "set", "(", "nodes", "[", "0", "]", "[", "3", "]", ")", "|", "set", "(", "nodes", "[", "1", "]", "[", "3", "]", ")", "}", "]", "]", "if", "node_id", "in", "tree", ".", "keys", "(", ")", ":", "# check if tree can be traversed further", "continue", "if", "dist", ":", "suma", "=", "sum", "(", "nodes", "[", "index", "]", "[", "3", "]", ".", "values", "(", ")", ")", "return", "Counter", "(", "{", "k", ":", "v", "/", "float", "(", "suma", ")", "for", "k", ",", "v", "in", "nodes", "[", "index", "]", "[", "3", "]", ".", "iteritems", "(", ")", "}", ")", "prediction", "=", "max", "(", "nodes", "[", "index", "]", "[", "3", "]", ",", "key", "=", "nodes", "[", "index", "]", "[", "3", "]", ".", "get", ")", "if", "y", "==", "[", "]", ":", "return", "prediction", "probs", "=", "sorted", "(", "zip", "(", "nodes", "[", "index", "]", "[", "3", "]", ".", "keys", "(", ")", ",", "np", ".", "true_divide", "(", "nodes", "[", "index", "]", "[", "3", "]", ".", "values", "(", ")", ",", "np", ".", "sum", "(", "nodes", "[", "index", "]", "[", "3", "]", ".", "values", "(", ")", ")", ")", ")", ",", "key", "=", "itemgetter", "(", "1", ")", ",", "reverse", "=", "True", ")", "if", "prediction", "==", "y", ":", "margin", "=", "probs", "[", "0", "]", "[", "1", "]", "-", "probs", "[", "1", "]", "[", "1", "]", "if", "len", "(", "probs", ")", ">", "1", "else", "1", "else", ":", "margin", "=", "dict", "(", "probs", ")", ".", "get", "(", "y", ",", "0", ")", "-", "probs", "[", "0", "]", "[", "1", "]", "return", "node_id", ",", "margin" ]
Function makes a prediction of one sample with a tree model.
    If y label is defined it returns node identifier and margin.

    tree: dictionary - tree model
    x: numpy array - one sample from the dataset
    y: string, integer or float - sample label
[ "Function", "makes", "a", "prediction", "of", "one", "sample", "with", "a", "tree", "model", ".", "If", "y", "label", "is", "defined", "it", "returns", "node", "identifier", "and", "margin", "." ]
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/ensemble/core/decision_tree.py#L142-L193
train
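A minimal sketch of the node layout the traversal above expects. The list positions [child_id, feature_index, split_value, label_counts, unused, feature_type] are inferred from the indexing in the record, so the placeholder at index 4 is an assumption.

# Hypothetical one-split tree over a single continuous feature.
tree = {1: [[2, 0, 5.0, {"low": 8, "high": 2}, None, "c"],   # left child: x[0] <= 5.0
            [3, 0, 5.0, {"low": 1, "high": 9}, None, "c"]]}  # right child: x[0] > 5.0
print(predict(tree, x=[3.5]))  # node 2 is a leaf -> majority label "low"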
mgoral/subconvert
src/subconvert/gui/SubtitleWindow.py
SubTabWidget.__addTab
def __addTab(self, filePath):
        """Returns existing tab index. Creates a new one if it isn't opened
        and returns its index otherwise."""
        for i in range(self.tabBar.count()):
            widget = self.pages.widget(i)
            if not widget.isStatic and filePath == widget.filePath:
                return i
        tab = SubtitleEditor(filePath, self._subtitleData, self)
        newIndex = self.tabBar.addTab(self._createTabName(tab.name, tab.history.isClean()))
        tab.history.cleanChanged.connect(
            lambda clean: self._cleanStateForFileChanged(filePath, clean))
        self.pages.addWidget(tab)
        return newIndex
python
def __addTab(self, filePath):
        """Returns existing tab index. Creates a new one if it isn't opened
        and returns its index otherwise."""
        for i in range(self.tabBar.count()):
            widget = self.pages.widget(i)
            if not widget.isStatic and filePath == widget.filePath:
                return i
        tab = SubtitleEditor(filePath, self._subtitleData, self)
        newIndex = self.tabBar.addTab(self._createTabName(tab.name, tab.history.isClean()))
        tab.history.cleanChanged.connect(
            lambda clean: self._cleanStateForFileChanged(filePath, clean))
        self.pages.addWidget(tab)
        return newIndex
[ "def", "__addTab", "(", "self", ",", "filePath", ")", ":", "for", "i", "in", "range", "(", "self", ".", "tabBar", ".", "count", "(", ")", ")", ":", "widget", "=", "self", ".", "pages", ".", "widget", "(", "i", ")", "if", "not", "widget", ".", "isStatic", "and", "filePath", "==", "widget", ".", "filePath", ":", "return", "i", "tab", "=", "SubtitleEditor", "(", "filePath", ",", "self", ".", "_subtitleData", ",", "self", ")", "newIndex", "=", "self", ".", "tabBar", ".", "addTab", "(", "self", ".", "_createTabName", "(", "tab", ".", "name", ",", "tab", ".", "history", ".", "isClean", "(", ")", ")", ")", "tab", ".", "history", ".", "cleanChanged", ".", "connect", "(", "lambda", "clean", ":", "self", ".", "_cleanStateForFileChanged", "(", "filePath", ",", "clean", ")", ")", "self", ".", "pages", ".", "addWidget", "(", "tab", ")", "return", "newIndex" ]
Returns existing tab index. Creates a new one if it isn't opened and returns its index otherwise.
[ "Returns", "existing", "tab", "index", ".", "Creates", "a", "new", "one", "if", "it", "isn", "t", "opened", "and", "returns", "its", "index", "otherwise", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/gui/SubtitleWindow.py#L119-L131
train
iLampard/x-utils
xutils/config_utils.py
find_and_parse_config
def find_and_parse_config(config, default_config='default.yaml'):
    """Finds the service configuration file and parses it.

    Checks also a directory called default, to check for default
    configuration values, that will be overwritten by the actual
    configuration found on given path.
    """

    def load_config(path):
        if os.path.isfile(path):
            with open(path, 'r') as f:
                config_dict_ = yaml.load(f)
            return config_dict_

    config_path = find_file(config)
    default_path = find_file(default_config)

    config = load_config(config_path)
    default_config = load_config(default_path)

    if config is None and default_config is None:
        raise ValueError('Both config and default_config return None')

    if config is None:
        config_dict = default_config
    elif default_config is None:
        config_dict = config
    else:
        config_dict = merge(default_config, config)

    return config_dict
python
def find_and_parse_config(config, default_config='default.yaml'):
    """Finds the service configuration file and parses it.

    Checks also a directory called default, to check for default
    configuration values, that will be overwritten by the actual
    configuration found on given path.
    """

    def load_config(path):
        if os.path.isfile(path):
            with open(path, 'r') as f:
                config_dict_ = yaml.load(f)
            return config_dict_

    config_path = find_file(config)
    default_path = find_file(default_config)

    config = load_config(config_path)
    default_config = load_config(default_path)

    if config is None and default_config is None:
        raise ValueError('Both config and default_config return None')

    if config is None:
        config_dict = default_config
    elif default_config is None:
        config_dict = config
    else:
        config_dict = merge(default_config, config)

    return config_dict
[ "def", "find_and_parse_config", "(", "config", ",", "default_config", "=", "'default.yaml'", ")", ":", "def", "load_config", "(", "path", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "config_dict_", "=", "yaml", ".", "load", "(", "f", ")", "return", "config_dict_", "config_path", "=", "find_file", "(", "config", ")", "default_path", "=", "find_file", "(", "default_config", ")", "config", "=", "load_config", "(", "config_path", ")", "default_config", "=", "load_config", "(", "default_path", ")", "if", "config", "is", "None", "and", "default_config", "is", "None", ":", "raise", "ValueError", "(", "'Both config and default_config return None'", ")", "if", "config", "is", "None", ":", "config_dict", "=", "default_config", "elif", "default_config", "is", "None", ":", "config_dict", "=", "config", "else", ":", "config_dict", "=", "merge", "(", "default_config", ",", "config", ")", "return", "config_dict" ]
Finds the service configuration file and parses it. Checks also a directory called default, to check for default configuration values, that will be overwritten by the actual configuration found on given path.
[ "Finds", "the", "service", "configuration", "file", "and", "parses", "it", ".", "Checks", "also", "a", "directory", "called", "default", "to", "check", "for", "default", "configuration", "values", "that", "will", "be", "overwritten", "by", "the", "actual", "configuration", "found", "on", "given", "path", "." ]
291d92832ee0e0c89bc22e10ecf2f44445e0d300
https://github.com/iLampard/x-utils/blob/291d92832ee0e0c89bc22e10ecf2f44445e0d300/xutils/config_utils.py#L76-L101
train
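A sketch of the precedence implied by merge(default_config, config) in the record above: values from the located config win over the defaults. File names and contents are hypothetical, and the override direction assumes merge() lets its second argument take priority.

# default.yaml: {host: localhost, port: 80}
# service.yaml: {port: 8080}
config = find_and_parse_config('service.yaml')
# -> {'host': 'localhost', 'port': 8080}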
mgoral/subconvert
src/subconvert/cli/syncparse.py
parse
def parse(s, subs):
    """Parses a given string and creates a list of SyncPoints."""
    if len(subs) == 0:
        return []

    points = []
    requests = _tokenize_request(s)

    if len(requests) == 1 and requests[0].type_ == _Request.Type.OFFSET:
        return _offset_subtitles(requests[0], subs)
    return _sync_subtitles(requests, subs)
python
def parse(s, subs):
    """Parses a given string and creates a list of SyncPoints."""
    if len(subs) == 0:
        return []

    points = []
    requests = _tokenize_request(s)

    if len(requests) == 1 and requests[0].type_ == _Request.Type.OFFSET:
        return _offset_subtitles(requests[0], subs)
    return _sync_subtitles(requests, subs)
[ "def", "parse", "(", "s", ",", "subs", ")", ":", "if", "len", "(", "subs", ")", "==", "0", ":", "return", "[", "]", "points", "=", "[", "]", "requests", "=", "_tokenize_request", "(", "s", ")", "if", "len", "(", "requests", ")", "==", "1", "and", "requests", "[", "0", "]", ".", "type_", "==", "_Request", ".", "Type", ".", "OFFSET", ":", "return", "_offset_subtitles", "(", "requests", "[", "0", "]", ",", "subs", ")", "return", "_sync_subtitles", "(", "requests", ",", "subs", ")" ]
Parses a given string and creates a list of SyncPoints.
[ "Parses", "a", "given", "string", "and", "creates", "a", "list", "of", "SyncPoints", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/cli/syncparse.py#L172-L182
train
djaodjin/djaodjin-deployutils
deployutils/helpers.py
full_name_natural_split
def full_name_natural_split(full_name):
    """
    This function splits a full name into a natural first name,
    last name and middle initials.
    """
    parts = full_name.strip().split(' ')
    first_name = ""
    if parts:
        first_name = parts.pop(0)
        if first_name.lower() == "el" and parts:
            first_name += " " + parts.pop(0)
    last_name = ""
    if parts:
        last_name = parts.pop()
        if (last_name.lower() == 'i' or last_name.lower() == 'ii'
            or last_name.lower() == 'iii' and parts):
            last_name = parts.pop() + " " + last_name
    middle_initials = ""
    for middle_name in parts:
        if middle_name:
            middle_initials += middle_name[0]
    return first_name, middle_initials, last_name
python
def full_name_natural_split(full_name):
    """
    This function splits a full name into a natural first name,
    last name and middle initials.
    """
    parts = full_name.strip().split(' ')
    first_name = ""
    if parts:
        first_name = parts.pop(0)
        if first_name.lower() == "el" and parts:
            first_name += " " + parts.pop(0)
    last_name = ""
    if parts:
        last_name = parts.pop()
        if (last_name.lower() == 'i' or last_name.lower() == 'ii'
            or last_name.lower() == 'iii' and parts):
            last_name = parts.pop() + " " + last_name
    middle_initials = ""
    for middle_name in parts:
        if middle_name:
            middle_initials += middle_name[0]
    return first_name, middle_initials, last_name
[ "def", "full_name_natural_split", "(", "full_name", ")", ":", "parts", "=", "full_name", ".", "strip", "(", ")", ".", "split", "(", "' '", ")", "first_name", "=", "\"\"", "if", "parts", ":", "first_name", "=", "parts", ".", "pop", "(", "0", ")", "if", "first_name", ".", "lower", "(", ")", "==", "\"el\"", "and", "parts", ":", "first_name", "+=", "\" \"", "+", "parts", ".", "pop", "(", "0", ")", "last_name", "=", "\"\"", "if", "parts", ":", "last_name", "=", "parts", ".", "pop", "(", ")", "if", "(", "last_name", ".", "lower", "(", ")", "==", "'i'", "or", "last_name", ".", "lower", "(", ")", "==", "'ii'", "or", "last_name", ".", "lower", "(", ")", "==", "'iii'", "and", "parts", ")", ":", "last_name", "=", "parts", ".", "pop", "(", ")", "+", "\" \"", "+", "last_name", "middle_initials", "=", "\"\"", "for", "middle_name", "in", "parts", ":", "if", "middle_name", ":", "middle_initials", "+=", "middle_name", "[", "0", "]", "return", "first_name", ",", "middle_initials", ",", "last_name" ]
This function splits a full name into a natural first name, last name and middle initials.
[ "This", "function", "splits", "a", "full", "name", "into", "a", "natural", "first", "name", "last", "name", "and", "middle", "initials", "." ]
a0fe3cf3030dbbf09025c69ce75a69b326565dd8
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/helpers.py#L45-L66
train
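Two traced examples of the record above. Note that in the suffix check `and` binds tighter than `or`, so the `parts` guard only protects the 'iii' branch; an input like "John I" would exhaust `parts` and raise IndexError.

full_name_natural_split("John Ronald Reuel Tolkien")
# -> ('John', 'RR', 'Tolkien')
full_name_natural_split("El Greco")
# -> ('El Greco', '', '')  -- the 'el' prefix is folded into the first name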
pneff/wsgiservice
wsgiservice/xmlserializer.py
_get_xml_value
def _get_xml_value(value):
    """Convert an individual value to an XML string. Calls itself recursively
    for dictionaries and lists.

    Uses some heuristics to convert the data to XML:

        - In dictionaries, the keys become the tag name.
        - In lists the tag name is 'child' with an order-attribute giving
          the list index.
        - All other values are included as is.

    All values are escaped to fit into the XML document.

    :param value: The value to convert to XML.
    :type value: Any valid Python value
    :rtype: string
    """
    retval = []
    if isinstance(value, dict):
        for key, value in value.iteritems():
            retval.append('<' + xml_escape(str(key)) + '>')
            retval.append(_get_xml_value(value))
            retval.append('</' + xml_escape(str(key)) + '>')
    elif isinstance(value, list):
        for key, value in enumerate(value):
            retval.append('<child order="' + xml_escape(str(key)) + '">')
            retval.append(_get_xml_value(value))
            retval.append('</child>')
    elif isinstance(value, bool):
        retval.append(xml_escape(str(value).lower()))
    elif isinstance(value, unicode):
        retval.append(xml_escape(value.encode('utf-8')))
    else:
        retval.append(xml_escape(str(value)))
    return "".join(retval)
python
def _get_xml_value(value):
    """Convert an individual value to an XML string. Calls itself recursively
    for dictionaries and lists.

    Uses some heuristics to convert the data to XML:

        - In dictionaries, the keys become the tag name.
        - In lists the tag name is 'child' with an order-attribute giving
          the list index.
        - All other values are included as is.

    All values are escaped to fit into the XML document.

    :param value: The value to convert to XML.
    :type value: Any valid Python value
    :rtype: string
    """
    retval = []
    if isinstance(value, dict):
        for key, value in value.iteritems():
            retval.append('<' + xml_escape(str(key)) + '>')
            retval.append(_get_xml_value(value))
            retval.append('</' + xml_escape(str(key)) + '>')
    elif isinstance(value, list):
        for key, value in enumerate(value):
            retval.append('<child order="' + xml_escape(str(key)) + '">')
            retval.append(_get_xml_value(value))
            retval.append('</child>')
    elif isinstance(value, bool):
        retval.append(xml_escape(str(value).lower()))
    elif isinstance(value, unicode):
        retval.append(xml_escape(value.encode('utf-8')))
    else:
        retval.append(xml_escape(str(value)))
    return "".join(retval)
[ "def", "_get_xml_value", "(", "value", ")", ":", "retval", "=", "[", "]", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "for", "key", ",", "value", "in", "value", ".", "iteritems", "(", ")", ":", "retval", ".", "append", "(", "'<'", "+", "xml_escape", "(", "str", "(", "key", ")", ")", "+", "'>'", ")", "retval", ".", "append", "(", "_get_xml_value", "(", "value", ")", ")", "retval", ".", "append", "(", "'</'", "+", "xml_escape", "(", "str", "(", "key", ")", ")", "+", "'>'", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "for", "key", ",", "value", "in", "enumerate", "(", "value", ")", ":", "retval", ".", "append", "(", "'<child order=\"'", "+", "xml_escape", "(", "str", "(", "key", ")", ")", "+", "'\">'", ")", "retval", ".", "append", "(", "_get_xml_value", "(", "value", ")", ")", "retval", ".", "append", "(", "'</child>'", ")", "elif", "isinstance", "(", "value", ",", "bool", ")", ":", "retval", ".", "append", "(", "xml_escape", "(", "str", "(", "value", ")", ".", "lower", "(", ")", ")", ")", "elif", "isinstance", "(", "value", ",", "unicode", ")", ":", "retval", ".", "append", "(", "xml_escape", "(", "value", ".", "encode", "(", "'utf-8'", ")", ")", ")", "else", ":", "retval", ".", "append", "(", "xml_escape", "(", "str", "(", "value", ")", ")", ")", "return", "\"\"", ".", "join", "(", "retval", ")" ]
Convert an individual value to an XML string. Calls itself recursively
    for dictionaries and lists.

    Uses some heuristics to convert the data to XML:

        - In dictionaries, the keys become the tag name.
        - In lists the tag name is 'child' with an order-attribute giving
          the list index.
        - All other values are included as is.

    All values are escaped to fit into the XML document.

    :param value: The value to convert to XML.
    :type value: Any valid Python value
    :rtype: string
[ "Convert", "an", "individual", "value", "to", "an", "XML", "string", ".", "Calls", "itself", "recursively", "for", "dictionaries", "and", "lists", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/xmlserializer.py#L30-L63
train
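A sketch of the heuristics above, on Python 2 to match the iteritems/unicode usage in the record; dict key order may vary.

print(_get_xml_value({'user': {'active': True, 'roles': ['a', 'b']}}))
# -> <user><active>true</active><roles><child order="0">a</child>
#    <child order="1">b</child></roles></user>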
djaodjin/djaodjin-deployutils
deployutils/apps/django/management/commands/pullapp.py
fetch_changes
def fetch_changes(repo_path, up_commit='master'):
    """
    Fetch latest changes from stage and touch .timestamp
    if any python sources have been modified.
    """
    last_up_commit = None
    prevcwd = os.getcwd()
    try:
        gitexe = 'git'
        os.chdir(repo_path)
        old_sources_timestamp = sources_latest_timestamp('.')
        shell_command([gitexe, 'pull'])
        last_up_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        shell_command([gitexe, 'checkout', up_commit])
        up_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        new_sources_timestamp = sources_latest_timestamp('.')
        if old_sources_timestamp < new_sources_timestamp:
            with open('.timestamp', 'w') as up_commit_file:
                up_commit_file.write(up_commit)
    finally:
        os.chdir(prevcwd)
    return last_up_commit, up_commit
python
def fetch_changes(repo_path, up_commit='master'):
    """
    Fetch latest changes from stage and touch .timestamp
    if any python sources have been modified.
    """
    last_up_commit = None
    prevcwd = os.getcwd()
    try:
        gitexe = 'git'
        os.chdir(repo_path)
        old_sources_timestamp = sources_latest_timestamp('.')
        shell_command([gitexe, 'pull'])
        last_up_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        shell_command([gitexe, 'checkout', up_commit])
        up_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        new_sources_timestamp = sources_latest_timestamp('.')
        if old_sources_timestamp < new_sources_timestamp:
            with open('.timestamp', 'w') as up_commit_file:
                up_commit_file.write(up_commit)
    finally:
        os.chdir(prevcwd)
    return last_up_commit, up_commit
[ "def", "fetch_changes", "(", "repo_path", ",", "up_commit", "=", "'master'", ")", ":", "last_up_commit", "=", "None", "prevcwd", "=", "os", ".", "getcwd", "(", ")", "try", ":", "gitexe", "=", "'git'", "os", ".", "chdir", "(", "repo_path", ")", "old_sources_timestamp", "=", "sources_latest_timestamp", "(", "'.'", ")", "shell_command", "(", "[", "gitexe", ",", "'pull'", "]", ")", "last_up_commit", "=", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'rev-parse'", ",", "'HEAD'", "]", ")", "shell_command", "(", "[", "gitexe", ",", "'checkout'", ",", "up_commit", "]", ")", "up_commit", "=", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'rev-parse'", ",", "'HEAD'", "]", ")", "new_sources_timestamp", "=", "sources_latest_timestamp", "(", "'.'", ")", "if", "old_sources_timestamp", "<", "new_sources_timestamp", ":", "with", "open", "(", "'.timestamp'", ",", "'w'", ")", "as", "up_commit_file", ":", "up_commit_file", ".", "write", "(", "up_commit", ")", "finally", ":", "os", ".", "chdir", "(", "prevcwd", ")", "return", "last_up_commit", ",", "up_commit" ]
Fetch latest changes from stage and touch .timestamp if any python sources have been modified.
[ "Fetch", "latest", "changes", "from", "stage", "and", "touch", ".", "timestamp", "if", "any", "python", "sources", "have", "been", "modified", "." ]
a0fe3cf3030dbbf09025c69ce75a69b326565dd8
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/apps/django/management/commands/pullapp.py#L114-L135
train
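A hedged usage sketch for the record above; the repository path is made up.

last, current = fetch_changes('/srv/checkouts/myapp', up_commit='master')
# `last` is HEAD right after `git pull`, `current` is HEAD after the checkout;
# .timestamp is rewritten only when a source file became newer than before.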
djaodjin/djaodjin-deployutils
deployutils/apps/django/management/commands/pullapp.py
migrate_all
def migrate_all():
    """
    Create schema migrations for all apps specified in INSTALLED_APPS,
    then run a migrate command.
    """
    if 'south' in settings.INSTALLED_APPS:
        return _south_migrate_all()
    from django.core.management.commands import makemigrations, migrate
    schema_args = [sys.executable, 'makemigrations']
    for app in settings.INSTALLED_APPS:
        if not app.startswith('django'):
            schema_args += [app]
    schema_cmd = makemigrations.Command()
    schema_cmd.run_from_argv(schema_args)
    migrate_cmd = migrate.Command()
    sys.stderr.write("MIGRATE ALL!\n")
    return migrate_cmd.run_from_argv([sys.executable, 'migrate'])
python
def migrate_all():
    """
    Create schema migrations for all apps specified in INSTALLED_APPS,
    then run a migrate command.
    """
    if 'south' in settings.INSTALLED_APPS:
        return _south_migrate_all()
    from django.core.management.commands import makemigrations, migrate
    schema_args = [sys.executable, 'makemigrations']
    for app in settings.INSTALLED_APPS:
        if not app.startswith('django'):
            schema_args += [app]
    schema_cmd = makemigrations.Command()
    schema_cmd.run_from_argv(schema_args)
    migrate_cmd = migrate.Command()
    sys.stderr.write("MIGRATE ALL!\n")
    return migrate_cmd.run_from_argv([sys.executable, 'migrate'])
[ "def", "migrate_all", "(", ")", ":", "if", "'south'", "in", "settings", ".", "INSTALLED_APPS", ":", "return", "_south_migrate_all", "(", ")", "from", "django", ".", "core", ".", "management", ".", "commands", "import", "makemigrations", ",", "migrate", "schema_args", "=", "[", "sys", ".", "executable", ",", "'makemigrations'", "]", "for", "app", "in", "settings", ".", "INSTALLED_APPS", ":", "if", "not", "app", ".", "startswith", "(", "'django'", ")", ":", "schema_args", "+=", "[", "app", "]", "schema_cmd", "=", "makemigrations", ".", "Command", "(", ")", "schema_cmd", ".", "run_from_argv", "(", "schema_args", ")", "migrate_cmd", "=", "migrate", ".", "Command", "(", ")", "sys", ".", "stderr", ".", "write", "(", "\"MIGRATE ALL!\\n\"", ")", "return", "migrate_cmd", ".", "run_from_argv", "(", "[", "sys", ".", "executable", ",", "'migrate'", "]", ")" ]
Create schema migrations for all apps specified in INSTALLED_APPS, then run a migrate command.
[ "Create", "schema", "migrations", "for", "all", "apps", "specified", "in", "INSTALLED_APPS", "then", "run", "a", "migrate", "command", "." ]
a0fe3cf3030dbbf09025c69ce75a69b326565dd8
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/apps/django/management/commands/pullapp.py#L200-L217
train
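A similar effect can be sketched with Django's public management API instead of instantiating the Command classes directly; the app label below is hypothetical.

from django.core.management import call_command

call_command('makemigrations', 'myapp')  # hypothetical app label
call_command('migrate')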
carta/ldap_tools
src/ldap_tools/key.py
API.add
def add(self, username, user_api, filename=None):
        """
        Add SSH public key to a user's profile.

        Args:
            username: Username to attach SSH public key to
            filename: Filename containing keys to add (optional)

        Raises:
            ldap3.core.exceptions.LDAPNoSuchAttributeResult:
                ldapPublicKey isn't attached to objectClass
        """
        keys = API.__get_keys(filename)
        user = user_api.find(username)[0]
        distinguished_name = user.entry_dn

        if 'ldapPublicKey' not in user.objectClass:
            raise ldap3.core.exceptions.LDAPNoSuchAttributeResult(
                'LDAP Public Key Object Class not found. ' +
                'Please ensure user was created correctly.')
        else:
            for key in list(set(keys)):  # prevents duplicate insertion
                print(key)
                try:
                    SSHKey(key).parse()
                except Exception as err:
                    raise err from None
                else:
                    operation = {'sshPublicKey': [(ldap3.MODIFY_ADD, [key])]}
                    self.client.modify(distinguished_name, operation)
python
def add(self, username, user_api, filename=None):
        """
        Add SSH public key to a user's profile.

        Args:
            username: Username to attach SSH public key to
            filename: Filename containing keys to add (optional)

        Raises:
            ldap3.core.exceptions.LDAPNoSuchAttributeResult:
                ldapPublicKey isn't attached to objectClass
        """
        keys = API.__get_keys(filename)
        user = user_api.find(username)[0]
        distinguished_name = user.entry_dn

        if 'ldapPublicKey' not in user.objectClass:
            raise ldap3.core.exceptions.LDAPNoSuchAttributeResult(
                'LDAP Public Key Object Class not found. ' +
                'Please ensure user was created correctly.')
        else:
            for key in list(set(keys)):  # prevents duplicate insertion
                print(key)
                try:
                    SSHKey(key).parse()
                except Exception as err:
                    raise err from None
                else:
                    operation = {'sshPublicKey': [(ldap3.MODIFY_ADD, [key])]}
                    self.client.modify(distinguished_name, operation)
[ "def", "add", "(", "self", ",", "username", ",", "user_api", ",", "filename", "=", "None", ")", ":", "keys", "=", "API", ".", "__get_keys", "(", "filename", ")", "user", "=", "user_api", ".", "find", "(", "username", ")", "[", "0", "]", "distinguished_name", "=", "user", ".", "entry_dn", "if", "'ldapPublicKey'", "not", "in", "user", ".", "objectClass", ":", "raise", "ldap3", ".", "core", ".", "exceptions", ".", "LDAPNoSuchAttributeResult", "(", "'LDAP Public Key Object Class not found. '", "+", "'Please ensure user was created correctly.'", ")", "else", ":", "for", "key", "in", "list", "(", "set", "(", "keys", ")", ")", ":", "# prevents duplicate insertion", "print", "(", "key", ")", "try", ":", "SSHKey", "(", "key", ")", ".", "parse", "(", ")", "except", "Exception", "as", "err", ":", "raise", "err", "from", "None", "else", ":", "operation", "=", "{", "'sshPublicKey'", ":", "[", "(", "ldap3", ".", "MODIFY_ADD", ",", "[", "key", "]", ")", "]", "}", "self", ".", "client", ".", "modify", "(", "distinguished_name", ",", "operation", ")" ]
Add SSH public key to a user's profile.

        Args:
            username: Username to attach SSH public key to
            filename: Filename containing keys to add (optional)

        Raises:
            ldap3.core.exceptions.LDAPNoSuchAttributeResult:
                ldapPublicKey isn't attached to objectClass
[ "Add", "SSH", "public", "key", "to", "a", "user", "s", "profile", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/key.py#L22-L51
train
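The modify payload built above, shown standalone against a raw ldap3 connection; the server, DN, and key material are made up for illustration.

import ldap3

server = ldap3.Server('ldap://localhost')              # hypothetical server
connection = ldap3.Connection(server, auto_bind=True)
operation = {'sshPublicKey': [(ldap3.MODIFY_ADD, ['ssh-rsa AAAAB3NzaC1yc2E jdoe@laptop'])]}
connection.modify('uid=jdoe,ou=People,dc=example,dc=org', operation)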
carta/ldap_tools
src/ldap_tools/key.py
API.remove
def remove(self, username, user_api, filename=None, force=False):
        """Remove specified SSH public key from specified user."""
        self.keys = API.__get_keys(filename)
        self.username = username
        user = user_api.find(username)[0]

        if not force:  # pragma: no cover
            self.__confirm()

        for key in self.__delete_keys():
            operation = {'sshPublicKey': [(ldap3.MODIFY_DELETE, [key])]}
            self.client.modify(user.entry_dn, operation)
python
def remove(self, username, user_api, filename=None, force=False):
        """Remove specified SSH public key from specified user."""
        self.keys = API.__get_keys(filename)
        self.username = username
        user = user_api.find(username)[0]

        if not force:  # pragma: no cover
            self.__confirm()

        for key in self.__delete_keys():
            operation = {'sshPublicKey': [(ldap3.MODIFY_DELETE, [key])]}
            self.client.modify(user.entry_dn, operation)
[ "def", "remove", "(", "self", ",", "username", ",", "user_api", ",", "filename", "=", "None", ",", "force", "=", "False", ")", ":", "self", ".", "keys", "=", "API", ".", "__get_keys", "(", "filename", ")", "self", ".", "username", "=", "username", "user", "=", "user_api", ".", "find", "(", "username", ")", "[", "0", "]", "if", "not", "force", ":", "# pragma: no cover", "self", ".", "__confirm", "(", ")", "for", "key", "in", "self", ".", "__delete_keys", "(", ")", ":", "operation", "=", "{", "'sshPublicKey'", ":", "[", "(", "ldap3", ".", "MODIFY_DELETE", ",", "[", "key", "]", ")", "]", "}", "self", ".", "client", ".", "modify", "(", "user", ".", "entry_dn", ",", "operation", ")" ]
Remove specified SSH public key from specified user.
[ "Remove", "specified", "SSH", "public", "key", "from", "specified", "user", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/key.py#L53-L64
train
carta/ldap_tools
src/ldap_tools/key.py
API.get_keys_from_ldap
def get_keys_from_ldap(self, username=None):
        """
        Fetch keys from ldap.

        Args:
            username Username associated with keys to fetch (optional)

        Returns:
            Array of dictionaries in '{username: [public keys]}' format
        """
        result_dict = {}
        filter = ['(sshPublicKey=*)']
        if username is not None:
            filter.append('(uid={})'.format(username))
        attributes = ['uid', 'sshPublicKey']
        results = self.client.search(filter, attributes)
        for result in results:
            result_dict[result.uid.value] = result.sshPublicKey.values
        return result_dict
python
def get_keys_from_ldap(self, username=None):
        """
        Fetch keys from ldap.

        Args:
            username Username associated with keys to fetch (optional)

        Returns:
            Array of dictionaries in '{username: [public keys]}' format
        """
        result_dict = {}
        filter = ['(sshPublicKey=*)']
        if username is not None:
            filter.append('(uid={})'.format(username))
        attributes = ['uid', 'sshPublicKey']
        results = self.client.search(filter, attributes)
        for result in results:
            result_dict[result.uid.value] = result.sshPublicKey.values
        return result_dict
[ "def", "get_keys_from_ldap", "(", "self", ",", "username", "=", "None", ")", ":", "result_dict", "=", "{", "}", "filter", "=", "[", "'(sshPublicKey=*)'", "]", "if", "username", "is", "not", "None", ":", "filter", ".", "append", "(", "'(uid={})'", ".", "format", "(", "username", ")", ")", "attributes", "=", "[", "'uid'", ",", "'sshPublicKey'", "]", "results", "=", "self", ".", "client", ".", "search", "(", "filter", ",", "attributes", ")", "for", "result", "in", "results", ":", "result_dict", "[", "result", ".", "uid", ".", "value", "]", "=", "result", ".", "sshPublicKey", ".", "values", "return", "result_dict" ]
Fetch keys from ldap.

        Args:
            username Username associated with keys to fetch (optional)

        Returns:
            Array of dictionaries in '{username: [public keys]}' format
[ "Fetch", "keys", "from", "ldap", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/key.py#L77-L96
train
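For username='jdoe' the record above assembles the filter list ['(sshPublicKey=*)', '(uid=jdoe)'], presumably AND-ed inside Client.search. A usage sketch, with the Client wiring taken from the CLI records of this repo and an illustrative return value:

client = Client()
client.prepare_connection()
key_api = API(client)
print(key_api.get_keys_from_ldap('jdoe'))
# -> {'jdoe': ['ssh-rsa AAAAB3NzaC1yc2E jdoe@laptop']}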
carta/ldap_tools
src/ldap_tools/key.py
CLI.add
def add(config, username, filename):
        """Add user's SSH public key to their LDAP entry."""
        try:
            client = Client()
            client.prepare_connection()
            user_api = UserApi(client)
            key_api = API(client)
            key_api.add(username, user_api, filename)
        except (ldap3.core.exceptions.LDAPNoSuchAttributeResult,
                ldap_tools.exceptions.InvalidResult,
                ldap3.core.exceptions.LDAPAttributeOrValueExistsResult
                ) as err:  # pragma: no cover
            print('{}: {}'.format(type(err), err.args[0]))
        except Exception as err:  # pragma: no cover
            raise err from None
python
def add(config, username, filename):
        """Add user's SSH public key to their LDAP entry."""
        try:
            client = Client()
            client.prepare_connection()
            user_api = UserApi(client)
            key_api = API(client)
            key_api.add(username, user_api, filename)
        except (ldap3.core.exceptions.LDAPNoSuchAttributeResult,
                ldap_tools.exceptions.InvalidResult,
                ldap3.core.exceptions.LDAPAttributeOrValueExistsResult
                ) as err:  # pragma: no cover
            print('{}: {}'.format(type(err), err.args[0]))
        except Exception as err:  # pragma: no cover
            raise err from None
[ "def", "add", "(", "config", ",", "username", ",", "filename", ")", ":", "try", ":", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "user_api", "=", "UserApi", "(", "client", ")", "key_api", "=", "API", "(", "client", ")", "key_api", ".", "add", "(", "username", ",", "user_api", ",", "filename", ")", "except", "(", "ldap3", ".", "core", ".", "exceptions", ".", "LDAPNoSuchAttributeResult", ",", "ldap_tools", ".", "exceptions", ".", "InvalidResult", ",", "ldap3", ".", "core", ".", "exceptions", ".", "LDAPAttributeOrValueExistsResult", ")", "as", "err", ":", "# pragma: no cover", "print", "(", "'{}: {}'", ".", "format", "(", "type", "(", "err", ")", ",", "err", ".", "args", "[", "0", "]", ")", ")", "except", "Exception", "as", "err", ":", "# pragma: no cover", "raise", "err", "from", "None" ]
Add user's SSH public key to their LDAP entry.
[ "Add", "user", "s", "SSH", "public", "key", "to", "their", "LDAP", "entry", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/key.py#L170-L184
train
carta/ldap_tools
src/ldap_tools/key.py
CLI.remove
def remove(config, username, filename, force):
        """Remove user's SSH public key from their LDAP entry."""
        client = Client()
        client.prepare_connection()
        user_api = UserApi(client)
        key_api = API(client)
        key_api.remove(username, user_api, filename, force)
python
def remove(config, username, filename, force):
        """Remove user's SSH public key from their LDAP entry."""
        client = Client()
        client.prepare_connection()
        user_api = UserApi(client)
        key_api = API(client)
        key_api.remove(username, user_api, filename, force)
[ "def", "remove", "(", "config", ",", "username", ",", "filename", ",", "force", ")", ":", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "user_api", "=", "UserApi", "(", "client", ")", "key_api", "=", "API", "(", "client", ")", "key_api", ".", "remove", "(", "username", ",", "user_api", ",", "filename", ",", "force", ")" ]
Remove user's SSH public key from their LDAP entry.
[ "Remove", "user", "s", "SSH", "public", "key", "from", "their", "LDAP", "entry", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/key.py#L195-L201
train
carta/ldap_tools
src/ldap_tools/key.py
CLI.install
def install(config):  # pragma: no cover
        """Install user's SSH public key to the local system."""
        client = Client()
        client.prepare_connection()
        key_api = API(client)
        key_api.install()
python
def install(config):  # pragma: no cover
        """Install user's SSH public key to the local system."""
        client = Client()
        client.prepare_connection()
        key_api = API(client)
        key_api.install()
[ "def", "install", "(", "config", ")", ":", "# pragma: no cover", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "key_api", "=", "API", "(", "client", ")", "key_api", ".", "install", "(", ")" ]
Install user's SSH public key to the local system.
[ "Install", "user", "s", "SSH", "public", "key", "to", "the", "local", "system", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/key.py#L205-L210
train
carta/ldap_tools
src/ldap_tools/key.py
CLI.show
def show(config, username):  # pragma: no cover
        """Show a user's SSH public key from their LDAP entry."""
        client = Client()
        client.prepare_connection()
        key_api = API(client)
        for key, value in key_api.get_keys_from_ldap(username).items():
            print(value)
python
def show(config, username):  # pragma: no cover
        """Show a user's SSH public key from their LDAP entry."""
        client = Client()
        client.prepare_connection()
        key_api = API(client)
        for key, value in key_api.get_keys_from_ldap(username).items():
            print(value)
[ "def", "show", "(", "config", ",", "username", ")", ":", "# pragma: no cover", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "key_api", "=", "API", "(", "client", ")", "for", "key", ",", "value", "in", "key_api", ".", "get_keys_from_ldap", "(", "username", ")", ".", "items", "(", ")", ":", "print", "(", "value", ")" ]
Show a user's SSH public key from their LDAP entry.
[ "Show", "a", "user", "s", "SSH", "public", "key", "from", "their", "LDAP", "entry", "." ]
7c039304a5abaf836c7afc35cf068b4471306264
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/key.py#L232-L238
train
openearth/mmi-python
mmi/runner_legacy.py
main
def main(): """run mmi runner""" logging.basicConfig() logger.info("mmi-runner") warnings.warn( "You are using the mmi-runner script, please switch to `mmi runner`", DeprecationWarning ) arguments = docopt.docopt(__doc__) kwargs = parse_args(arguments) runner = mmi.runner.Runner( **kwargs ) runner.run()
python
def main(): """run mmi runner""" logging.basicConfig() logger.info("mmi-runner") warnings.warn( "You are using the mmi-runner script, please switch to `mmi runner`", DeprecationWarning ) arguments = docopt.docopt(__doc__) kwargs = parse_args(arguments) runner = mmi.runner.Runner( **kwargs ) runner.run()
[ "def", "main", "(", ")", ":", "logging", ".", "basicConfig", "(", ")", "logger", ".", "info", "(", "\"mmi-runner\"", ")", "warnings", ".", "warn", "(", "\"You are using the mmi-runner script, please switch to `mmi runner`\"", ",", "DeprecationWarning", ")", "arguments", "=", "docopt", ".", "docopt", "(", "__doc__", ")", "kwargs", "=", "parse_args", "(", "arguments", ")", "runner", "=", "mmi", ".", "runner", ".", "Runner", "(", "*", "*", "kwargs", ")", "runner", ".", "run", "(", ")" ]
run mmi runner
[ "run", "mmi", "runner" ]
a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/runner_legacy.py#L34-L47
train
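A note on the deprecation shim above: in recent Python versions, DeprecationWarning is shown by default only when triggered in __main__, so a legacy entry point often pairs it with stacklevel so the caller is blamed. A minimal sketch with illustrative names, not taken from the mmi codebase:

import warnings

def legacy_main():
    # stacklevel=2 attributes the warning to the caller of legacy_main,
    # not to this shim itself.
    warnings.warn("use `mmi runner` instead of mmi-runner",
                  DeprecationWarning, stacklevel=2)

warnings.simplefilter("always", DeprecationWarning)  # make it visible here
legacy_main()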
rgmining/common
setup.py
load_requires_from_file
def load_requires_from_file(filepath): """Read a package list from a given file path. Args: filepath: file path of the package list. Returns: a list of package names. """ with open(filepath) as fp: return [pkg_name.strip() for pkg_name in fp.readlines()]
python
def load_requires_from_file(filepath): """Read a package list from a given file path. Args: filepath: file path of the package list. Returns: a list of package names. """ with open(filepath) as fp: return [pkg_name.strip() for pkg_name in fp.readlines()]
[ "def", "load_requires_from_file", "(", "filepath", ")", ":", "with", "open", "(", "filepath", ")", "as", "fp", ":", "return", "[", "pkg_name", ".", "strip", "(", ")", "for", "pkg_name", "in", "fp", ".", "readlines", "(", ")", "]" ]
Read a package list from a given file path. Args: filepath: file path of the package list. Returns: a list of package names.
[ "Read", "a", "package", "list", "from", "a", "given", "file", "path", "." ]
2462a4d54f32a82eadd7b1e28675b3c8bcd172b2
https://github.com/rgmining/common/blob/2462a4d54f32a82eadd7b1e28675b3c8bcd172b2/setup.py#L34-L44
train
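For context, a helper like load_requires_from_file is typically fed straight into setup(); a minimal sketch assuming a requirements.txt next to setup.py (the file name and package metadata are invented, not taken from rgmining/common):

from setuptools import setup

def load_requires_from_file(filepath):
    # One requirement per line, as in the helper above.
    with open(filepath) as fp:
        return [pkg_name.strip() for pkg_name in fp.readlines()]

setup(
    name="example-package",   # placeholder metadata
    version="0.0.1",
    install_requires=load_requires_from_file("requirements.txt"),
)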
JoMingyu/TourAPI
tourapi/api.py
TourAPI.get_tour_list
def get_tour_list(self): """ Inquire all tour list :rtype: list """ resp = json.loads(urlopen(self.tour_list_url.format(1)).read().decode('utf-8')) total_count = resp['response']['body']['totalCount'] # Get total count resp = json.loads(urlopen(self.tour_list_url.format(total_count)).read().decode('utf-8')) data = resp['response']['body']['items']['item'] # Extract data list keychain = { 'contentid': ('content_id', None), 'contenttypeid': ('content_type_id', None), 'title': ('title', None), 'addr1': ('address', None), 'zipcode': ('zipcode', None), 'sigungucode': ('municipality', None), 'mapx': ('x', None), 'mapy': ('y', None), 'cat1': ('main_category', None), 'cat2': ('middle_category', None), 'cat3': ('small_category', None), 'readcount': ('views', 0), 'tel': ('tel', None), 'firstimage': ('image', None), } for tour in data: _dict_key_changer(tour, keychain) tour['creation_date'] = str(tour.pop('createdtime'))[:8] if 'createdtime' in tour else None tour['modified_date'] = str(tour.pop('modifiedtime'))[:8] if 'modifiedtime' in tour else None tour.pop('areacode', None) tour.pop('addr2', None) tour.pop('mlevel', None) # Manufacture return data
python
def get_tour_list(self): """ Inquire all tour list :rtype: list """ resp = json.loads(urlopen(self.tour_list_url.format(1)).read().decode('utf-8')) total_count = resp['response']['body']['totalCount'] # Get total count resp = json.loads(urlopen(self.tour_list_url.format(total_count)).read().decode('utf-8')) data = resp['response']['body']['items']['item'] # Extract data list keychain = { 'contentid': ('content_id', None), 'contenttypeid': ('content_type_id', None), 'title': ('title', None), 'addr1': ('address', None), 'zipcode': ('zipcode', None), 'sigungucode': ('municipality', None), 'mapx': ('x', None), 'mapy': ('y', None), 'cat1': ('main_category', None), 'cat2': ('middle_category', None), 'cat3': ('small_category', None), 'readcount': ('views', 0), 'tel': ('tel', None), 'firstimage': ('image', None), } for tour in data: _dict_key_changer(tour, keychain) tour['creation_date'] = str(tour.pop('createdtime'))[:8] if 'createdtime' in tour else None tour['modified_date'] = str(tour.pop('modifiedtime'))[:8] if 'modifiedtime' in tour else None tour.pop('areacode', None) tour.pop('addr2', None) tour.pop('mlevel', None) # Manufacture return data
[ "def", "get_tour_list", "(", "self", ")", ":", "resp", "=", "json", ".", "loads", "(", "urlopen", "(", "self", ".", "tour_list_url", ".", "format", "(", "1", ")", ")", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "total_count", "=", "resp", "[", "'response'", "]", "[", "'body'", "]", "[", "'totalCount'", "]", "# Get total count", "resp", "=", "json", ".", "loads", "(", "urlopen", "(", "self", ".", "tour_list_url", ".", "format", "(", "total_count", ")", ")", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "data", "=", "resp", "[", "'response'", "]", "[", "'body'", "]", "[", "'items'", "]", "[", "'item'", "]", "# Extract data list", "keychain", "=", "{", "'contentid'", ":", "(", "'content_id'", ",", "None", ")", ",", "'contenttypeid'", ":", "(", "'content_type_id'", ",", "None", ")", ",", "'title'", ":", "(", "'title'", ",", "None", ")", ",", "'addr1'", ":", "(", "'address'", ",", "None", ")", ",", "'zipcode'", ":", "(", "'zipcode'", ",", "None", ")", ",", "'sigungucode'", ":", "(", "'municipality'", ",", "None", ")", ",", "'mapx'", ":", "(", "'x'", ",", "None", ")", ",", "'mapy'", ":", "(", "'y'", ",", "None", ")", ",", "'cat1'", ":", "(", "'main_category'", ",", "None", ")", ",", "'cat2'", ":", "(", "'middle_category'", ",", "None", ")", ",", "'cat3'", ":", "(", "'small_category'", ",", "None", ")", ",", "'readcount'", ":", "(", "'views'", ",", "0", ")", ",", "'tel'", ":", "(", "'tel'", ",", "None", ")", ",", "'firstimage'", ":", "(", "'image'", ",", "None", ")", ",", "}", "for", "tour", "in", "data", ":", "_dict_key_changer", "(", "tour", ",", "keychain", ")", "tour", "[", "'creation_date'", "]", "=", "str", "(", "tour", ".", "pop", "(", "'createdtime'", ")", ")", "[", ":", "8", "]", "if", "'createdtime'", "in", "tour", "else", "None", "tour", "[", "'modified_date'", "]", "=", "str", "(", "tour", ".", "pop", "(", "'modifiedtime'", ")", ")", "[", ":", "8", "]", "if", "'modifiedtime'", "in", "tour", "else", "None", "tour", ".", "pop", "(", "'areacode'", ",", "None", ")", "tour", ".", "pop", "(", "'addr2'", ",", "None", ")", "tour", ".", "pop", "(", "'mlevel'", ",", "None", ")", "# Manufacture", "return", "data" ]
Inquire the full tour list :rtype: list
[ "Inquire", "all", "tour", "list" ]
d4b2a6415e01efaa0ddddd55126e2f4ae9d19d33
https://github.com/JoMingyu/TourAPI/blob/d4b2a6415e01efaa0ddddd55126e2f4ae9d19d33/tourapi/api.py#L61-L103
train
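The record above calls _dict_key_changer without showing it; here is a plausible reconstruction, assuming keychain maps each source key to a (new_key, default) pair as the call sites suggest (the real helper in tourapi/api.py may differ):

def _dict_key_changer(record, keychain):
    # Rename old keys in place, falling back to the given default when a
    # source key is absent from the record.
    for old_key, (new_key, default) in keychain.items():
        record[new_key] = record.pop(old_key, default)

tour = {'contentid': 12345, 'title': 'Palace'}
_dict_key_changer(tour, {'contentid': ('content_id', None),
                         'readcount': ('views', 0)})
print(tour)  # {'title': 'Palace', 'content_id': 12345, 'views': 0}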
JoMingyu/TourAPI
tourapi/api.py
TourAPI.get_detail_common
def get_detail_common(self, content_id): """ Inquire common detail data :param content_id: Content ID to inquire :type content_id: str :rtype: dict """ resp = json.loads(urlopen(self.detail_common_url.format(str(content_id))).read().decode('utf-8')) data = resp['response']['body']['items']['item'] # Extract data keychain = { 'contenttypeid': ('content_type_id', None), 'overview': ('overview', None), 'tel': ('tel', None), 'telname': ('tel_owner', None), 'booktour': ('in_book', 0) } _dict_key_changer(data, keychain) try: data['homepage'] = re.findall('http\w?://[\w|.]+', data.pop('homepage'))[0] if 'homepage' in data else None except IndexError: data['homepage'] = None data.pop('contentid', None) data.pop('title', None) data.pop('createdtime', None) data.pop('modifiedtime', None) # Manufacture return data
python
def get_detail_common(self, content_id): """ Inquire common detail data :param content_id: Content ID to inquire :type content_id: str :rtype: dict """ resp = json.loads(urlopen(self.detail_common_url.format(str(content_id))).read().decode('utf-8')) data = resp['response']['body']['items']['item'] # Extract data keychain = { 'contenttypeid': ('content_type_id', None), 'overview': ('overview', None), 'tel': ('tel', None), 'telname': ('tel_owner', None), 'booktour': ('in_book', 0) } _dict_key_changer(data, keychain) try: data['homepage'] = re.findall('http\w?://[\w|.]+', data.pop('homepage'))[0] if 'homepage' in data else None except IndexError: data['homepage'] = None data.pop('contentid', None) data.pop('title', None) data.pop('createdtime', None) data.pop('modifiedtime', None) # Manufacture return data
[ "def", "get_detail_common", "(", "self", ",", "content_id", ")", ":", "resp", "=", "json", ".", "loads", "(", "urlopen", "(", "self", ".", "detail_common_url", ".", "format", "(", "str", "(", "content_id", ")", ")", ")", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "data", "=", "resp", "[", "'response'", "]", "[", "'body'", "]", "[", "'items'", "]", "[", "'item'", "]", "# Extract data", "keychain", "=", "{", "'contenttypeid'", ":", "(", "'content_type_id'", ",", "None", ")", ",", "'overview'", ":", "(", "'overview'", ",", "None", ")", ",", "'tel'", ":", "(", "'tel'", ",", "None", ")", ",", "'telname'", ":", "(", "'tel_owner'", ",", "None", ")", ",", "'booktour'", ":", "(", "'in_book'", ",", "0", ")", "}", "_dict_key_changer", "(", "data", ",", "keychain", ")", "try", ":", "data", "[", "'homepage'", "]", "=", "re", ".", "findall", "(", "'http\\w?://[\\w|.]+'", ",", "data", ".", "pop", "(", "'homepage'", ")", ")", "[", "0", "]", "if", "'homepage'", "in", "data", "else", "None", "except", "IndexError", ":", "data", "[", "'homepage'", "]", "=", "None", "data", ".", "pop", "(", "'contentid'", ",", "None", ")", "data", ".", "pop", "(", "'title'", ",", "None", ")", "data", ".", "pop", "(", "'createdtime'", ",", "None", ")", "data", ".", "pop", "(", "'modifiedtime'", ",", "None", ")", "# Manufacture", "return", "data" ]
Inquire common detail data :param content_id: Content ID to inquire :type content_id: str :rtype: dict
[ "Inquire", "common", "detail", "data" ]
d4b2a6415e01efaa0ddddd55126e2f4ae9d19d33
https://github.com/JoMingyu/TourAPI/blob/d4b2a6415e01efaa0ddddd55126e2f4ae9d19d33/tourapi/api.py#L105-L138
train
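One detail worth noting in get_detail_common: the homepage pattern http\w?://[\w|.]+ is deliberately loose. The optional \w also absorbs the 's' of https, while the character class [\w|.] contains no '/', so paths are cut off (and it admits a literal '|'). A quick demonstration:

import re

pattern = r'http\w?://[\w|.]+'
# The optional \w matches the 's' in https; matching stops at '/',
# which is not in the character class.
print(re.findall(pattern, 'see https://www.example.com/path for details'))
# ['https://www.example.com']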
JoMingyu/TourAPI
tourapi/api.py
TourAPI.get_detail_images
def get_detail_images(self, content_id): """ Inquire detail images :param content_id: Content ID to inquire :type content_id: str :rtype: list """ resp = json.loads(urlopen(self.additional_images_url.format(content_id, 1)).read().decode('utf-8')) total_count = resp['response']['body']['totalCount'] # Get total count resp = json.loads(urlopen(self.additional_images_url.format(content_id, total_count)).read().decode('utf-8')) try: data = resp['response']['body']['items']['item'] # Extract data list if type(data) is dict: data.pop('contentid', None) data.pop('serialnum', None) data['origin'] = data.pop('originimgurl', None) data['small'] = data.pop('smallimageurl', None) # Manufacture else: for img in data: if type(img) is dict: img.pop('contentid', None) img.pop('serialnum', None) img['origin'] = img.pop('originimgurl', None) img['small'] = img.pop('smallimageurl', None) # Manufacture else: del img return data if type(data) is list else [data] except TypeError: return None
python
def get_detail_images(self, content_id): """ Inquire detail images :param content_id: Content ID to inquire :type content_id: str :rtype: list """ resp = json.loads(urlopen(self.additional_images_url.format(content_id, 1)).read().decode('utf-8')) total_count = resp['response']['body']['totalCount'] # Get total count resp = json.loads(urlopen(self.additional_images_url.format(content_id, total_count)).read().decode('utf-8')) try: data = resp['response']['body']['items']['item'] # Extract data list if type(data) is dict: data.pop('contentid', None) data.pop('serialnum', None) data['origin'] = data.pop('originimgurl', None) data['small'] = data.pop('smallimageurl', None) # Manufacture else: for img in data: if type(img) is dict: img.pop('contentid', None) img.pop('serialnum', None) img['origin'] = img.pop('originimgurl', None) img['small'] = img.pop('smallimageurl', None) # Manufacture else: del img return data if type(data) is list else [data] except TypeError: return None
[ "def", "get_detail_images", "(", "self", ",", "content_id", ")", ":", "resp", "=", "json", ".", "loads", "(", "urlopen", "(", "self", ".", "additional_images_url", ".", "format", "(", "content_id", ",", "1", ")", ")", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "total_count", "=", "resp", "[", "'response'", "]", "[", "'body'", "]", "[", "'totalCount'", "]", "# Get total count", "resp", "=", "json", ".", "loads", "(", "urlopen", "(", "self", ".", "additional_images_url", ".", "format", "(", "content_id", ",", "total_count", ")", ")", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "try", ":", "data", "=", "resp", "[", "'response'", "]", "[", "'body'", "]", "[", "'items'", "]", "[", "'item'", "]", "# Extract data list", "if", "type", "(", "data", ")", "is", "dict", ":", "data", ".", "pop", "(", "'contentid'", ",", "None", ")", "data", ".", "pop", "(", "'serialnum'", ",", "None", ")", "data", "[", "'origin'", "]", "=", "data", ".", "pop", "(", "'originimgurl'", ",", "None", ")", "data", "[", "'small'", "]", "=", "data", ".", "pop", "(", "'smallimageurl'", ",", "None", ")", "# Manufacture", "else", ":", "for", "img", "in", "data", ":", "if", "type", "(", "img", ")", "is", "dict", ":", "img", ".", "pop", "(", "'contentid'", ",", "None", ")", "img", ".", "pop", "(", "'serialnum'", ",", "None", ")", "img", "[", "'origin'", "]", "=", "img", ".", "pop", "(", "'originimgurl'", ",", "None", ")", "img", "[", "'small'", "]", "=", "img", ".", "pop", "(", "'smallimageurl'", ",", "None", ")", "# Manufacture", "else", ":", "del", "img", "return", "data", "if", "type", "(", "data", ")", "is", "list", "else", "[", "data", "]", "except", "TypeError", ":", "return", "None" ]
Inquire detail images :param content_id: Content ID to inquire :type content_id: str :rtype: list
[ "Inquire", "detail", "images" ]
d4b2a6415e01efaa0ddddd55126e2f4ae9d19d33
https://github.com/JoMingyu/TourAPI/blob/d4b2a6415e01efaa0ddddd55126e2f4ae9d19d33/tourapi/api.py#L325-L361
train
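The dict-vs-list branching in get_detail_images handles an API quirk where a single image comes back as an object rather than a one-element array; the normalization reduces to a tiny generic helper, sketched here outside any particular API:

def ensure_list(item):
    # Coerce a payload that may be a single object or a list of objects
    # into a list, so callers only handle one shape.
    return item if isinstance(item, list) else [item]

print(ensure_list({'origin': 'a.jpg'}))    # [{'origin': 'a.jpg'}]
print(ensure_list([{'origin': 'a.jpg'}]))  # unchanged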
mgoral/subconvert
src/subconvert/utils/SubFile.py
File._writeFile
def _writeFile(cls, filePath, content, encoding = None): """Safe file writing. Most common mistakes are checked against and reported before write operation. After that, if anything unexpected happens, user won't be left without data or with corrupted one as this method writes to a temporary file and then simply renames it (which should be atomic operation according to POSIX but who knows how Ext4 really works. @see: http://lwn.net/Articles/322823/).""" filePath = os.path.realpath(filePath) log.debug(_("Real file path to write: %s" % filePath)) if encoding is None: encoding = File.DEFAULT_ENCODING try: encodedContent = ''.join(content).encode(encoding) except LookupError as msg: raise SubFileError(_("Unknown encoding name: '%s'.") % encoding) except UnicodeEncodeError: raise SubFileError( _("There are some characters in '%(file)s' that cannot be encoded to '%(enc)s'.") % {"file": filePath, "enc": encoding}) tmpFilePath = "%s.tmp" % filePath bakFilePath = "%s.bak" % filePath with open(tmpFilePath, 'wb') as f: f.write(encodedContent) # ensure that all data is on disk. # for performance reasons, we skip os.fsync(f.fileno()) f.flush() try: os.rename(filePath, bakFilePath) except FileNotFoundError: # there's nothing to move when filePath doesn't exist # note the Python bug: http://bugs.python.org/issue16074 pass os.rename(tmpFilePath, filePath) try: os.unlink(bakFilePath) except FileNotFoundError: pass
python
def _writeFile(cls, filePath, content, encoding = None): """Safe file writing. Most common mistakes are checked against and reported before write operation. After that, if anything unexpected happens, user won't be left without data or with corrupted one as this method writes to a temporary file and then simply renames it (which should be atomic operation according to POSIX but who knows how Ext4 really works. @see: http://lwn.net/Articles/322823/).""" filePath = os.path.realpath(filePath) log.debug(_("Real file path to write: %s" % filePath)) if encoding is None: encoding = File.DEFAULT_ENCODING try: encodedContent = ''.join(content).encode(encoding) except LookupError as msg: raise SubFileError(_("Unknown encoding name: '%s'.") % encoding) except UnicodeEncodeError: raise SubFileError( _("There are some characters in '%(file)s' that cannot be encoded to '%(enc)s'.") % {"file": filePath, "enc": encoding}) tmpFilePath = "%s.tmp" % filePath bakFilePath = "%s.bak" % filePath with open(tmpFilePath, 'wb') as f: f.write(encodedContent) # ensure that all data is on disk. # for performance reasons, we skip os.fsync(f.fileno()) f.flush() try: os.rename(filePath, bakFilePath) except FileNotFoundError: # there's nothing to move when filePath doesn't exist # note the Python bug: http://bugs.python.org/issue16074 pass os.rename(tmpFilePath, filePath) try: os.unlink(bakFilePath) except FileNotFoundError: pass
[ "def", "_writeFile", "(", "cls", ",", "filePath", ",", "content", ",", "encoding", "=", "None", ")", ":", "filePath", "=", "os", ".", "path", ".", "realpath", "(", "filePath", ")", "log", ".", "debug", "(", "_", "(", "\"Real file path to write: %s\"", "%", "filePath", ")", ")", "if", "encoding", "is", "None", ":", "encoding", "=", "File", ".", "DEFAULT_ENCODING", "try", ":", "encodedContent", "=", "''", ".", "join", "(", "content", ")", ".", "encode", "(", "encoding", ")", "except", "LookupError", "as", "msg", ":", "raise", "SubFileError", "(", "_", "(", "\"Unknown encoding name: '%s'.\"", ")", "%", "encoding", ")", "except", "UnicodeEncodeError", ":", "raise", "SubFileError", "(", "_", "(", "\"There are some characters in '%(file)s' that cannot be encoded to '%(enc)s'.\"", ")", "%", "{", "\"file\"", ":", "filePath", ",", "\"enc\"", ":", "encoding", "}", ")", "tmpFilePath", "=", "\"%s.tmp\"", "%", "filePath", "bakFilePath", "=", "\"%s.bak\"", "%", "filePath", "with", "open", "(", "tmpFilePath", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "encodedContent", ")", "# ensure that all data is on disk.", "# for performance reasons, we skip os.fsync(f.fileno())", "f", ".", "flush", "(", ")", "try", ":", "os", ".", "rename", "(", "filePath", ",", "bakFilePath", ")", "except", "FileNotFoundError", ":", "# there's nothing to move when filePath doesn't exist", "# note the Python bug: http://bugs.python.org/issue16074", "pass", "os", ".", "rename", "(", "tmpFilePath", ",", "filePath", ")", "try", ":", "os", ".", "unlink", "(", "bakFilePath", ")", "except", "FileNotFoundError", ":", "pass" ]
Safe file writing. The most common mistakes are checked against and reported before the write operation. After that, if anything unexpected happens, the user won't be left without data or with a corrupted file, as this method writes to a temporary file and then simply renames it (which should be an atomic operation according to POSIX, but who knows how Ext4 really works. @see: http://lwn.net/Articles/322823/).
[ "Safe", "file", "writing", ".", "Most", "common", "mistakes", "are", "checked", "against", "and", "reported", "before", "write", "operation", ".", "After", "that", "if", "anything", "unexpected", "happens", "user", "won", "t", "be", "left", "without", "data", "or", "with", "corrupted", "one", "as", "this", "method", "writes", "to", "a", "temporary", "file", "and", "then", "simply", "renames", "it", "(", "which", "should", "be", "atomic", "operation", "according", "to", "POSIX", "but", "who", "knows", "how", "Ext4", "really", "works", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/utils/SubFile.py#L117-L159
train
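The tmp-write-then-rename dance in _writeFile is a general pattern; below is a minimal standalone sketch of the same idea. The names and simplifications are mine: it also calls os.fsync, which the original skips for performance, and uses os.replace, which on Python 3.3+ overwrites the target atomically and removes the need for the .bak shuffle.

import os

def atomic_write(path, data):
    tmp = path + ".tmp"
    with open(tmp, "wb") as f:
        f.write(data)
        f.flush()
        os.fsync(f.fileno())  # force bytes to disk before the rename
    # os.replace is atomic on POSIX and overwrites an existing target.
    os.replace(tmp, path)

atomic_write("demo.txt", b"hello\n")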
skioo/django-customer-billing
billing/actions/invoices.py
pay_with_account_credit_cards
def pay_with_account_credit_cards(invoice_id) -> Optional[Transaction]: """ Get paid for the invoice, trying the valid credit cards on record for the account. If successful attaches the payment to the invoice and marks the invoice as paid. :param invoice_id: the id of the invoice to pay. :return: A successful transaction, or None if we weren't able to pay the invoice. """ logger.debug('invoice-payment-started', invoice_id=invoice_id) with transaction.atomic(): invoice = Invoice.objects.select_for_update().get(pk=invoice_id) # # Precondition: Invoice should be in a state that allows payment # if not invoice.in_payable_state: raise PreconditionError('Cannot pay invoice with status {}.'.format(invoice.status)) # # Precondition: The due amount must be positive, in a single currency # due = invoice.due().monies() if len(due) == 0: raise PreconditionError('Cannot pay empty invoice.') if len(due) > 1: raise PreconditionError('Cannot pay invoice with more than one currency.') amount = due[0] if amount.amount <= 0: raise PreconditionError('Cannot pay invoice with non-positive amount.') # # Try valid credit cards until one works. Start with the active ones # valid_credit_cards = CreditCard.objects.valid().filter(account=invoice.account).order_by('status') if not valid_credit_cards: raise PreconditionError('No valid credit card on account.') for credit_card in valid_credit_cards: try: success, payment_psp_object = psp.charge_credit_card( credit_card_psp_object=credit_card.psp_object, amount=amount, client_ref=str(invoice_id)) payment = Transaction.objects.create( account=invoice.account, invoice=invoice, amount=amount, success=success, payment_method=credit_card.type, credit_card_number=credit_card.number, psp_object=payment_psp_object) if success: invoice.pay() invoice.save() logger.info('invoice-payment-success', invoice=invoice_id, payment=payment) return payment else: logger.info('invoice-payment-failure', invoice=invoice_id, payment=payment) except Exception as e: logger.error('invoice-payment-error', invoice_id=invoice_id, credit_card=credit_card, exc_info=e) return None
python
def pay_with_account_credit_cards(invoice_id) -> Optional[Transaction]: """ Get paid for the invoice, trying the valid credit cards on record for the account. If successful attaches the payment to the invoice and marks the invoice as paid. :param invoice_id: the id of the invoice to pay. :return: A successful transaction, or None if we weren't able to pay the invoice. """ logger.debug('invoice-payment-started', invoice_id=invoice_id) with transaction.atomic(): invoice = Invoice.objects.select_for_update().get(pk=invoice_id) # # Precondition: Invoice should be in a state that allows payment # if not invoice.in_payable_state: raise PreconditionError('Cannot pay invoice with status {}.'.format(invoice.status)) # # Precondition: The due amount must be positive, in a single currency # due = invoice.due().monies() if len(due) == 0: raise PreconditionError('Cannot pay empty invoice.') if len(due) > 1: raise PreconditionError('Cannot pay invoice with more than one currency.') amount = due[0] if amount.amount <= 0: raise PreconditionError('Cannot pay invoice with non-positive amount.') # # Try valid credit cards until one works. Start with the active ones # valid_credit_cards = CreditCard.objects.valid().filter(account=invoice.account).order_by('status') if not valid_credit_cards: raise PreconditionError('No valid credit card on account.') for credit_card in valid_credit_cards: try: success, payment_psp_object = psp.charge_credit_card( credit_card_psp_object=credit_card.psp_object, amount=amount, client_ref=str(invoice_id)) payment = Transaction.objects.create( account=invoice.account, invoice=invoice, amount=amount, success=success, payment_method=credit_card.type, credit_card_number=credit_card.number, psp_object=payment_psp_object) if success: invoice.pay() invoice.save() logger.info('invoice-payment-success', invoice=invoice_id, payment=payment) return payment else: logger.info('invoice-payment-failure', invoice=invoice_id, payment=payment) except Exception as e: logger.error('invoice-payment-error', invoice_id=invoice_id, credit_card=credit_card, exc_info=e) return None
[ "def", "pay_with_account_credit_cards", "(", "invoice_id", ")", "->", "Optional", "[", "Transaction", "]", ":", "logger", ".", "debug", "(", "'invoice-payment-started'", ",", "invoice_id", "=", "invoice_id", ")", "with", "transaction", ".", "atomic", "(", ")", ":", "invoice", "=", "Invoice", ".", "objects", ".", "select_for_update", "(", ")", ".", "get", "(", "pk", "=", "invoice_id", ")", "#", "# Precondition: Invoice should be in a state that allows payment", "#", "if", "not", "invoice", ".", "in_payable_state", ":", "raise", "PreconditionError", "(", "'Cannot pay invoice with status {}.'", ".", "format", "(", "invoice", ".", "status", ")", ")", "#", "# Precondition: The due amount must be positive, in a single currency", "#", "due", "=", "invoice", ".", "due", "(", ")", ".", "monies", "(", ")", "if", "len", "(", "due", ")", "==", "0", ":", "raise", "PreconditionError", "(", "'Cannot pay empty invoice.'", ")", "if", "len", "(", "due", ")", ">", "1", ":", "raise", "PreconditionError", "(", "'Cannot pay invoice with more than one currency.'", ")", "amount", "=", "due", "[", "0", "]", "if", "amount", ".", "amount", "<=", "0", ":", "raise", "PreconditionError", "(", "'Cannot pay invoice with non-positive amount.'", ")", "#", "# Try valid credit cards until one works. Start with the active ones", "#", "valid_credit_cards", "=", "CreditCard", ".", "objects", ".", "valid", "(", ")", ".", "filter", "(", "account", "=", "invoice", ".", "account", ")", ".", "order_by", "(", "'status'", ")", "if", "not", "valid_credit_cards", ":", "raise", "PreconditionError", "(", "'No valid credit card on account.'", ")", "for", "credit_card", "in", "valid_credit_cards", ":", "try", ":", "success", ",", "payment_psp_object", "=", "psp", ".", "charge_credit_card", "(", "credit_card_psp_object", "=", "credit_card", ".", "psp_object", ",", "amount", "=", "amount", ",", "client_ref", "=", "str", "(", "invoice_id", ")", ")", "payment", "=", "Transaction", ".", "objects", ".", "create", "(", "account", "=", "invoice", ".", "account", ",", "invoice", "=", "invoice", ",", "amount", "=", "amount", ",", "success", "=", "success", ",", "payment_method", "=", "credit_card", ".", "type", ",", "credit_card_number", "=", "credit_card", ".", "number", ",", "psp_object", "=", "payment_psp_object", ")", "if", "success", ":", "invoice", ".", "pay", "(", ")", "invoice", ".", "save", "(", ")", "logger", ".", "info", "(", "'invoice-payment-success'", ",", "invoice", "=", "invoice_id", ",", "payment", "=", "payment", ")", "return", "payment", "else", ":", "logger", ".", "info", "(", "'invoice-payment-failure'", ",", "invoice", "=", "invoice_id", ",", "payment", "=", "payment", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "'invoice-payment-error'", ",", "invoice_id", "=", "invoice_id", ",", "credit_card", "=", "credit_card", ",", "exc_info", "=", "e", ")", "return", "None" ]
Get paid for the invoice, trying the valid credit cards on record for the account. If successful, attaches the payment to the invoice and marks the invoice as paid. :param invoice_id: the id of the invoice to pay. :return: A successful transaction, or None if we weren't able to pay the invoice.
[ "Get", "paid", "for", "the", "invoice", "trying", "the", "valid", "credit", "cards", "on", "record", "for", "the", "account", "." ]
6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d
https://github.com/skioo/django-customer-billing/blob/6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d/billing/actions/invoices.py#L16-L77
train
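Stripped of the Django and PSP specifics, pay_with_account_credit_cards is "lock, check preconditions, try candidates in order, return on first success"; a plain-Python sketch of that control flow with stand-in names:

def pay(candidates, charge):
    # charge(card) stands in for the PSP call; it returns truthy on
    # success and may raise, mirroring the loop in the original.
    if not candidates:
        raise ValueError("No valid credit card on account.")
    for card in candidates:
        try:
            if charge(card):
                return card  # first successful charge wins
        except Exception:
            continue  # the original logs the error and tries the next card
    return None  # every candidate failed

print(pay(["expired", "active"], lambda c: c == "active"))  # active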
mgoral/subconvert
src/subconvert/gui/ToolBox.py
ToolBox.setContentFor
def setContentFor(self, widget): """Updates toolbox contents with a data corresponding to a given tab.""" for i in range(self.count()): item = self.widget(i) if widget.isStatic: item.setStaticContent(widget) else: item.setContent(widget)
python
def setContentFor(self, widget): """Updates toolbox contents with a data corresponding to a given tab.""" for i in range(self.count()): item = self.widget(i) if widget.isStatic: item.setStaticContent(widget) else: item.setContent(widget)
[ "def", "setContentFor", "(", "self", ",", "widget", ")", ":", "for", "i", "in", "range", "(", "self", ".", "count", "(", ")", ")", ":", "item", "=", "self", ".", "widget", "(", "i", ")", "if", "widget", ".", "isStatic", ":", "item", ".", "setStaticContent", "(", "widget", ")", "else", ":", "item", ".", "setContent", "(", "widget", ")" ]
Updates toolbox contents with a data corresponding to a given tab.
[ "Updates", "toolbox", "contents", "with", "a", "data", "corresponding", "to", "a", "given", "tab", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/gui/ToolBox.py#L37-L44
train
mgoral/subconvert
src/subconvert/gui/ToolBox.py
Tool.clear
def clear(self): """Removes all child widgets.""" layout = self.layout() for index in reversed(range(layout.count())): item = layout.takeAt(index) try: item.widget().deleteLater() except AttributeError: item = None
python
def clear(self): """Removes all child widgets.""" layout = self.layout() for index in reversed(range(layout.count())): item = layout.takeAt(index) try: item.widget().deleteLater() except AttributeError: item = None
[ "def", "clear", "(", "self", ")", ":", "layout", "=", "self", ".", "layout", "(", ")", "for", "index", "in", "reversed", "(", "range", "(", "layout", ".", "count", "(", ")", ")", ")", ":", "item", "=", "layout", ".", "takeAt", "(", "index", ")", "try", ":", "item", ".", "widget", "(", ")", ".", "deleteLater", "(", ")", "except", "AttributeError", ":", "item", "=", "None" ]
Removes all child widgets.
[ "Removes", "all", "child", "widgets", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/gui/ToolBox.py#L79-L87
train
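The reversed(range(...)) in Tool.clear matters: takeAt() removes by index, so a forward loop would skip every other item as indices shift. The same rule holds for any index-based removal; a Qt-free illustration:

items = ["a", "b", "c", "d"]
for i in reversed(range(len(items))):
    items.pop(i)  # safe: later pops never disturb earlier indices
print(items)  # []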
stylight/python-fastbill
fastbill/wrapper.py
FastbillWrapper._request
def _request(self, service, **kw): """Do the actual request to Fastbill's API server. If successful returns the RESPONSE section the of response, in case of an error raises a subclass of FastbillError. """ fb_request = { 'service': service, } for key in ['limit', 'offset', 'filter', 'data']: fb_request[key] = kw.pop(key, None) if kw: raise _exc.FastbillRequestError("Unknown arguments: %s" % ", ".join(kw.keys())) data = _jsonencoder.dumps(fb_request) _logger.debug("Sending data: %r", data) self._pre_request_callback(service, fb_request) # TODO: Retry when we hit a 404 (api not found). Probably a deploy. http_resp = self.session.post(self.SERVICE_URL, auth=self.auth, headers=self.headers, timeout=self.timeout, data=data) self._post_request_callback(service, fb_request, http_resp) try: json_resp = http_resp.json() except ValueError: _logger.debug("Got data: %r", http_resp.content) _abort_http(service, http_resp) return # to make PyCharm happy else: _logger.debug("Got data: %r", json_resp) errors = json_resp['RESPONSE'].get('ERRORS') if errors: _abort_api(service, json_resp, errors) # If Fastbill should ever remove the REQUEST or SERVICE section # from their responses, just remove the checks. if json_resp['REQUEST']['SERVICE'] != service: raise _exc.FastbillError( "API Error: Got response from wrong service.") return _response.FastbillResponse(json_resp['RESPONSE'], self)
python
def _request(self, service, **kw): """Do the actual request to Fastbill's API server. If successful returns the RESPONSE section the of response, in case of an error raises a subclass of FastbillError. """ fb_request = { 'service': service, } for key in ['limit', 'offset', 'filter', 'data']: fb_request[key] = kw.pop(key, None) if kw: raise _exc.FastbillRequestError("Unknown arguments: %s" % ", ".join(kw.keys())) data = _jsonencoder.dumps(fb_request) _logger.debug("Sending data: %r", data) self._pre_request_callback(service, fb_request) # TODO: Retry when we hit a 404 (api not found). Probably a deploy. http_resp = self.session.post(self.SERVICE_URL, auth=self.auth, headers=self.headers, timeout=self.timeout, data=data) self._post_request_callback(service, fb_request, http_resp) try: json_resp = http_resp.json() except ValueError: _logger.debug("Got data: %r", http_resp.content) _abort_http(service, http_resp) return # to make PyCharm happy else: _logger.debug("Got data: %r", json_resp) errors = json_resp['RESPONSE'].get('ERRORS') if errors: _abort_api(service, json_resp, errors) # If Fastbill should ever remove the REQUEST or SERVICE section # from their responses, just remove the checks. if json_resp['REQUEST']['SERVICE'] != service: raise _exc.FastbillError( "API Error: Got response from wrong service.") return _response.FastbillResponse(json_resp['RESPONSE'], self)
[ "def", "_request", "(", "self", ",", "service", ",", "*", "*", "kw", ")", ":", "fb_request", "=", "{", "'service'", ":", "service", ",", "}", "for", "key", "in", "[", "'limit'", ",", "'offset'", ",", "'filter'", ",", "'data'", "]", ":", "fb_request", "[", "key", "]", "=", "kw", ".", "pop", "(", "key", ",", "None", ")", "if", "kw", ":", "raise", "_exc", ".", "FastbillRequestError", "(", "\"Unknown arguments: %s\"", "%", "\", \"", ".", "join", "(", "kw", ".", "keys", "(", ")", ")", ")", "data", "=", "_jsonencoder", ".", "dumps", "(", "fb_request", ")", "_logger", ".", "debug", "(", "\"Sending data: %r\"", ",", "data", ")", "self", ".", "_pre_request_callback", "(", "service", ",", "fb_request", ")", "# TODO: Retry when we hit a 404 (api not found). Probably a deploy.", "http_resp", "=", "self", ".", "session", ".", "post", "(", "self", ".", "SERVICE_URL", ",", "auth", "=", "self", ".", "auth", ",", "headers", "=", "self", ".", "headers", ",", "timeout", "=", "self", ".", "timeout", ",", "data", "=", "data", ")", "self", ".", "_post_request_callback", "(", "service", ",", "fb_request", ",", "http_resp", ")", "try", ":", "json_resp", "=", "http_resp", ".", "json", "(", ")", "except", "ValueError", ":", "_logger", ".", "debug", "(", "\"Got data: %r\"", ",", "http_resp", ".", "content", ")", "_abort_http", "(", "service", ",", "http_resp", ")", "return", "# to make PyCharm happy", "else", ":", "_logger", ".", "debug", "(", "\"Got data: %r\"", ",", "json_resp", ")", "errors", "=", "json_resp", "[", "'RESPONSE'", "]", ".", "get", "(", "'ERRORS'", ")", "if", "errors", ":", "_abort_api", "(", "service", ",", "json_resp", ",", "errors", ")", "# If Fastbill should ever remove the REQUEST or SERVICE section", "# from their responses, just remove the checks.", "if", "json_resp", "[", "'REQUEST'", "]", "[", "'SERVICE'", "]", "!=", "service", ":", "raise", "_exc", ".", "FastbillError", "(", "\"API Error: Got response from wrong service.\"", ")", "return", "_response", ".", "FastbillResponse", "(", "json_resp", "[", "'RESPONSE'", "]", ",", "self", ")" ]
Do the actual request to Fastbill's API server. If successful, returns the RESPONSE section of the response; in case of an error, raises a subclass of FastbillError.
[ "Do", "the", "actual", "request", "to", "Fastbill", "s", "API", "server", "." ]
e0cea5cc931df4a7b64c2f877ff3b9a4cf56e5bc
https://github.com/stylight/python-fastbill/blob/e0cea5cc931df4a7b64c2f877ff3b9a4cf56e5bc/fastbill/wrapper.py#L117-L164
train
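The kwargs handling at the top of _request (pop every known key, then reject whatever remains) is a reusable validation idiom; a small sketch:

def build_request(service, **kw):
    request = {'service': service}
    for key in ['limit', 'offset', 'filter', 'data']:
        request[key] = kw.pop(key, None)  # absent keys default to None
    if kw:  # anything left over was never expected
        raise TypeError("Unknown arguments: %s" % ", ".join(kw))
    return request

print(build_request("customer.get", limit=10)['limit'])  # 10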
PythonOptimizers/cygenja
cygenja/treemap/treemap_node.py
TreeMapNode.set_parent
def set_parent(self, node): """ Attach node to its parent. Args: node: Parent node. Note: ``node`` can be ``None``. In that case, the node is detached from its previous parent. """ self._parent = node if node is None: # detach from parent self._depth = 0 else: self._depth = node.get_depth() + 1
python
def set_parent(self, node): """ Attach node to its parent. Args: node: Parent node. Note: ``node`` can be ``None``. In that case, the node is detached from its previous parent. """ self._parent = node if node is None: # detach from parent self._depth = 0 else: self._depth = node.get_depth() + 1
[ "def", "set_parent", "(", "self", ",", "node", ")", ":", "self", ".", "_parent", "=", "node", "if", "node", "is", "None", ":", "# detach from parent", "self", ".", "_depth", "=", "0", "else", ":", "self", ".", "_depth", "=", "node", ".", "get_depth", "(", ")", "+", "1" ]
Attach node to its parent. Args: node: Parent node. Note: ``node`` can be ``None``. In that case, the node is detached from its previous parent.
[ "Attach", "node", "to", "its", "parent", "." ]
a9ef91cdfa8452beeeec4f050f928b830379f91c
https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/treemap/treemap_node.py#L35-L52
train
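A quick illustration of the depth rule encoded above (depth = parent depth + 1, reset to 0 on detach), using a stripped-down stand-in rather than the real TreeMapNode:

class N:
    def __init__(self):
        self._parent, self._depth = None, 0
    def get_depth(self):
        return self._depth
    def set_parent(self, node):
        self._parent = node
        self._depth = 0 if node is None else node.get_depth() + 1

root, child = N(), N()
child.set_parent(root)
print(child.get_depth())  # 1
child.set_parent(None)    # detaching resets the depth
print(child.get_depth())  # 0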
PythonOptimizers/cygenja
cygenja/treemap/treemap_node.py
TreeMapNode.generate_child_leaf_nodes
def generate_child_leaf_nodes(self): """ Generate leaf nodes of this node. """ def _yield_child_leaf_nodes(node): """ Args: node: Yields: """ if not node.has_children(): yield node else: for child_node in node.generate_child_nodes(): # recursivity is not compatible with yield in Python2.x: you have to re-yield results for child in _yield_child_leaf_nodes(child_node): yield child return _yield_child_leaf_nodes(self)
python
def generate_child_leaf_nodes(self): """ Generate leaf nodes of this node. """ def _yield_child_leaf_nodes(node): """ Args: node: Yields: """ if not node.has_children(): yield node else: for child_node in node.generate_child_nodes(): # recursivity is not compatible with yield in Python2.x: you have to re-yield results for child in _yield_child_leaf_nodes(child_node): yield child return _yield_child_leaf_nodes(self)
[ "def", "generate_child_leaf_nodes", "(", "self", ")", ":", "def", "_yield_child_leaf_nodes", "(", "node", ")", ":", "\"\"\"\n\n Args:\n node:\n\n Yields:\n \"\"\"", "if", "not", "node", ".", "has_children", "(", ")", ":", "yield", "node", "else", ":", "for", "child_node", "in", "node", ".", "generate_child_nodes", "(", ")", ":", "# recursivity is not compatible with yield in Python2.x: you have to re-yield results", "for", "child", "in", "_yield_child_leaf_nodes", "(", "child_node", ")", ":", "yield", "child", "return", "_yield_child_leaf_nodes", "(", "self", ")" ]
Generate leaf nodes of this node.
[ "Generate", "leaf", "nodes", "of", "this", "node", "." ]
a9ef91cdfa8452beeeec4f050f928b830379f91c
https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/treemap/treemap_node.py#L149-L171
train
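The inner re-yield loop is the Python 2 workaround the comment names; on Python 3.3+ the same depth-first traversal collapses to yield from. A sketch on a stand-in node type, not the actual TreeMapNode:

class Node:
    def __init__(self, children=()):
        self.children = list(children)

def leaf_nodes(node):
    if not node.children:
        yield node
    else:
        for child in node.children:
            yield from leaf_nodes(child)  # delegates the whole subgenerator

tree = Node([Node(), Node([Node(), Node()])])
print(len(list(leaf_nodes(tree))))  # 3 leaves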
PythonOptimizers/cygenja
cygenja/treemap/treemap_node.py
TreeMapNode.detach_children
def detach_children(self): """ Erase references to children without deleting them. These children might be used somewhere else, otherwise they will be taken care by ``Python``'s garbage collector. """ for node in self.get_child_nodes(): node.set_parent(None) self._nodes = dict()
python
def detach_children(self): """ Erase references to children without deleting them. These children might be used somewhere else, otherwise they will be taken care by ``Python``'s garbage collector. """ for node in self.get_child_nodes(): node.set_parent(None) self._nodes = dict()
[ "def", "detach_children", "(", "self", ")", ":", "for", "node", "in", "self", ".", "get_child_nodes", "(", ")", ":", "node", ".", "set_parent", "(", "None", ")", "self", ".", "_nodes", "=", "dict", "(", ")" ]
Erase references to children without deleting them. These children might be used somewhere else; otherwise they will be taken care of by ``Python``'s garbage collector.
[ "Erase", "references", "to", "children", "without", "deleting", "them", "." ]
a9ef91cdfa8452beeeec4f050f928b830379f91c
https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/treemap/treemap_node.py#L173-L181
train
ngmarchant/oasis
oasis/experiments.py
process_expt
def process_expt(h5_path, inmemory = True, ignorenan = False): """ Assumes h5 file has table called `F_measure` Parameters ---------- h5_path : string path to HDF file containing the experimental data. The file is expected to have been generated from the `repeat_expt` function. inmemory : bool whether to process the experiments in memory ignorenan : bool whether to ignore NaNs when computing the mean and variance """ logging.info("Reading file at {}".format(h5_path)) h5_file = tables.open_file(h5_path, mode = 'r') F = h5_file.root.F_measure n_expt, n_labels, n_class = F.shape mean_n_iterations = np.sum(h5_file.root.n_iterations)/n_expt if hasattr(h5_file.root, 'CPU_time'): CPU_time = h5_file.root.CPU_time mean_CPU_time = np.mean(CPU_time) var_CPU_time = np.var(CPU_time) else: mean_CPU_time = None var_CPU_time = None mean_CPU_time_per_iteration = None F_mean = np.empty([n_labels, n_class], dtype='float') F_var = np.empty([n_labels, n_class], dtype='float') F_stderr = np.empty([n_labels, n_class], dtype='float') n_sample = np.empty(n_labels, dtype='int') if inmemory: F_mem = F[:,:,:] logging.info("Beginning processing".format()) for t in range(n_labels): if t%np.ceil(n_labels/10).astype(int) == 0: logging.info("Processed {} of {} experiments".format(t, n_labels)) if inmemory: temp = F_mem[:,t,:] else: temp = F[:,t,:] if ignorenan: n_sample[t] = np.sum(~np.isnan(temp)) # Expect to see RuntimeWarnings if array contains all NaNs with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) F_mean[t] = np.nanmean(temp, axis=0) F_var[t] = np.nanvar(temp, axis=0) F_stderr[t] = np.sqrt(F_var[t]/n_sample[t]) else: n_sample[t] = len(temp) F_mean[t] = np.mean(temp, axis=0) F_var[t] = np.var(temp, axis=0) F_stderr[t] = np.sqrt(F_var[t]/n_sample[t]) logging.info("Processing complete".format()) h5_file.close() return {'mean': F_mean, 'variance': F_var, 'std_error': F_stderr, 'n_samples': n_sample, 'n_expts': n_expt, 'n_labels': n_labels, 'mean_CPU_time': mean_CPU_time, 'var_CPU_time': var_CPU_time, 'mean_n_iterations': mean_n_iterations, 'h5_path': h5_path}
python
def process_expt(h5_path, inmemory = True, ignorenan = False): """ Assumes h5 file has table called `F_measure` Parameters ---------- h5_path : string path to HDF file containing the experimental data. The file is expected to have been generated from the `repeat_expt` function. inmemory : bool whether to process the experiments in memory ignorenan : bool whether to ignore NaNs when computing the mean and variance """ logging.info("Reading file at {}".format(h5_path)) h5_file = tables.open_file(h5_path, mode = 'r') F = h5_file.root.F_measure n_expt, n_labels, n_class = F.shape mean_n_iterations = np.sum(h5_file.root.n_iterations)/n_expt if hasattr(h5_file.root, 'CPU_time'): CPU_time = h5_file.root.CPU_time mean_CPU_time = np.mean(CPU_time) var_CPU_time = np.var(CPU_time) else: mean_CPU_time = None var_CPU_time = None mean_CPU_time_per_iteration = None F_mean = np.empty([n_labels, n_class], dtype='float') F_var = np.empty([n_labels, n_class], dtype='float') F_stderr = np.empty([n_labels, n_class], dtype='float') n_sample = np.empty(n_labels, dtype='int') if inmemory: F_mem = F[:,:,:] logging.info("Beginning processing".format()) for t in range(n_labels): if t%np.ceil(n_labels/10).astype(int) == 0: logging.info("Processed {} of {} experiments".format(t, n_labels)) if inmemory: temp = F_mem[:,t,:] else: temp = F[:,t,:] if ignorenan: n_sample[t] = np.sum(~np.isnan(temp)) # Expect to see RuntimeWarnings if array contains all NaNs with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) F_mean[t] = np.nanmean(temp, axis=0) F_var[t] = np.nanvar(temp, axis=0) F_stderr[t] = np.sqrt(F_var[t]/n_sample[t]) else: n_sample[t] = len(temp) F_mean[t] = np.mean(temp, axis=0) F_var[t] = np.var(temp, axis=0) F_stderr[t] = np.sqrt(F_var[t]/n_sample[t]) logging.info("Processing complete".format()) h5_file.close() return {'mean': F_mean, 'variance': F_var, 'std_error': F_stderr, 'n_samples': n_sample, 'n_expts': n_expt, 'n_labels': n_labels, 'mean_CPU_time': mean_CPU_time, 'var_CPU_time': var_CPU_time, 'mean_n_iterations': mean_n_iterations, 'h5_path': h5_path}
[ "def", "process_expt", "(", "h5_path", ",", "inmemory", "=", "True", ",", "ignorenan", "=", "False", ")", ":", "logging", ".", "info", "(", "\"Reading file at {}\"", ".", "format", "(", "h5_path", ")", ")", "h5_file", "=", "tables", ".", "open_file", "(", "h5_path", ",", "mode", "=", "'r'", ")", "F", "=", "h5_file", ".", "root", ".", "F_measure", "n_expt", ",", "n_labels", ",", "n_class", "=", "F", ".", "shape", "mean_n_iterations", "=", "np", ".", "sum", "(", "h5_file", ".", "root", ".", "n_iterations", ")", "/", "n_expt", "if", "hasattr", "(", "h5_file", ".", "root", ",", "'CPU_time'", ")", ":", "CPU_time", "=", "h5_file", ".", "root", ".", "CPU_time", "mean_CPU_time", "=", "np", ".", "mean", "(", "CPU_time", ")", "var_CPU_time", "=", "np", ".", "var", "(", "CPU_time", ")", "else", ":", "mean_CPU_time", "=", "None", "var_CPU_time", "=", "None", "mean_CPU_time_per_iteration", "=", "None", "F_mean", "=", "np", ".", "empty", "(", "[", "n_labels", ",", "n_class", "]", ",", "dtype", "=", "'float'", ")", "F_var", "=", "np", ".", "empty", "(", "[", "n_labels", ",", "n_class", "]", ",", "dtype", "=", "'float'", ")", "F_stderr", "=", "np", ".", "empty", "(", "[", "n_labels", ",", "n_class", "]", ",", "dtype", "=", "'float'", ")", "n_sample", "=", "np", ".", "empty", "(", "n_labels", ",", "dtype", "=", "'int'", ")", "if", "inmemory", ":", "F_mem", "=", "F", "[", ":", ",", ":", ",", ":", "]", "logging", ".", "info", "(", "\"Beginning processing\"", ".", "format", "(", ")", ")", "for", "t", "in", "range", "(", "n_labels", ")", ":", "if", "t", "%", "np", ".", "ceil", "(", "n_labels", "/", "10", ")", ".", "astype", "(", "int", ")", "==", "0", ":", "logging", ".", "info", "(", "\"Processed {} of {} experiments\"", ".", "format", "(", "t", ",", "n_labels", ")", ")", "if", "inmemory", ":", "temp", "=", "F_mem", "[", ":", ",", "t", ",", ":", "]", "else", ":", "temp", "=", "F", "[", ":", ",", "t", ",", ":", "]", "if", "ignorenan", ":", "n_sample", "[", "t", "]", "=", "np", ".", "sum", "(", "~", "np", ".", "isnan", "(", "temp", ")", ")", "# Expect to see RuntimeWarnings if array contains all NaNs", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "category", "=", "RuntimeWarning", ")", "F_mean", "[", "t", "]", "=", "np", ".", "nanmean", "(", "temp", ",", "axis", "=", "0", ")", "F_var", "[", "t", "]", "=", "np", ".", "nanvar", "(", "temp", ",", "axis", "=", "0", ")", "F_stderr", "[", "t", "]", "=", "np", ".", "sqrt", "(", "F_var", "[", "t", "]", "/", "n_sample", "[", "t", "]", ")", "else", ":", "n_sample", "[", "t", "]", "=", "len", "(", "temp", ")", "F_mean", "[", "t", "]", "=", "np", ".", "mean", "(", "temp", ",", "axis", "=", "0", ")", "F_var", "[", "t", "]", "=", "np", ".", "var", "(", "temp", ",", "axis", "=", "0", ")", "F_stderr", "[", "t", "]", "=", "np", ".", "sqrt", "(", "F_var", "[", "t", "]", "/", "n_sample", "[", "t", "]", ")", "logging", ".", "info", "(", "\"Processing complete\"", ".", "format", "(", ")", ")", "h5_file", ".", "close", "(", ")", "return", "{", "'mean'", ":", "F_mean", ",", "'variance'", ":", "F_var", ",", "'std_error'", ":", "F_stderr", ",", "'n_samples'", ":", "n_sample", ",", "'n_expts'", ":", "n_expt", ",", "'n_labels'", ":", "n_labels", ",", "'mean_CPU_time'", ":", "mean_CPU_time", ",", "'var_CPU_time'", ":", "var_CPU_time", ",", "'mean_n_iterations'", ":", "mean_n_iterations", ",", "'h5_path'", ":", "h5_path", "}" ]
Assumes the h5 file has a table called `F_measure` Parameters ---------- h5_path : string path to the HDF file containing the experimental data. The file is expected to have been generated by the `repeat_expt` function. inmemory : bool whether to process the experiments in memory ignorenan : bool whether to ignore NaNs when computing the mean and variance
[ "Assumes", "h5", "file", "has", "table", "called", "F_measure" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/experiments.py#L61-L138
train
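The catch_warnings block in process_expt exists because nanmean and nanvar over an all-NaN slice emit a RuntimeWarning ("Mean of empty slice") while still returning nan; a minimal reproduction:

import warnings
import numpy as np

rows = np.array([[1.0, 2.0], [np.nan, np.nan]])
with warnings.catch_warnings():
    warnings.simplefilter("ignore", category=RuntimeWarning)
    print(np.nanmean(rows, axis=1))  # [1.5 nan], no warning printed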
ngmarchant/oasis
oasis/experiments.py
Data.calc_confusion_matrix
def calc_confusion_matrix(self, printout = False): """ Calculates number of TP, FP, TN, FN """ if self.labels is None: raise DataError("Cannot calculate confusion matrix before data " "has been read.") if self.preds is None: raise DataError("Predictions not available. Please run " "`scores_to_preds` before calculating confusion " "matrix") self.TP = np.sum(np.logical_and(self.preds == 1, self.labels == 1)) self.TN = np.sum(np.logical_and(self.preds == 0, self.labels == 0)) self.FP = np.sum(np.logical_and(self.preds == 1, self.labels == 0)) self.FN = np.sum(np.logical_and(self.preds == 0, self.labels == 1)) if printout: print("Contingency matrix is:") print("----------------------") print("TP: {} \t FN: {}".format(self.TP,self.FN)) print("FP: {} \t TN: {}".format(self.FP,self.TN)) print("\n")
python
def calc_confusion_matrix(self, printout = False): """ Calculates number of TP, FP, TN, FN """ if self.labels is None: raise DataError("Cannot calculate confusion matrix before data " "has been read.") if self.preds is None: raise DataError("Predictions not available. Please run " "`scores_to_preds` before calculating confusion " "matrix") self.TP = np.sum(np.logical_and(self.preds == 1, self.labels == 1)) self.TN = np.sum(np.logical_and(self.preds == 0, self.labels == 0)) self.FP = np.sum(np.logical_and(self.preds == 1, self.labels == 0)) self.FN = np.sum(np.logical_and(self.preds == 0, self.labels == 1)) if printout: print("Contingency matrix is:") print("----------------------") print("TP: {} \t FN: {}".format(self.TP,self.FN)) print("FP: {} \t TN: {}".format(self.FP,self.TN)) print("\n")
[ "def", "calc_confusion_matrix", "(", "self", ",", "printout", "=", "False", ")", ":", "if", "self", ".", "labels", "is", "None", ":", "raise", "DataError", "(", "\"Cannot calculate confusion matrix before data \"", "\"has been read.\"", ")", "if", "self", ".", "preds", "is", "None", ":", "raise", "DataError", "(", "\"Predictions not available. Please run \"", "\"`scores_to_preds` before calculating confusion \"", "\"matrix\"", ")", "self", ".", "TP", "=", "np", ".", "sum", "(", "np", ".", "logical_and", "(", "self", ".", "preds", "==", "1", ",", "self", ".", "labels", "==", "1", ")", ")", "self", ".", "TN", "=", "np", ".", "sum", "(", "np", ".", "logical_and", "(", "self", ".", "preds", "==", "0", ",", "self", ".", "labels", "==", "0", ")", ")", "self", ".", "FP", "=", "np", ".", "sum", "(", "np", ".", "logical_and", "(", "self", ".", "preds", "==", "1", ",", "self", ".", "labels", "==", "0", ")", ")", "self", ".", "FN", "=", "np", ".", "sum", "(", "np", ".", "logical_and", "(", "self", ".", "preds", "==", "0", ",", "self", ".", "labels", "==", "1", ")", ")", "if", "printout", ":", "print", "(", "\"Contingency matrix is:\"", ")", "print", "(", "\"----------------------\"", ")", "print", "(", "\"TP: {} \\t FN: {}\"", ".", "format", "(", "self", ".", "TP", ",", "self", ".", "FN", ")", ")", "print", "(", "\"FP: {} \\t TN: {}\"", ".", "format", "(", "self", ".", "FP", ",", "self", ".", "TN", ")", ")", "print", "(", "\"\\n\"", ")" ]
Calculates the number of TP, FP, TN and FN
[ "Calculates", "number", "of", "TP", "FP", "TN", "FN" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/experiments.py#L216-L239
train
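A small numeric check of the logical_and counting above, with invented arrays:

import numpy as np

preds  = np.array([1, 1, 0, 0, 1])
labels = np.array([1, 0, 0, 1, 1])
TP = np.sum(np.logical_and(preds == 1, labels == 1))  # 2 (positions 0 and 4)
FP = np.sum(np.logical_and(preds == 1, labels == 0))  # 1 (position 1)
TN = np.sum(np.logical_and(preds == 0, labels == 0))  # 1 (position 2)
FN = np.sum(np.logical_and(preds == 0, labels == 1))  # 1 (position 3)
print(TP, FP, TN, FN)  # 2 1 1 1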
ngmarchant/oasis
oasis/experiments.py
Data.calc_true_performance
def calc_true_performance(self, printout = False): """ Evaluate precision, recall and balanced F-measure """ try: self.calc_confusion_matrix(printout = False) except DataError as e: print(e.msg) raise if self.TP + self.FP == 0: self.precision = np.nan else: self.precision = self.TP / (self.TP + self.FP) if self.TP + self.FN == 0: self.recall = np.nan else: self.recall = self.TP / (self.TP + self.FN) if self.precision + self.recall == 0: self.F1_measure = np.nan else: self.F1_measure = ( 2 * self.precision * self.recall / (self.precision + self.recall) ) if printout: print("True performance is:") print("--------------------") print("Precision: {} \t Recall: {} \t F1 measure: {}".format(self.precision, self.recall, self.F1_measure))
python
def calc_true_performance(self, printout = False): """ Evaluate precision, recall and balanced F-measure """ try: self.calc_confusion_matrix(printout = False) except DataError as e: print(e.msg) raise if self.TP + self.FP == 0: self.precision = np.nan else: self.precision = self.TP / (self.TP + self.FP) if self.TP + self.FN == 0: self.recall = np.nan else: self.recall = self.TP / (self.TP + self.FN) if self.precision + self.recall == 0: self.F1_measure = np.nan else: self.F1_measure = ( 2 * self.precision * self.recall / (self.precision + self.recall) ) if printout: print("True performance is:") print("--------------------") print("Precision: {} \t Recall: {} \t F1 measure: {}".format(self.precision, self.recall, self.F1_measure))
[ "def", "calc_true_performance", "(", "self", ",", "printout", "=", "False", ")", ":", "try", ":", "self", ".", "calc_confusion_matrix", "(", "printout", "=", "False", ")", "except", "DataError", "as", "e", ":", "print", "(", "e", ".", "msg", ")", "raise", "if", "self", ".", "TP", "+", "self", ".", "FP", "==", "0", ":", "self", ".", "precision", "=", "np", ".", "nan", "else", ":", "self", ".", "precision", "=", "self", ".", "TP", "/", "(", "self", ".", "TP", "+", "self", ".", "FP", ")", "if", "self", ".", "TP", "+", "self", ".", "FN", "==", "0", ":", "self", ".", "recall", "=", "np", ".", "nan", "else", ":", "self", ".", "recall", "=", "self", ".", "TP", "/", "(", "self", ".", "TP", "+", "self", ".", "FN", ")", "if", "self", ".", "precision", "+", "self", ".", "recall", "==", "0", ":", "self", ".", "F1_measure", "=", "np", ".", "nan", "else", ":", "self", ".", "F1_measure", "=", "(", "2", "*", "self", ".", "precision", "*", "self", ".", "recall", "/", "(", "self", ".", "precision", "+", "self", ".", "recall", ")", ")", "if", "printout", ":", "print", "(", "\"True performance is:\"", ")", "print", "(", "\"--------------------\"", ")", "print", "(", "\"Precision: {} \\t Recall: {} \\t F1 measure: {}\"", ".", "format", "(", "self", ".", "precision", ",", "self", ".", "recall", ",", "self", ".", "F1_measure", ")", ")" ]
Evaluate precision, recall and balanced F-measure
[ "Evaluate", "precision", "recall", "and", "balanced", "F", "-", "measure" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/experiments.py#L242-L271
train
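Continuing the toy counts from the previous sketch: precision = TP/(TP+FP), recall = TP/(TP+FN), and F1 = 2PR/(P+R); the method above substitutes nan whenever a denominator is zero.

TP, FP, FN = 2, 1, 1
precision = TP / (TP + FP)  # 2/3
recall = TP / (TP + FN)     # 2/3
f1 = 2 * precision * recall / (precision + recall)  # also 2/3
print(round(precision, 3), round(recall, 3), round(f1, 3))  # 0.667 0.667 0.667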
praekeltfoundation/seed-message-sender
message_sender/migrations/0020_outboundsendfailure_db_backed_fk_constraint.py
modify_fk_constraint
def modify_fk_constraint(apps, schema_editor): """ Delete's the current foreign key contraint on the outbound field, and adds it again, but this time with an ON DELETE clause """ model = apps.get_model("message_sender", "OutboundSendFailure") table = model._meta.db_table with schema_editor.connection.cursor() as cursor: constraints = schema_editor.connection.introspection.get_constraints( cursor, table ) [constraint] = filter(lambda c: c[1]["foreign_key"], constraints.items()) [name, _] = constraint sql_delete_fk = ( "SET CONSTRAINTS {name} IMMEDIATE; " "ALTER TABLE {table} DROP CONSTRAINT {name}" ).format(table=schema_editor.quote_name(table), name=schema_editor.quote_name(name)) schema_editor.execute(sql_delete_fk) field = model.outbound.field to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field( field.remote_field.field_name ).column sql_create_fk = ( "ALTER TABLE {table} ADD CONSTRAINT {name} FOREIGN KEY " "({column}) REFERENCES {to_table} ({to_column}) " "ON DELETE CASCADE {deferrable};" ).format( table=schema_editor.quote_name(table), name=schema_editor.quote_name(name), column=schema_editor.quote_name(field.column), to_table=schema_editor.quote_name(to_table), to_column=schema_editor.quote_name(to_column), deferrable=schema_editor.connection.ops.deferrable_sql(), ) schema_editor.execute(sql_create_fk)
python
def modify_fk_constraint(apps, schema_editor): """ Delete's the current foreign key contraint on the outbound field, and adds it again, but this time with an ON DELETE clause """ model = apps.get_model("message_sender", "OutboundSendFailure") table = model._meta.db_table with schema_editor.connection.cursor() as cursor: constraints = schema_editor.connection.introspection.get_constraints( cursor, table ) [constraint] = filter(lambda c: c[1]["foreign_key"], constraints.items()) [name, _] = constraint sql_delete_fk = ( "SET CONSTRAINTS {name} IMMEDIATE; " "ALTER TABLE {table} DROP CONSTRAINT {name}" ).format(table=schema_editor.quote_name(table), name=schema_editor.quote_name(name)) schema_editor.execute(sql_delete_fk) field = model.outbound.field to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field( field.remote_field.field_name ).column sql_create_fk = ( "ALTER TABLE {table} ADD CONSTRAINT {name} FOREIGN KEY " "({column}) REFERENCES {to_table} ({to_column}) " "ON DELETE CASCADE {deferrable};" ).format( table=schema_editor.quote_name(table), name=schema_editor.quote_name(name), column=schema_editor.quote_name(field.column), to_table=schema_editor.quote_name(to_table), to_column=schema_editor.quote_name(to_column), deferrable=schema_editor.connection.ops.deferrable_sql(), ) schema_editor.execute(sql_create_fk)
[ "def", "modify_fk_constraint", "(", "apps", ",", "schema_editor", ")", ":", "model", "=", "apps", ".", "get_model", "(", "\"message_sender\"", ",", "\"OutboundSendFailure\"", ")", "table", "=", "model", ".", "_meta", ".", "db_table", "with", "schema_editor", ".", "connection", ".", "cursor", "(", ")", "as", "cursor", ":", "constraints", "=", "schema_editor", ".", "connection", ".", "introspection", ".", "get_constraints", "(", "cursor", ",", "table", ")", "[", "constraint", "]", "=", "filter", "(", "lambda", "c", ":", "c", "[", "1", "]", "[", "\"foreign_key\"", "]", ",", "constraints", ".", "items", "(", ")", ")", "[", "name", ",", "_", "]", "=", "constraint", "sql_delete_fk", "=", "(", "\"SET CONSTRAINTS {name} IMMEDIATE; \"", "\"ALTER TABLE {table} DROP CONSTRAINT {name}\"", ")", ".", "format", "(", "table", "=", "schema_editor", ".", "quote_name", "(", "table", ")", ",", "name", "=", "schema_editor", ".", "quote_name", "(", "name", ")", ")", "schema_editor", ".", "execute", "(", "sql_delete_fk", ")", "field", "=", "model", ".", "outbound", ".", "field", "to_table", "=", "field", ".", "remote_field", ".", "model", ".", "_meta", ".", "db_table", "to_column", "=", "field", ".", "remote_field", ".", "model", ".", "_meta", ".", "get_field", "(", "field", ".", "remote_field", ".", "field_name", ")", ".", "column", "sql_create_fk", "=", "(", "\"ALTER TABLE {table} ADD CONSTRAINT {name} FOREIGN KEY \"", "\"({column}) REFERENCES {to_table} ({to_column}) \"", "\"ON DELETE CASCADE {deferrable};\"", ")", ".", "format", "(", "table", "=", "schema_editor", ".", "quote_name", "(", "table", ")", ",", "name", "=", "schema_editor", ".", "quote_name", "(", "name", ")", ",", "column", "=", "schema_editor", ".", "quote_name", "(", "field", ".", "column", ")", ",", "to_table", "=", "schema_editor", ".", "quote_name", "(", "to_table", ")", ",", "to_column", "=", "schema_editor", ".", "quote_name", "(", "to_column", ")", ",", "deferrable", "=", "schema_editor", ".", "connection", ".", "ops", ".", "deferrable_sql", "(", ")", ",", ")", "schema_editor", ".", "execute", "(", "sql_create_fk", ")" ]
Deletes the current foreign key constraint on the outbound field, and adds it again, but this time with an ON DELETE clause
[ "Delete", "s", "the", "current", "foreign", "key", "contraint", "on", "the", "outbound", "field", "and", "adds", "it", "again", "but", "this", "time", "with", "an", "ON", "DELETE", "clause" ]
257b01635171b9dbe1f5f13baa810c971bb2620e
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/migrations/0020_outboundsendfailure_db_backed_fk_constraint.py#L10-L47
train
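A hedged sketch of how a forward function like modify_fk_constraint is typically wired into a migration's operations list (the dependency name here is hypothetical, and no reverse migration is attempted):

from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [("message_sender", "0019_auto")]  # hypothetical predecessor
    operations = [
        # Forward only: the original constraint (without ON DELETE CASCADE)
        # is not restored on rollback.
        migrations.RunPython(modify_fk_constraint, migrations.RunPython.noop),
    ]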
PythonOptimizers/cygenja
cygenja/generator.py
Generator.log_warning
def log_warning(self, msg): """ Log a warning if ``logger`` exists. Args: msg: Warning to log. Warning: Can raise a ``RuntimeError`` if this was requested in the constructor. """ if self.__logger: self.__logger.warning(msg) if self.__raise_exception_on_warning: raise RuntimeError(msg)
python
def log_warning(self, msg): """ Log a warning if ``logger`` exists. Args: msg: Warning to log. Warning: Can raise a ``RuntimeError`` if this was requested in the constructor. """ if self.__logger: self.__logger.warning(msg) if self.__raise_exception_on_warning: raise RuntimeError(msg)
[ "def", "log_warning", "(", "self", ",", "msg", ")", ":", "if", "self", ".", "__logger", ":", "self", ".", "__logger", ".", "warning", "(", "msg", ")", "if", "self", ".", "__raise_exception_on_warning", ":", "raise", "RuntimeError", "(", "msg", ")" ]
Log a warning if ``logger`` exists. Args: msg: Warning to log. Warning: Can raise a ``RuntimeError`` if this was requested in the constructor.
[ "Log", "a", "warning", "if", "logger", "exists", "." ]
a9ef91cdfa8452beeeec4f050f928b830379f91c
https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/generator.py#L133-L148
train
PythonOptimizers/cygenja
cygenja/generator.py
Generator.log_error
def log_error(self, msg): """ Log an error and raise an exception. Args: msg: Error message to log. Raises: RuntimeError: With the message. """ if self.__logger: self.__logger.error(msg) raise RuntimeError(msg)
python
def log_error(self, msg): """ Log an error and raise an exception. Args: msg: Error message to log. Raises: RuntimeError: With the message. """ if self.__logger: self.__logger.error(msg) raise RuntimeError(msg)
[ "def", "log_error", "(", "self", ",", "msg", ")", ":", "if", "self", ".", "__logger", ":", "self", ".", "__logger", ".", "error", "(", "msg", ")", "raise", "RuntimeError", "(", "msg", ")" ]
Log an error and raise an exception. Args: msg: Error message to log. Raises: RuntimeError: With the message.
[ "Log", "an", "error", "and", "raise", "an", "exception", "." ]
a9ef91cdfa8452beeeec4f050f928b830379f91c
https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/generator.py#L150-L163
train
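The two records above (log_warning and log_error) implement a common log-or-raise policy. A standalone sketch of the same pattern, assuming the logger and the strictness flag are injected through the constructor as the docstrings suggest:

import logging

class Reporter(object):
    def __init__(self, logger=None, strict=False):
        self._logger = logger or logging.getLogger(__name__)
        self._strict = strict  # stands in for raise_exception_on_warning

    def warn(self, msg):
        self._logger.warning(msg)
        if self._strict:          # warnings optionally become hard failures
            raise RuntimeError(msg)

    def error(self, msg):
        self._logger.error(msg)
        raise RuntimeError(msg)   # errors always raise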
PythonOptimizers/cygenja
cygenja/generator.py
Generator.__add_action
def __add_action(self, relative_directory, action): """ Add action into the dictionary of actions. Args: relative_directory: action: """ generator_action_container = self.__actions.retrieve_element_or_default(relative_directory, None) if generator_action_container is None: generator_action_container = GeneratorActionContainer() generator_action_container.add_generator_action(action) self.__actions.add_element(location=relative_directory, element=generator_action_container) else: generator_action_container.add_generator_action(action)
python
def __add_action(self, relative_directory, action): """ Add action into the dictionary of actions. Args: relative_directory: action: """ generator_action_container = self.__actions.retrieve_element_or_default(relative_directory, None) if generator_action_container is None: generator_action_container = GeneratorActionContainer() generator_action_container.add_generator_action(action) self.__actions.add_element(location=relative_directory, element=generator_action_container) else: generator_action_container.add_generator_action(action)
[ "def", "__add_action", "(", "self", ",", "relative_directory", ",", "action", ")", ":", "generator_action_container", "=", "self", ".", "__actions", ".", "retrieve_element_or_default", "(", "relative_directory", ",", "None", ")", "if", "generator_action_container", "is", "None", ":", "generator_action_container", "=", "GeneratorActionContainer", "(", ")", "generator_action_container", ".", "add_generator_action", "(", "action", ")", "self", ".", "__actions", ".", "add_element", "(", "location", "=", "relative_directory", ",", "element", "=", "generator_action_container", ")", "else", ":", "generator_action_container", ".", "add_generator_action", "(", "action", ")" ]
Add action into the dictionary of actions. Args: relative_directory: action:
[ "Add", "action", "into", "the", "dictionary", "of", "actions", "." ]
a9ef91cdfa8452beeeec4f050f928b830379f91c
https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/generator.py#L285-L301
train
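A simplified sketch of the get-or-create bookkeeping in __add_action, with a plain dict of lists standing in for the GeneratorActionContainer store (the real container and trie-like location index are not shown in this record):

actions = {}  # relative_directory -> list of actions (stand-in container)

def add_action(relative_directory, action):
    container = actions.get(relative_directory)
    if container is None:
        actions[relative_directory] = [action]  # first action for this directory
    else:
        container.append(action)                # container already exists

add_action("src", "render_pyx")
add_action("src", "render_pxd")
print(actions)  # {'src': ['render_pyx', 'render_pxd']}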
PythonOptimizers/cygenja
cygenja/generator.py
Generator.__is_function_action
def __is_function_action(self, action_function): """ Detect if given function is really an action function. Args: action_function: Function to test. Note: We don't care if the variable refers to a function but rather if it is callable or not. """ # test if function returns a couple of values is_function_action = True if not hasattr(action_function, '__call__'): return False # OK, callable. Do we receive the right arguments? try: for end_string, context in action_function(): if not isinstance(end_string, basestring): self.log_error("Action function must return end of filename as a string as first argument") if not isinstance(context, dict): self.log_error("Action function must return context as a dict as second argument") break except Exception: is_function_action = False return is_function_action
python
def __is_function_action(self, action_function): """ Detect if given function is really an action function. Args: action_function: Function to test. Note: We don't care if the variable refers to a function but rather if it is callable or not. """ # test if function returns a couple of values is_function_action = True if not hasattr(action_function, '__call__'): return False # OK, callable. Do we receive the right arguments? try: for end_string, context in action_function(): if not isinstance(end_string, basestring): self.log_error("Action function must return end of filename as a string as first argument") if not isinstance(context, dict): self.log_error("Action function must return context as a dict as second argument") break except Exception: is_function_action = False return is_function_action
[ "def", "__is_function_action", "(", "self", ",", "action_function", ")", ":", "# test if function returns a couple of values", "is_function_action", "=", "True", "if", "not", "hasattr", "(", "action_function", ",", "'__call__'", ")", ":", "return", "False", "# OK, callable. Do we receive the right arguments?", "try", ":", "for", "end_string", ",", "context", "in", "action_function", "(", ")", ":", "if", "not", "isinstance", "(", "end_string", ",", "basestring", ")", ":", "self", ".", "log_error", "(", "\"Action function must return end of filename as a string as first argument\"", ")", "if", "not", "isinstance", "(", "context", ",", "dict", ")", ":", "self", ".", "log_error", "(", "\"Action function must return context as a dict as second argument\"", ")", "break", "except", "Exception", ":", "is_function_action", "=", "False", "return", "is_function_action" ]
Detect if given function is really an action function. Args: action_function: Function to test. Note: We don't care if the variable refers to a function but rather if it is callable or not.
[ "Detect", "if", "given", "function", "is", "really", "an", "action", "function", "." ]
a9ef91cdfa8452beeeec4f050f928b830379f91c
https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/generator.py#L313-L341
train
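The check above duck-types an "action function" as a callable whose iteration yields (filename-suffix string, context dict) pairs. Two hypothetical functions that would pass it:

def single_generation():
    # One rendering: the string completes the output filename,
    # the dict becomes the template context.
    yield ".pyx", {"type": "double", "index": "int"}

def multiple_generation():
    # One rendering per type, each with its own suffix and context.
    for t in ("float", "double"):
        yield "_%s.pyx" % t, {"type": t}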
PythonOptimizers/cygenja
cygenja/generator.py
Generator.register_default_action
def register_default_action(self, file_pattern, action_function): """ Default action used if no compatible action is found. Args: file_pattern: A :program:`fnmatch` pattern for the files concerned by this action. action_function: Warning: Be careful when defining a default action. This action will be applied to **all** template files for which no compatible action is found. You might prefer to declare explicit actions rather than rely on this implicit default action. Use at your own risk. That said, if you have lots of default cases, this default action can be very convenient and avoid lots of unnecessary action declarations. """ if self.__default_action is not None: self.log_error('Default action function already exists.') if not self.__is_function_action(action_function): self.log_error('Attached default function is not an action function.') self.__default_action = GeneratorAction(file_pattern=file_pattern, action_function=action_function)
python
def register_default_action(self, file_pattern, action_function): """ Default action used if no compatible action is found. Args: file_pattern: A :program:`fnmatch` pattern for the files concerned by this action. action_function: Warning: Be careful when defining a default action. This action will be applied to **all** template files for which no compatible action is found. You might prefer to declare explicit actions rather than rely on this implicit default action. Use at your own risk. That said, if you have lots of default cases, this default action can be very convenient and avoid lots of unnecessary action declarations. """ if self.__default_action is not None: self.log_error('Default action function already exists.') if not self.__is_function_action(action_function): self.log_error('Attached default function is not an action function.') self.__default_action = GeneratorAction(file_pattern=file_pattern, action_function=action_function)
[ "def", "register_default_action", "(", "self", ",", "file_pattern", ",", "action_function", ")", ":", "if", "self", ".", "__default_action", "is", "not", "None", ":", "self", ".", "log_error", "(", "'Default action function already exist.'", ")", "if", "not", "self", ".", "__is_function_action", "(", "action_function", ")", ":", "self", ".", "log_error", "(", "'Attached default function is not an action function.'", ")", "self", ".", "__default_action", "=", "GeneratorAction", "(", "file_pattern", "=", "file_pattern", ",", "action_function", "=", "action_function", ")" ]
Default action used if no compatible action is found. Args: file_pattern: A :program:`fnmatch` pattern for the files concerned by this action. action_function: Warning: Be careful when defining a default action. This action will be applied to **all** template files for which no compatible action is found. You might prefer to declare explicit actions rather than rely on this implicit default action. Use at your own risk. That said, if you have lots of default cases, this default action can be very convenient and avoid lots of unnecessary action declarations.
[ "Default", "action", "used", "if", "no", "compatible", "action", "is", "found", "." ]
a9ef91cdfa8452beeeec4f050f928b830379f91c
https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/generator.py#L373-L393
train
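A hedged usage sketch for register_default_action; the fnmatch pattern and the generator instance are assumptions, since neither appears in this record:

def default_action():
    # One pass per template, with an empty context.
    yield ".pyx", {}

# Assuming `gen` is a configured cygenja Generator:
# gen.register_default_action("*.cpy", default_action)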
ckcollab/polished
polished/backends/simple.py
SimpleBackend.prepare_page
def prepare_page(self, *args, **kwargs): ''' This is called after the page has been loaded; a good time to do extra polishing ''' super(BaseBackend, self).prepare_page(*args, **kwargs)
python
def prepare_page(self, *args, **kwargs): ''' This is called after the page has been loaded; a good time to do extra polishing ''' super(BaseBackend, self).prepare_page(*args, **kwargs)
[ "def", "prepare_page", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "BaseBackend", ",", "self", ")", ".", "prepare_page", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
This is called after the page has been loaded; a good time to do extra polishing
[ "This", "is", "called", "after", "the", "page", "has", "been", "loaded", "good", "time", "to", "do", "extra", "polishing" ]
5a00b2fbe569bc957d1647c0849fd344db29b644
https://github.com/ckcollab/polished/blob/5a00b2fbe569bc957d1647c0849fd344db29b644/polished/backends/simple.py#L23-L27
train
Jaymon/decorators
decorators.py
Decorator.set_wrapped
def set_wrapped(self, wrapped): """This will decide what wrapped is and set .wrapped_func or .wrapped_class accordingly :param wrapped: either a function or class """ self.wrapped = wrapped functools.update_wrapper(self, self.wrapped, updated=()) self.wrapped_func = False self.wrapped_class = False if inspect.isroutine(wrapped): self.wrapped_func = True elif isinstance(wrapped, type): self.wrapped_class = True
python
def set_wrapped(self, wrapped): """This will decide what wrapped is and set .wrapped_func or .wrapped_class accordingly :param wrapped: either a function or class """ self.wrapped = wrapped functools.update_wrapper(self, self.wrapped, updated=()) self.wrapped_func = False self.wrapped_class = False if inspect.isroutine(wrapped): self.wrapped_func = True elif isinstance(wrapped, type): self.wrapped_class = True
[ "def", "set_wrapped", "(", "self", ",", "wrapped", ")", ":", "self", ".", "wrapped", "=", "wrapped", "functools", ".", "update_wrapper", "(", "self", ",", "self", ".", "wrapped", ",", "updated", "=", "(", ")", ")", "self", ".", "wrapped_func", "=", "False", "self", ".", "wrapped_class", "=", "False", "if", "inspect", ".", "isroutine", "(", "wrapped", ")", ":", "self", ".", "wrapped_func", "=", "True", "elif", "isinstance", "(", "wrapped", ",", "type", ")", ":", "self", ".", "wrapped_class", "=", "True" ]
This will decide what wrapped is and set .wrapped_func or .wrapped_class accordingly :param wrapped: either a function or class
[ "This", "will", "decide", "what", "wrapped", "is", "and", "set", ".", "wrapped_func", "or", ".", "wrapped_class", "accordingly" ]
20525e93b5d259cfa9be771c43279825724c109e
https://github.com/Jaymon/decorators/blob/20525e93b5d259cfa9be771c43279825724c109e/decorators.py#L126-L141
train
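The function-vs-class discrimination used by set_wrapped, shown standalone:

import inspect

def kind(wrapped):
    if inspect.isroutine(wrapped):  # plain functions, methods, builtins
        return "function"
    if isinstance(wrapped, type):   # classes
        return "class"
    return "other"

class Foo(object):
    pass

print(kind(len), kind(Foo), kind(42))  # function class other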
Jaymon/decorators
decorators.py
Decorator.decorate_class
def decorate_class(self, klass, *decorator_args, **decorator_kwargs): """override this in a child class with your own logic; it must return a function that returns klass or the like :param klass: the class object that is being decorated :param decorator_args: tuple -- the arguments passed into the decorator (eg, @dec(1, 2)) :param decorator_kwargs: dict -- the named args passed into the decorator (eg, @dec(foo=1)) :returns: the wrapped class """ raise RuntimeError("decorator {} does not support class decoration".format(self.__class__.__name__)) return klass
python
def decorate_class(self, klass, *decorator_args, **decorator_kwargs): """override this in a child class with your own logic; it must return a function that returns klass or the like :param klass: the class object that is being decorated :param decorator_args: tuple -- the arguments passed into the decorator (eg, @dec(1, 2)) :param decorator_kwargs: dict -- the named args passed into the decorator (eg, @dec(foo=1)) :returns: the wrapped class """ raise RuntimeError("decorator {} does not support class decoration".format(self.__class__.__name__)) return klass
[ "def", "decorate_class", "(", "self", ",", "klass", ",", "*", "decorator_args", ",", "*", "*", "decorator_kwargs", ")", ":", "raise", "RuntimeError", "(", "\"decorator {} does not support class decoration\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "return", "klass" ]
override this in a child class with your own logic; it must return a function that returns klass or the like :param klass: the class object that is being decorated :param decorator_args: tuple -- the arguments passed into the decorator (eg, @dec(1, 2)) :param decorator_kwargs: dict -- the named args passed into the decorator (eg, @dec(foo=1)) :returns: the wrapped class
[ "override", "this", "in", "a", "child", "class", "with", "your", "own", "logic", "it", "must", "return", "a", "function", "that", "returns", "klass", "or", "the", "like" ]
20525e93b5d259cfa9be771c43279825724c109e
https://github.com/Jaymon/decorators/blob/20525e93b5d259cfa9be771c43279825724c109e/decorators.py#L212-L222
train
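A hedged sketch of how a concrete subclass might override this hook; the Decorator base class from this module is assumed to be in scope, and the marker attribute is purely illustrative:

class AddMarker(Decorator):
    def decorate_class(self, klass, *decorator_args, **decorator_kwargs):
        klass.decorated = True  # attach an illustrative marker attribute
        return klass            # the hook must hand back the (wrapped) class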
Jaymon/decorators
decorators.py
InstanceDecorator.decorate_class
def decorate_class(self, klass, *decorator_args, **decorator_kwargs): """where the magic happens; this wraps a class to call our decorate method in the init of the class """ class ChildClass(klass): def __init__(slf, *args, **kwargs): super(ChildClass, slf).__init__(*args, **kwargs) self.decorate( slf, *decorator_args, **decorator_kwargs ) decorate_klass = ChildClass decorate_klass.__name__ = klass.__name__ decorate_klass.__module__ = klass.__module__ # for some reason you can't update a __doc__ on a class # http://bugs.python.org/issue12773 return decorate_klass
python
def decorate_class(self, klass, *decorator_args, **decorator_kwargs): """where the magic happens; this wraps a class to call our decorate method in the init of the class """ class ChildClass(klass): def __init__(slf, *args, **kwargs): super(ChildClass, slf).__init__(*args, **kwargs) self.decorate( slf, *decorator_args, **decorator_kwargs ) decorate_klass = ChildClass decorate_klass.__name__ = klass.__name__ decorate_klass.__module__ = klass.__module__ # for some reason you can't update a __doc__ on a class # http://bugs.python.org/issue12773 return decorate_klass
[ "def", "decorate_class", "(", "self", ",", "klass", ",", "*", "decorator_args", ",", "*", "*", "decorator_kwargs", ")", ":", "class", "ChildClass", "(", "klass", ")", ":", "def", "__init__", "(", "slf", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "ChildClass", ",", "slf", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "decorate", "(", "slf", ",", "*", "decorator_args", ",", "*", "*", "decorator_kwargs", ")", "decorate_klass", "=", "ChildClass", "decorate_klass", ".", "__name__", "=", "klass", ".", "__name__", "decorate_klass", ".", "__module__", "=", "klass", ".", "__module__", "# for some reason you can't update a __doc__ on a class", "# http://bugs.python.org/issue12773", "return", "decorate_klass" ]
where the magic happens; this wraps a class to call our decorate method in the init of the class
[ "where", "the", "magic", "happens", "this", "wraps", "a", "class", "to", "call", "our", "decorate", "method", "in", "the", "init", "of", "the", "class" ]
20525e93b5d259cfa9be771c43279825724c109e
https://github.com/Jaymon/decorators/blob/20525e93b5d259cfa9be771c43279825724c109e/decorators.py#L238-L255
train
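The wrapping trick in InstanceDecorator.decorate_class, reduced to a standalone sketch: subclass the target so extra per-instance work runs immediately after __init__ (all names here are hypothetical):

def wrap_class(klass, extra):
    class Child(klass):
        def __init__(self, *args, **kwargs):
            super(Child, self).__init__(*args, **kwargs)
            extra(self)                  # post-init hook on every instance
    Child.__name__ = klass.__name__      # keep the original identity readable
    Child.__module__ = klass.__module__
    return Child

class Greeter(object):
    def __init__(self, name):
        self.name = name

Stamped = wrap_class(Greeter, lambda inst: setattr(inst, "stamped", True))
g = Stamped("alice")
print(g.name, g.stamped)  # alice True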
bradmontgomery/django-blargg
blargg/models.py
generate_entry_tags
def generate_entry_tags(sender, instance, created, raw, using, **kwargs): """Generate the M2M ``Tag``s for an ``Entry`` right after it has been saved.""" Tag.objects.create_tags(instance)
python
def generate_entry_tags(sender, instance, created, raw, using, **kwargs): """Generate the M2M ``Tag``s for an ``Entry`` right after it has been saved.""" Tag.objects.create_tags(instance)
[ "def", "generate_entry_tags", "(", "sender", ",", "instance", ",", "created", ",", "raw", ",", "using", ",", "*", "*", "kwargs", ")", ":", "Tag", ".", "objects", ".", "create_tags", "(", "instance", ")" ]
Generate the M2M ``Tag``s for an ``Entry`` right after it has been saved.
[ "Generate", "the", "M2M", "Tag", "s", "for", "an", "Entry", "right", "after", "it", "has", "been", "saved", "." ]
5d683e04723889a0d1c6d6cf1a67a3d431a2e617
https://github.com/bradmontgomery/django-blargg/blob/5d683e04723889a0d1c6d6cf1a67a3d431a2e617/blargg/models.py#L247-L250
train
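generate_entry_tags reads like a Django post_save receiver; a hedged sketch of the hookup (the connect call is not part of this record, and the sender is inferred from the docstring's mention of Entry):

from django.db.models.signals import post_save

# Entry: the blargg model the docstring refers to.
post_save.connect(generate_entry_tags, sender=Entry)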
bradmontgomery/django-blargg
blargg/models.py
entry_stats
def entry_stats(entries, top_n=10): """Calculates stats for the given ``QuerySet`` of ``Entry``s.""" wc = Counter() # A Word counter for content in entries.values_list("rendered_content", flat=True): # Do a little cleanup content = strip_tags(content) # remove all html tags content = re.sub('\s+', ' ', content) # condense all whitespace content = re.sub('[^A-Za-z ]+', '', content) # remove non-alpha chars words = [w.lower() for w in content.split()] wc.update([w for w in words if w not in IGNORE_WORDS]) return { "total_words": len(wc.values()), "most_common": wc.most_common(top_n), }
python
def entry_stats(entries, top_n=10): """Calculates stats for the given ``QuerySet`` of ``Entry``s.""" wc = Counter() # A Word counter for content in entries.values_list("rendered_content", flat=True): # Do a little cleanup content = strip_tags(content) # remove all html tags content = re.sub('\s+', ' ', content) # condense all whitespace content = re.sub('[^A-Za-z ]+', '', content) # remove non-alpha chars words = [w.lower() for w in content.split()] wc.update([w for w in words if w not in IGNORE_WORDS]) return { "total_words": len(wc.values()), "most_common": wc.most_common(top_n), }
[ "def", "entry_stats", "(", "entries", ",", "top_n", "=", "10", ")", ":", "wc", "=", "Counter", "(", ")", "# A Word counter", "for", "content", "in", "entries", ".", "values_list", "(", "\"rendered_content\"", ",", "flat", "=", "True", ")", ":", "# Do a little cleanup", "content", "=", "strip_tags", "(", "content", ")", "# remove all html tags", "content", "=", "re", ".", "sub", "(", "'\\s+'", ",", "' '", ",", "content", ")", "# condense all whitespace", "content", "=", "re", ".", "sub", "(", "'[^A-Za-z ]+'", ",", "''", ",", "content", ")", "# remove non-alpha chars", "words", "=", "[", "w", ".", "lower", "(", ")", "for", "w", "in", "content", ".", "split", "(", ")", "]", "wc", ".", "update", "(", "[", "w", "for", "w", "in", "words", "if", "w", "not", "in", "IGNORE_WORDS", "]", ")", "return", "{", "\"total_words\"", ":", "len", "(", "wc", ".", "values", "(", ")", ")", ",", "\"most_common\"", ":", "wc", ".", "most_common", "(", "top_n", ")", ",", "}" ]
Calculates stats for the given ``QuerySet`` of ``Entry``s.
[ "Calculates", "stats", "for", "the", "given", "QuerySet", "of", "Entry", "s", "." ]
5d683e04723889a0d1c6d6cf1a67a3d431a2e617
https://github.com/bradmontgomery/django-blargg/blob/5d683e04723889a0d1c6d6cf1a67a3d431a2e617/blargg/models.py#L258-L274
train
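A worked miniature of the cleanup-and-count pipeline in entry_stats, run on plain strings instead of a QuerySet (the ignore set and the tag-stripping regex are stand-ins for IGNORE_WORDS and strip_tags):

import re
from collections import Counter

IGNORE = {"the", "a", "to"}  # stand-in for IGNORE_WORDS
wc = Counter()
for content in ["<p>The quick fox</p>", "A fox, again!"]:
    content = re.sub(r"<[^>]+>", "", content)      # crude strip_tags stand-in
    content = re.sub(r"\s+", " ", content)         # condense whitespace
    content = re.sub(r"[^A-Za-z ]+", "", content)  # drop non-alpha chars
    words = [w.lower() for w in content.split()]
    wc.update(w for w in words if w not in IGNORE)

print(len(wc.values()))   # 3 distinct counted words: quick, fox, again
print(wc.most_common(2))  # [('fox', 2), ('quick', 1)]  (tie order may vary)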