Dataset schema:
repo: stringlengths (7-55)
path: stringlengths (4-127)
func_name: stringlengths (1-88)
original_string: stringlengths (75-19.8k)
language: stringclasses (1 value)
code: stringlengths (75-19.8k)
code_tokens: sequence
docstring: stringlengths (3-17.3k)
docstring_tokens: sequence
sha: stringlengths (40-40)
url: stringlengths (87-242)
partition: stringclasses (1 value)
shaypal5/strct
strct/dicts/_dict.py
sum_num_dicts
def sum_num_dicts(dicts, normalize=False): """Sums the given dicts into a single dict mapping each key to the sum of its mappings in all given dicts. Parameters ---------- dicts : list A list of dict objects mapping each key to a numeric value. normalize : bool, default False Indicates whether to normalize all values by value sum. Returns ------- dict A dict where each key is mapped to the sum of its mappings in all given dicts. Example ------- >>> dict1 = {'a': 3, 'b': 2} >>> dict2 = {'a':7, 'c': 8} >>> result = sum_num_dicts([dict1, dict2]) >>> print(sorted(result.items())) [('a', 10), ('b', 2), ('c', 8)] >>> result = sum_num_dicts([dict1, dict2], normalize=True) >>> print(sorted(result.items())) [('a', 0.5), ('b', 0.1), ('c', 0.4)] """ sum_dict = {} for dicti in dicts: for key in dicti: sum_dict[key] = sum_dict.get(key, 0) + dicti[key] if normalize: return norm_int_dict(sum_dict) return sum_dict
python
def sum_num_dicts(dicts, normalize=False): """Sums the given dicts into a single dict mapping each key to the sum of its mappings in all given dicts. Parameters ---------- dicts : list A list of dict objects mapping each key to a numeric value. normalize : bool, default False Indicates whether to normalize all values by value sum. Returns ------- dict A dict where each key is mapped to the sum of its mappings in all given dicts. Example ------- >>> dict1 = {'a': 3, 'b': 2} >>> dict2 = {'a':7, 'c': 8} >>> result = sum_num_dicts([dict1, dict2]) >>> print(sorted(result.items())) [('a', 10), ('b', 2), ('c', 8)] >>> result = sum_num_dicts([dict1, dict2], normalize=True) >>> print(sorted(result.items())) [('a', 0.5), ('b', 0.1), ('c', 0.4)] """ sum_dict = {} for dicti in dicts: for key in dicti: sum_dict[key] = sum_dict.get(key, 0) + dicti[key] if normalize: return norm_int_dict(sum_dict) return sum_dict
[ "def", "sum_num_dicts", "(", "dicts", ",", "normalize", "=", "False", ")", ":", "sum_dict", "=", "{", "}", "for", "dicti", "in", "dicts", ":", "for", "key", "in", "dicti", ":", "sum_dict", "[", "key", "]", "=", "sum_dict", ".", "get", "(", "key", ",", "0", ")", "+", "dicti", "[", "key", "]", "if", "normalize", ":", "return", "norm_int_dict", "(", "sum_dict", ")", "return", "sum_dict" ]
Sums the given dicts into a single dict mapping each key to the sum of its mappings in all given dicts. Parameters ---------- dicts : list A list of dict objects mapping each key to a numeric value. normalize : bool, default False Indicates whether to normalize all values by value sum. Returns ------- dict A dict where each key is mapped to the sum of its mappings in all given dicts. Example ------- >>> dict1 = {'a': 3, 'b': 2} >>> dict2 = {'a':7, 'c': 8} >>> result = sum_num_dicts([dict1, dict2]) >>> print(sorted(result.items())) [('a', 10), ('b', 2), ('c', 8)] >>> result = sum_num_dicts([dict1, dict2], normalize=True) >>> print(sorted(result.items())) [('a', 0.5), ('b', 0.1), ('c', 0.4)]
[ "Sums", "the", "given", "dicts", "into", "a", "single", "dict", "mapping", "each", "key", "to", "the", "sum", "of", "its", "mappings", "in", "all", "given", "dicts", "." ]
f3a301692d052ddb79331230b3c00625db1d83fc
https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/dicts/_dict.py#L609-L643
train
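The function above delegates normalization to norm_int_dict, a helper defined elsewhere in strct/dicts/_dict.py and not shown in this record. A minimal sketch of that helper, assuming (from the normalize=True doctest above) that it divides every value by the sum of all values:

def norm_int_dict(int_dict):
    """Hypothetical sketch: normalize the values of a dict by their sum."""
    total = sum(int_dict.values())
    return {key: value / total for key, value in int_dict.items()}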
shaypal5/strct
strct/dicts/_dict.py
reverse_dict
def reverse_dict(dict_obj): """Reverse a dict, so each value in it maps to a sorted list of its keys. Parameters ---------- dict_obj : dict A key-value dict. Returns ------- dict A dict where each value maps to a sorted list of all the unique keys that mapped to it. Example ------- >>> dicti = {'a': 1, 'b': 3, 'c': 1} >>> reverse_dict(dicti) {1: ['a', 'c'], 3: ['b']} """ new_dict = {} for key in dict_obj: add_to_dict_val_set(dict_obj=new_dict, key=dict_obj[key], val=key) for key in new_dict: new_dict[key] = sorted(new_dict[key], reverse=False) return new_dict
python
def reverse_dict(dict_obj): """Reverse a dict, so each value in it maps to a sorted list of its keys. Parameters ---------- dict_obj : dict A key-value dict. Returns ------- dict A dict where each value maps to a sorted list of all the unique keys that mapped to it. Example ------- >>> dicti = {'a': 1, 'b': 3, 'c': 1} >>> reverse_dict(dicti) {1: ['a', 'c'], 3: ['b']} """ new_dict = {} for key in dict_obj: add_to_dict_val_set(dict_obj=new_dict, key=dict_obj[key], val=key) for key in new_dict: new_dict[key] = sorted(new_dict[key], reverse=False) return new_dict
[ "def", "reverse_dict", "(", "dict_obj", ")", ":", "new_dict", "=", "{", "}", "for", "key", "in", "dict_obj", ":", "add_to_dict_val_set", "(", "dict_obj", "=", "new_dict", ",", "key", "=", "dict_obj", "[", "key", "]", ",", "val", "=", "key", ")", "for", "key", "in", "new_dict", ":", "new_dict", "[", "key", "]", "=", "sorted", "(", "new_dict", "[", "key", "]", ",", "reverse", "=", "False", ")", "return", "new_dict" ]
Reverse a dict, so each value in it maps to a sorted list of its keys. Parameters ---------- dict_obj : dict A key-value dict. Returns ------- dict A dict where each value maps to a sorted list of all the unique keys that mapped to it. Example ------- >>> dicti = {'a': 1, 'b': 3, 'c': 1} >>> reverse_dict(dicti) {1: ['a', 'c'], 3: ['b']}
[ "Reverse", "a", "dict", "so", "each", "value", "in", "it", "maps", "to", "a", "sorted", "list", "of", "its", "keys", "." ]
f3a301692d052ddb79331230b3c00625db1d83fc
https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/dicts/_dict.py#L677-L702
train
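reverse_dict leans on add_to_dict_val_set, another helper from the same module that is not shown here. Judging by its name, and by the fact that the accumulated collection is later passed to sorted(), it presumably gathers values into a set; a minimal sketch under that assumption:

def add_to_dict_val_set(dict_obj, key, val):
    # Ensure dict_obj[key] is a set, then add val to it; duplicates are
    # absorbed, which is why reverse_dict yields unique keys per value.
    dict_obj.setdefault(key, set()).add(val)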
shaypal5/strct
strct/dicts/_dict.py
reverse_dict_partial
def reverse_dict_partial(dict_obj): """Reverse a dict, so each value in it maps to one of its keys. Parameters ---------- dict_obj : dict A key-value dict. Returns ------- dict A dict where each value maps to the key that mapped to it. Example ------- >>> dicti = {'a': 1, 'b': 3} >>> reverse_dict_partial(dicti) {1: 'a', 3: 'b'} """ new_dict = {} for key in dict_obj: new_dict[dict_obj[key]] = key return new_dict
python
def reverse_dict_partial(dict_obj): """Reverse a dict, so each value in it maps to one of its keys. Parameters ---------- dict_obj : dict A key-value dict. Returns ------- dict A dict where each value maps to the key that mapped to it. Example ------- >>> dicti = {'a': 1, 'b': 3} >>> reverse_dict_partial(dicti) {1: 'a', 3: 'b'} """ new_dict = {} for key in dict_obj: new_dict[dict_obj[key]] = key return new_dict
[ "def", "reverse_dict_partial", "(", "dict_obj", ")", ":", "new_dict", "=", "{", "}", "for", "key", "in", "dict_obj", ":", "new_dict", "[", "dict_obj", "[", "key", "]", "]", "=", "key", "return", "new_dict" ]
Reverse a dict, so each value in it maps to one of its keys. Parameters ---------- dict_obj : dict A key-value dict. Returns ------- dict A dict where each value maps to the key that mapped to it. Example ------- >>> dicti = {'a': 1, 'b': 3} >>> reverse_dict_partial(dicti) {1: 'a', 3: 'b'}
[ "Reverse", "a", "dict", "so", "each", "value", "in", "it", "maps", "to", "one", "of", "its", "keys", "." ]
f3a301692d052ddb79331230b3c00625db1d83fc
https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/dicts/_dict.py#L705-L727
train
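One caveat worth spelling out for reverse_dict_partial: when several keys map to the same value, only one of them survives the reversal. For example:

dicti = {'a': 1, 'c': 1}
print(reverse_dict_partial(dicti))
# {1: 'c'} on Python 3.7+, where dicts iterate in insertion order,
# so the last key seen for a given value wins.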
shaypal5/strct
strct/dicts/_dict.py
reverse_list_valued_dict
def reverse_list_valued_dict(dict_obj): """Reverse a list-valued dict, so each element in a list maps to its key. Parameters ---------- dict_obj : dict A dict where each key maps to a list of unique values. Values are assumed to be unique across the entire dict, and not just per-list. Returns ------- dict A dict where each element in a value list of the input dict maps to the key that mapped to the list it belongs to. Example ------- >>> dicti = {'a': [1, 2], 'b': [3, 4]} >>> reverse_list_valued_dict(dicti) {1: 'a', 2: 'a', 3: 'b', 4: 'b'} """ new_dict = {} for key in dict_obj: for element in dict_obj[key]: new_dict[element] = key return new_dict
python
def reverse_list_valued_dict(dict_obj): """Reverse a list-valued dict, so each element in a list maps to its key. Parameters ---------- dict_obj : dict A dict where each key maps to a list of unique values. Values are assumed to be unique across the entire dict, and not just per-list. Returns ------- dict A dict where each element in a value list of the input dict maps to the key that mapped to the list it belongs to. Example ------- >>> dicti = {'a': [1, 2], 'b': [3, 4]} >>> reverse_list_valued_dict(dicti) {1: 'a', 2: 'a', 3: 'b', 4: 'b'} """ new_dict = {} for key in dict_obj: for element in dict_obj[key]: new_dict[element] = key return new_dict
[ "def", "reverse_list_valued_dict", "(", "dict_obj", ")", ":", "new_dict", "=", "{", "}", "for", "key", "in", "dict_obj", ":", "for", "element", "in", "dict_obj", "[", "key", "]", ":", "new_dict", "[", "element", "]", "=", "key", "return", "new_dict" ]
Reverse a list-valued dict, so each element in a list maps to its key. Parameters ---------- dict_obj : dict A dict where each key maps to a list of unique values. Values are assumed to be unique across the entire dict, and not just per-list. Returns ------- dict A dict where each element in a value list of the input dict maps to the key that mapped to the list it belongs to. Example ------- >>> dicti = {'a': [1, 2], 'b': [3, 4]} >>> reverse_list_valued_dict(dicti) {1: 'a', 2: 'a', 3: 'b', 4: 'b'}
[ "Reverse", "a", "list", "-", "valued", "dict", "so", "each", "element", "in", "a", "list", "maps", "to", "its", "key", "." ]
f3a301692d052ddb79331230b3c00625db1d83fc
https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/dicts/_dict.py#L730-L755
train
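The uniqueness assumption in the docstring matters: if an element appears in more than one value list, later keys silently overwrite earlier ones. For example:

dicti = {'a': [1, 2], 'b': [2, 3]}
print(reverse_list_valued_dict(dicti))
# {1: 'a', 2: 'b', 3: 'b'} -- the mapping 2 -> 'a' is lost.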
shaypal5/strct
strct/dicts/_dict.py
flatten_dict
def flatten_dict(dict_obj, separator='.', flatten_lists=False): """Flattens the given dict into a single-level dict with flattened keys. Parameters ---------- dict_obj : dict A possibly nested dict. separator : str, optional The character to use as a separator between keys. Defaults to '.'. flatten_lists : bool, optional If True, list values are also flattened. False by default. Returns ------- dict A shallow dict, where no value is a dict in itself, and keys are concatenations of original key paths separated with the given separator. Example ------- >>> dicti = {'a': 1, 'b': {'g': 4, 'o': 9}, 'x': [4, 'd']} >>> flat = flatten_dict(dicti) >>> sorted(flat.items()) [('a', 1), ('b.g', 4), ('b.o', 9), ('x.0', 4), ('x.1', 'd')] """ reducer = _get_key_reducer(separator) flat = {} def _flatten_key_val(key, val, parent): flat_key = reducer(parent, key) try: _flatten(val, flat_key) except TypeError: flat[flat_key] = val def _flatten(d, parent=None): try: for key, val in d.items(): _flatten_key_val(key, val, parent) except AttributeError: if isinstance(d, (str, bytes)): raise TypeError for i, value in enumerate(d): _flatten_key_val(str(i), value, parent) _flatten(dict_obj) return flat
python
def flatten_dict(dict_obj, separator='.', flatten_lists=False): """Flattens the given dict into a single-level dict with flattened keys. Parameters ---------- dict_obj : dict A possibly nested dict. separator : str, optional The character to use as a separator between keys. Defaults to '.'. flatten_lists : bool, optional If True, list values are also flattened. False by default. Returns ------- dict A shallow dict, where no value is a dict in itself, and keys are concatenations of original key paths separated with the given separator. Example ------- >>> dicti = {'a': 1, 'b': {'g': 4, 'o': 9}, 'x': [4, 'd']} >>> flat = flatten_dict(dicti) >>> sorted(flat.items()) [('a', 1), ('b.g', 4), ('b.o', 9), ('x.0', 4), ('x.1', 'd')] """ reducer = _get_key_reducer(separator) flat = {} def _flatten_key_val(key, val, parent): flat_key = reducer(parent, key) try: _flatten(val, flat_key) except TypeError: flat[flat_key] = val def _flatten(d, parent=None): try: for key, val in d.items(): _flatten_key_val(key, val, parent) except AttributeError: if isinstance(d, (str, bytes)): raise TypeError for i, value in enumerate(d): _flatten_key_val(str(i), value, parent) _flatten(dict_obj) return flat
[ "def", "flatten_dict", "(", "dict_obj", ",", "separator", "=", "'.'", ",", "flatten_lists", "=", "False", ")", ":", "reducer", "=", "_get_key_reducer", "(", "separator", ")", "flat", "=", "{", "}", "def", "_flatten_key_val", "(", "key", ",", "val", ",", "parent", ")", ":", "flat_key", "=", "reducer", "(", "parent", ",", "key", ")", "try", ":", "_flatten", "(", "val", ",", "flat_key", ")", "except", "TypeError", ":", "flat", "[", "flat_key", "]", "=", "val", "def", "_flatten", "(", "d", ",", "parent", "=", "None", ")", ":", "try", ":", "for", "key", ",", "val", "in", "d", ".", "items", "(", ")", ":", "_flatten_key_val", "(", "key", ",", "val", ",", "parent", ")", "except", "AttributeError", ":", "if", "isinstance", "(", "d", ",", "(", "str", ",", "bytes", ")", ")", ":", "raise", "TypeError", "for", "i", ",", "value", "in", "enumerate", "(", "d", ")", ":", "_flatten_key_val", "(", "str", "(", "i", ")", ",", "value", ",", "parent", ")", "_flatten", "(", "dict_obj", ")", "return", "flat" ]
Flattens the given dict into a single-level dict with flattened keys. Parameters ---------- dict_obj : dict A possibly nested dict. separator : str, optional The character to use as a separator between keys. Defaults to '.'. flatten_lists : bool, optional If True, list values are also flattened. False by default. Returns ------- dict A shallow dict, where no value is a dict in itself, and keys are concatenations of original key paths separated with the given separator. Example ------- >>> dicti = {'a': 1, 'b': {'g': 4, 'o': 9}, 'x': [4, 'd']} >>> flat = flatten_dict(dicti) >>> sorted(flat.items()) [('a', 1), ('b.g', 4), ('b.o', 9), ('x.0', 4), ('x.1', 'd')]
[ "Flattens", "the", "given", "dict", "into", "a", "single", "-", "level", "dict", "with", "flattend", "keys", "." ]
f3a301692d052ddb79331230b3c00625db1d83fc
https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/dicts/_dict.py#L766-L812
train
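flatten_dict depends on _get_key_reducer, which is not shown in this record; judging from the calls reducer(parent, key) with parent=None at the root, it presumably returns a function that joins key paths with the separator. A sketch under that assumption follows. Note also that, as written, the function always flattens lists via the enumerate() branch, so the flatten_lists parameter is accepted but never consulted.

def _get_key_reducer(separator):
    """Hypothetical sketch: build a function that joins key paths."""
    def _reducer(parent, key):
        # A None parent marks the root level, so the key stands alone.
        if parent is None:
            return key
        return '{}{}{}'.format(parent, separator, key)
    return _reducer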
shaypal5/strct
strct/dicts/_dict.py
pprint_int_dict
def pprint_int_dict(int_dict, indent=4, descending=False): """Prints the given dict with int values in a nice way. Parameters ---------- int_dict : dict A dict object mapping each key to an int value. """ sorted_tup = sorted(int_dict.items(), key=lambda x: x[1]) if descending: sorted_tup.reverse() print('{') for tup in sorted_tup: print('{}{}: {}'.format(' '*indent, tup[0], tup[1])) print('}')
python
def pprint_int_dict(int_dict, indent=4, descending=False): """Prints the given dict with int values in a nice way. Parameters ---------- int_dict : dict A dict object mapping each key to an int value. """ sorted_tup = sorted(int_dict.items(), key=lambda x: x[1]) if descending: sorted_tup.reverse() print('{') for tup in sorted_tup: print('{}{}: {}'.format(' '*indent, tup[0], tup[1])) print('}')
[ "def", "pprint_int_dict", "(", "int_dict", ",", "indent", "=", "4", ",", "descending", "=", "False", ")", ":", "sorted_tup", "=", "sorted", "(", "int_dict", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "if", "descending", ":", "sorted_tup", ".", "reverse", "(", ")", "print", "(", "'{'", ")", "for", "tup", "in", "sorted_tup", ":", "print", "(", "'{}{}: {}'", ".", "format", "(", "' '", "*", "indent", ",", "tup", "[", "0", "]", ",", "tup", "[", "1", "]", ")", ")", "print", "(", "'}'", ")" ]
Prints the given dict with int values in a nice way. Parameters ---------- int_dict : dict A dict object mapping each key to an int value.
[ "Prints", "the", "given", "dict", "with", "int", "values", "in", "a", "nice", "way", "." ]
f3a301692d052ddb79331230b3c00625db1d83fc
https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/dicts/_dict.py#L815-L829
train
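A short usage sketch showing the printed layout (entries are sorted by value, with descending=True reversing the order):

pprint_int_dict({'a': 3, 'b': 1}, indent=2, descending=True)
# Prints:
# {
#   a: 3
#   b: 1
# }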
shaypal5/strct
strct/dicts/_dict.py
key_value_nested_generator
def key_value_nested_generator(dict_obj): """Recursively iterate over key-value pairs of nested dictionaries. Parameters ---------- dict_obj : dict The outer-most dict to iterate on. Returns ------- generator A generator over key-value pairs in all nested dictionaries. Example ------- >>> dicti = {'a': 1, 'b': {'c': 3, 'd': 4}} >>> print(sorted(list(key_value_nested_generator(dicti)))) [('a', 1), ('c', 3), ('d', 4)] """ for key, value in dict_obj.items(): if isinstance(value, dict): for key, value in key_value_nested_generator(value): yield key, value else: yield key, value
python
def key_value_nested_generator(dict_obj): """Recursively iterate over key-value pairs of nested dictionaries. Parameters ---------- dict_obj : dict The outer-most dict to iterate on. Returns ------- generator A generator over key-value pairs in all nested dictionaries. Example ------- >>> dicti = {'a': 1, 'b': {'c': 3, 'd': 4}} >>> print(sorted(list(key_value_nested_generator(dicti)))) [('a', 1), ('c', 3), ('d', 4)] """ for key, value in dict_obj.items(): if isinstance(value, dict): for key, value in key_value_nested_generator(value): yield key, value else: yield key, value
[ "def", "key_value_nested_generator", "(", "dict_obj", ")", ":", "for", "key", ",", "value", "in", "dict_obj", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "for", "key", ",", "value", "in", "key_value_nested_generator", "(", "value", ")", ":", "yield", "key", ",", "value", "else", ":", "yield", "key", ",", "value" ]
Recursively iterate over key-value pairs of nested dictionaries. Parameters ---------- dict_obj : dict The outer-most dict to iterate on. Returns ------- generator A generator over key-value pairs in all nested dictionaries. Example ------- >>> dicti = {'a': 1, 'b': {'c': 3, 'd': 4}} >>> print(sorted(list(key_value_nested_generator(dicti)))) [('a', 1), ('c', 3), ('d', 4)]
[ "Recursively", "iterate", "over", "key", "-", "value", "pairs", "of", "nested", "dictionaries", "." ]
f3a301692d052ddb79331230b3c00625db1d83fc
https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/dicts/_dict.py#L850-L874
train
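Because only leaf keys are yielded, keys from different nesting levels can collide and the path information is lost; key_tuple_value_nested_generator in the next record preserves it. For example:

dicti = {'a': 1, 'b': {'a': 2}}
print(sorted(key_value_nested_generator(dicti)))
# [('a', 1), ('a', 2)] -- both leaves surface under the bare key 'a'.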
shaypal5/strct
strct/dicts/_dict.py
key_tuple_value_nested_generator
def key_tuple_value_nested_generator(dict_obj): """Recursively iterate over key-tuple-value pairs of nested dictionaries. Parameters ---------- dict_obj : dict The outer-most dict to iterate on. Returns ------- generator A generator over key-tuple-value pairs in all nested dictionaries. Example ------- >>> dicti = {'a': 1, 'b': {'c': 3, 'd': 4}} >>> print(sorted(list(key_tuple_value_nested_generator(dicti)))) [(('a',), 1), (('b', 'c'), 3), (('b', 'd'), 4)] """ for key, value in dict_obj.items(): if isinstance(value, dict): for nested_key, value in key_tuple_value_nested_generator(value): yield tuple([key]) + nested_key, value else: yield tuple([key]), value
python
def key_tuple_value_nested_generator(dict_obj): """Recursively iterate over key-tuple-value pairs of nested dictionaries. Parameters ---------- dict_obj : dict The outer-most dict to iterate on. Returns ------- generator A generator over key-tuple-value pairs in all nested dictionaries. Example ------- >>> dicti = {'a': 1, 'b': {'c': 3, 'd': 4}} >>> print(sorted(list(key_tuple_value_nested_generator(dicti)))) [(('a',), 1), (('b', 'c'), 3), (('b', 'd'), 4)] """ for key, value in dict_obj.items(): if isinstance(value, dict): for nested_key, value in key_tuple_value_nested_generator(value): yield tuple([key]) + nested_key, value else: yield tuple([key]), value
[ "def", "key_tuple_value_nested_generator", "(", "dict_obj", ")", ":", "for", "key", ",", "value", "in", "dict_obj", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "for", "nested_key", ",", "value", "in", "key_tuple_value_nested_generator", "(", "value", ")", ":", "yield", "tuple", "(", "[", "key", "]", ")", "+", "nested_key", ",", "value", "else", ":", "yield", "tuple", "(", "[", "key", "]", ")", ",", "value" ]
Recursively iterate over key-tuple-value pairs of nested dictionaries. Parameters ---------- dict_obj : dict The outer-most dict to iterate on. Returns ------- generator A generator over key-tuple-value pairs in all nested dictionaries. Example ------- >>> dicti = {'a': 1, 'b': {'c': 3, 'd': 4}} >>> print(sorted(list(key_tuple_value_nested_generator(dicti)))) [(('a',), 1), (('b', 'c'), 3), (('b', 'd'), 4)]
[ "Recursively", "iterate", "over", "key", "-", "tuple", "-", "value", "pairs", "of", "nested", "dictionaries", "." ]
f3a301692d052ddb79331230b3c00625db1d83fc
https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/dicts/_dict.py#L877-L901
train
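The key tuples make it easy to build flat, unambiguous keys, assuming all keys are strings; a small sketch:

nested = {'a': 1, 'b': {'c': 3, 'd': 4}}
flat = {'.'.join(path): value
        for path, value in key_tuple_value_nested_generator(nested)}
print(sorted(flat.items()))
# [('a', 1), ('b.c', 3), ('b.d', 4)]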
cloudbase/python-hnvclient
hnv/config/client.py
HVNOptions.register
def register(self): """Register the current options to the global ConfigOpts object.""" group = cfg.OptGroup( self.group_name, title="HNV (Hyper-V Network Virtualization) Options") self._config.register_group(group) self._config.register_opts(self._options, group=group)
python
def register(self): """Register the current options to the global ConfigOpts object.""" group = cfg.OptGroup( self.group_name, title="HNV (Hyper-V Network Virtualization) Options") self._config.register_group(group) self._config.register_opts(self._options, group=group)
[ "def", "register", "(", "self", ")", ":", "group", "=", "cfg", ".", "OptGroup", "(", "self", ".", "group_name", ",", "title", "=", "\"HNV (Hyper-V Network Virtualization) Options\"", ")", "self", ".", "_config", ".", "register_group", "(", "group", ")", "self", ".", "_config", ".", "register_opts", "(", "self", ".", "_options", ",", "group", "=", "group", ")" ]
Register the current options to the global ConfigOpts object.
[ "Register", "the", "current", "options", "to", "the", "global", "ConfigOpts", "object", "." ]
b019452af01db22629809b8930357a2ebf6494be
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/config/client.py#L68-L74
train
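For context, register() follows the standard oslo.config registration pattern. A self-contained sketch of the same idea, where the group name 'hnv' and the url option are illustrative assumptions rather than the project's actual option list:

from oslo_config import cfg

conf = cfg.ConfigOpts()
group = cfg.OptGroup('hnv', title='HNV (Hyper-V Network Virtualization) Options')
options = [cfg.StrOpt('url', help='HNV REST API endpoint.')]  # illustrative only
conf.register_group(group)
conf.register_opts(options, group=group)
conf(args=[])        # parse configuration (no CLI arguments here)
print(conf.hnv.url)  # None until set in a config file or override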
shexSpec/grammar
parsers/python/pyshexc/parser_impl/shex_node_expression_parser.py
ShexNodeExpressionParser._language_exclusions
def _language_exclusions(stem: LanguageStemRange, exclusions: List[ShExDocParser.LanguageExclusionContext]) -> None: """ languageExclusion = '-' LANGTAG STEM_MARK?""" for excl in exclusions: excl_langtag = LANGTAG(excl.LANGTAG().getText()[1:]) stem.exclusions.append(LanguageStem(excl_langtag) if excl.STEM_MARK() else excl_langtag)
python
def _language_exclusions(stem: LanguageStemRange, exclusions: List[ShExDocParser.LanguageExclusionContext]) -> None: """ languageExclusion = '-' LANGTAG STEM_MARK?""" for excl in exclusions: excl_langtag = LANGTAG(excl.LANGTAG().getText()[1:]) stem.exclusions.append(LanguageStem(excl_langtag) if excl.STEM_MARK() else excl_langtag)
[ "def", "_language_exclusions", "(", "stem", ":", "LanguageStemRange", ",", "exclusions", ":", "List", "[", "ShExDocParser", ".", "LanguageExclusionContext", "]", ")", "->", "None", ":", "for", "excl", "in", "exclusions", ":", "excl_langtag", "=", "LANGTAG", "(", "excl", ".", "LANGTAG", "(", ")", ".", "getText", "(", ")", "[", "1", ":", "]", ")", "stem", ".", "exclusions", ".", "append", "(", "LanguageStem", "(", "excl_langtag", ")", "if", "excl", ".", "STEM_MARK", "(", ")", "else", "excl_langtag", ")" ]
languageExclusion = '-' LANGTAG STEM_MARK?
[ "languageExclusion", "=", "-", "LANGTAG", "STEM_MARK?" ]
4497cd1f73fa6703bca6e2cb53ba9c120f22e48c
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_node_expression_parser.py#L147-L152
train
gtaylor/django-athumb
athumb/pial/engines/base.py
EngineBase.create_thumbnail
def create_thumbnail(self, image, geometry, upscale=True, crop=None, colorspace='RGB'): """ This serves as a really basic example of a thumbnailing method. You may want to implement your own logic, but this will work for simple cases. :param Image image: This is your engine's ``Image`` object. For PIL it's PIL.Image. :param tuple geometry: Geometry of the image in the format of (x,y). :keyword str crop: A cropping offset string. This is either one or two space-separated values. If only one value is specified, the cropping amount (pixels or percentage) for both X and Y dimensions is the amount given. If two values are specified, X and Y dimension cropping may be set independently. Some examples: '50% 50%', '50px 20px', '50%', '50px'. :keyword str colorspace: The colorspace to set/convert the image to. This is typically 'RGB' or 'GRAY'. :returns: The thumbnailed image. The returned type depends on your choice of Engine. """ image = self.colorspace(image, colorspace) image = self.scale(image, geometry, upscale, crop) image = self.crop(image, geometry, crop) return image
python
def create_thumbnail(self, image, geometry, upscale=True, crop=None, colorspace='RGB'): """ This serves as a really basic example of a thumbnailing method. You may want to implement your own logic, but this will work for simple cases. :param Image image: This is your engine's ``Image`` object. For PIL it's PIL.Image. :param tuple geometry: Geometry of the image in the format of (x,y). :keyword str crop: A cropping offset string. This is either one or two space-separated values. If only one value is specified, the cropping amount (pixels or percentage) for both X and Y dimensions is the amount given. If two values are specified, X and Y dimension cropping may be set independently. Some examples: '50% 50%', '50px 20px', '50%', '50px'. :keyword str colorspace: The colorspace to set/convert the image to. This is typically 'RGB' or 'GRAY'. :returns: The thumbnailed image. The returned type depends on your choice of Engine. """ image = self.colorspace(image, colorspace) image = self.scale(image, geometry, upscale, crop) image = self.crop(image, geometry, crop) return image
[ "def", "create_thumbnail", "(", "self", ",", "image", ",", "geometry", ",", "upscale", "=", "True", ",", "crop", "=", "None", ",", "colorspace", "=", "'RGB'", ")", ":", "image", "=", "self", ".", "colorspace", "(", "image", ",", "colorspace", ")", "image", "=", "self", ".", "scale", "(", "image", ",", "geometry", ",", "upscale", ",", "crop", ")", "image", "=", "self", ".", "crop", "(", "image", ",", "geometry", ",", "crop", ")", "return", "image" ]
This serves as a really basic example of a thumbnailing method. You may want to implement your own logic, but this will work for simple cases. :param Image image: This is your engine's ``Image`` object. For PIL it's PIL.Image. :param tuple geometry: Geometry of the image in the format of (x,y). :keyword str crop: A cropping offset string. This is either one or two space-separated values. If only one value is specified, the cropping amount (pixels or percentage) for both X and Y dimensions is the amount given. If two values are specified, X and Y dimension cropping may be set independently. Some examples: '50% 50%', '50px 20px', '50%', '50px'. :keyword str colorspace: The colorspace to set/convert the image to. This is typically 'RGB' or 'GRAY'. :returns: The thumbnailed image. The returned type depends on your choice of Engine.
[ "This", "serves", "as", "a", "really", "basic", "example", "of", "a", "thumbnailing", "method", ".", "You", "may", "want", "to", "implement", "your", "own", "logic", "but", "this", "will", "work", "for", "simple", "cases", "." ]
69261ace0dff81e33156a54440874456a7b38dfb
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/pial/engines/base.py#L17-L42
train
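create_thumbnail is a template method: colorspace(), scale() and crop() are the engine-specific hooks. A minimal no-op subclass, with the hook signatures inferred from the calls above and the module path taken from this record, illustrates the contract:

from athumb.pial.engines.base import EngineBase

class NoOpEngine(EngineBase):
    """Hypothetical minimal engine; a real one would transform the image."""

    def colorspace(self, image, colorspace):
        return image  # e.g. convert to 'RGB' or 'GRAY' here

    def scale(self, image, geometry, upscale, crop):
        return image  # resize toward the (x, y) geometry here

    def crop(self, image, geometry, crop):
        return image  # apply the offset string, e.g. '50% 50%', here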
GearPlug/payu-python
payu/tokenization.py
Tokenization.get_tokens
def get_tokens(self, *, payer_id, credit_card_token_id, start_date, end_date): """ With this functionality you can query previously created credit card tokens. Args: payer_id: credit_card_token_id: start_date: end_date: Returns: """ payload = { "language": self.client.language.value, "command": PaymentCommand.GET_TOKENS.value, "merchant": { "apiLogin": self.client.api_login, "apiKey": self.client.api_key }, "creditCardTokenInformation": { "payerId": payer_id, "creditCardTokenId": credit_card_token_id, "startDate": start_date.strftime('%Y-%m-%dT%H:%M:%S'), "endDate": end_date.strftime('%Y-%m-%dT%H:%M:%S') }, "test": self.client.is_test } return self.client._post(self.url, json=payload)
python
def get_tokens(self, *, payer_id, credit_card_token_id, start_date, end_date): """ With this functionality you can query previously created credit card tokens. Args: payer_id: credit_card_token_id: start_date: end_date: Returns: """ payload = { "language": self.client.language.value, "command": PaymentCommand.GET_TOKENS.value, "merchant": { "apiLogin": self.client.api_login, "apiKey": self.client.api_key }, "creditCardTokenInformation": { "payerId": payer_id, "creditCardTokenId": credit_card_token_id, "startDate": start_date.strftime('%Y-%m-%dT%H:%M:%S'), "endDate": end_date.strftime('%Y-%m-%dT%H:%M:%S') }, "test": self.client.is_test } return self.client._post(self.url, json=payload)
[ "def", "get_tokens", "(", "self", ",", "*", ",", "payer_id", ",", "credit_card_token_id", ",", "start_date", ",", "end_date", ")", ":", "payload", "=", "{", "\"language\"", ":", "self", ".", "client", ".", "language", ".", "value", ",", "\"command\"", ":", "PaymentCommand", ".", "GET_TOKENS", ".", "value", ",", "\"merchant\"", ":", "{", "\"apiLogin\"", ":", "self", ".", "client", ".", "api_login", ",", "\"apiKey\"", ":", "self", ".", "client", ".", "api_key", "}", ",", "\"creditCardTokenInformation\"", ":", "{", "\"payerId\"", ":", "payer_id", ",", "\"creditCardTokenId\"", ":", "credit_card_token_id", ",", "\"startDate\"", ":", "start_date", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S'", ")", ",", "\"endDate\"", ":", "end_date", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S'", ")", "}", ",", "\"test\"", ":", "self", ".", "client", ".", "is_test", "}", "return", "self", ".", "client", ".", "_post", "(", "self", ".", "url", ",", "json", "=", "payload", ")" ]
With this functionality you can query previously created credit card tokens. Args: payer_id: credit_card_token_id: start_date: end_date: Returns:
[ "With", "this", "functionality", "you", "can", "query", "previously", "the", "Credit", "Cards", "Token", "." ]
47ec5c9fc89f1f89a53ec0a68c84f358bbe3394e
https://github.com/GearPlug/payu-python/blob/47ec5c9fc89f1f89a53ec0a68c84f358bbe3394e/payu/tokenization.py#L294-L322
train
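A usage sketch with datetime objects, which the method formats via strftime; the client construction and the tokenization attribute name are assumptions, since they are not shown in this record:

from datetime import datetime

response = client.tokenization.get_tokens(  # hypothetical client wiring
    payer_id='payer_1',
    credit_card_token_id='credit-card-token-uuid',
    start_date=datetime(2019, 1, 1),
    end_date=datetime(2019, 12, 31),
)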
GearPlug/payu-python
payu/tokenization.py
Tokenization.remove_token
def remove_token(self, *, payer_id, credit_card_token_id): """ This feature allows you to delete a tokenized credit card record. Args: payer_id: credit_card_token_id: Returns: """ payload = { "language": self.client.language.value, "command": PaymentCommand.REMOVE_TOKEN.value, "merchant": { "apiLogin": self.client.api_login, "apiKey": self.client.api_key }, "removeCreditCardToken": { "payerId": payer_id, "creditCardTokenId": credit_card_token_id }, "test": self.client.is_test } return self.client._post(self.url, json=payload)
python
def remove_token(self, *, payer_id, credit_card_token_id): """ This feature allows you to delete a tokenized credit card record. Args: payer_id: credit_card_token_id: Returns: """ payload = { "language": self.client.language.value, "command": PaymentCommand.REMOVE_TOKEN.value, "merchant": { "apiLogin": self.client.api_login, "apiKey": self.client.api_key }, "removeCreditCardToken": { "payerId": payer_id, "creditCardTokenId": credit_card_token_id }, "test": self.client.is_test } return self.client._post(self.url, json=payload)
[ "def", "remove_token", "(", "self", ",", "*", ",", "payer_id", ",", "credit_card_token_id", ")", ":", "payload", "=", "{", "\"language\"", ":", "self", ".", "client", ".", "language", ".", "value", ",", "\"command\"", ":", "PaymentCommand", ".", "REMOVE_TOKEN", ".", "value", ",", "\"merchant\"", ":", "{", "\"apiLogin\"", ":", "self", ".", "client", ".", "api_login", ",", "\"apiKey\"", ":", "self", ".", "client", ".", "api_key", "}", ",", "\"removeCreditCardToken\"", ":", "{", "\"payerId\"", ":", "payer_id", ",", "\"creditCardTokenId\"", ":", "credit_card_token_id", "}", ",", "\"test\"", ":", "self", ".", "client", ".", "is_test", "}", "return", "self", ".", "client", ".", "_post", "(", "self", ".", "url", ",", "json", "=", "payload", ")" ]
This feature allows you to delete a tokenized credit card record. Args: payer_id: credit_card_token_id: Returns:
[ "This", "feature", "allows", "you", "to", "delete", "a", "tokenized", "credit", "card", "register", "." ]
47ec5c9fc89f1f89a53ec0a68c84f358bbe3394e
https://github.com/GearPlug/payu-python/blob/47ec5c9fc89f1f89a53ec0a68c84f358bbe3394e/payu/tokenization.py#L324-L348
train
bachiraoun/pylocker
Locker.py
Locker.set_file_path
def set_file_path(self, filePath): """ Set the file path that needs to be locked. :Parameters: #. filePath (None, path): The file that needs to be locked. When given and a lock is acquired, the file will be automatically opened for writing or reading depending on the given mode. If None is given, the locker can always be used for its general purpose as shown in the examples. """ if filePath is not None: assert isinstance(filePath, basestring), "filePath must be None or string" filePath = str(filePath) self.__filePath = filePath
python
def set_file_path(self, filePath): """ Set the file path that needs to be locked. :Parameters: #. filePath (None, path): The file that needs to be locked. When given and a lock is acquired, the file will be automatically opened for writing or reading depending on the given mode. If None is given, the locker can always be used for its general purpose as shown in the examples. """ if filePath is not None: assert isinstance(filePath, basestring), "filePath must be None or string" filePath = str(filePath) self.__filePath = filePath
[ "def", "set_file_path", "(", "self", ",", "filePath", ")", ":", "if", "filePath", "is", "not", "None", ":", "assert", "isinstance", "(", "filePath", ",", "basestring", ")", ",", "\"filePath must be None or string\"", "filePath", "=", "str", "(", "filePath", ")", "self", ".", "__filePath", "=", "filePath" ]
Set the file path that needs to be locked. :Parameters: #. filePath (None, path): The file that needs to be locked. When given and a lock is acquired, the file will be automatically opened for writing or reading depending on the given mode. If None is given, the locker can always be used for its general purpose as shown in the examples.
[ "Set", "the", "file", "path", "that", "needs", "to", "be", "locked", "." ]
a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b
https://github.com/bachiraoun/pylocker/blob/a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b/Locker.py#L308-L321
train
bachiraoun/pylocker
Locker.py
Locker.set_lock_pass
def set_lock_pass(self, lockPass): """ Set the locking pass. :Parameters: #. lockPass (string): The locking pass. """ assert isinstance(lockPass, basestring), "lockPass must be string" lockPass = str(lockPass) assert '\n' not in lockPass, "lockPass must not contain a new line" self.__lockPass = lockPass
python
def set_lock_pass(self, lockPass): """ Set the locking pass. :Parameters: #. lockPass (string): The locking pass. """ assert isinstance(lockPass, basestring), "lockPass must be string" lockPass = str(lockPass) assert '\n' not in lockPass, "lockPass must not contain a new line" self.__lockPass = lockPass
[ "def", "set_lock_pass", "(", "self", ",", "lockPass", ")", ":", "assert", "isinstance", "(", "lockPass", ",", "basestring", ")", ",", "\"lockPass must be string\"", "lockPass", "=", "str", "(", "lockPass", ")", "assert", "'\\n'", "not", "in", "lockPass", ",", "\"lockPass must be not contain a new line\"", "self", ".", "__lockPass", "=", "lockPass" ]
Set the locking pass. :Parameters: #. lockPass (string): The locking pass.
[ "Set", "the", "locking", "pass" ]
a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b
https://github.com/bachiraoun/pylocker/blob/a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b/Locker.py#L323-L333
train
bachiraoun/pylocker
Locker.py
Locker.set_lock_path
def set_lock_path(self, lockPath): """ Set the managing lock file path. :Parameters: #. lockPath (None, path): The locking file path. If None is given, the locking file will be automatically created as '.lock' in the filePath directory. If filePath is None, '.lock' will be created in the current working directory. """ if lockPath is not None: assert isinstance(lockPath, basestring), "lockPath must be None or string" lockPath = str(lockPath) self.__lockPath = lockPath if self.__lockPath is None: if self.__filePath is None: self.__lockPath = os.path.join(os.getcwd(), ".lock") else: self.__lockPath = os.path.join( os.path.dirname(self.__filePath), '.lock')
python
def set_lock_path(self, lockPath): """ Set the managing lock file path. :Parameters: #. lockPath (None, path): The locking file path. If None is given, the locking file will be automatically created as '.lock' in the filePath directory. If filePath is None, '.lock' will be created in the current working directory. """ if lockPath is not None: assert isinstance(lockPath, basestring), "lockPath must be None or string" lockPath = str(lockPath) self.__lockPath = lockPath if self.__lockPath is None: if self.__filePath is None: self.__lockPath = os.path.join(os.getcwd(), ".lock") else: self.__lockPath = os.path.join( os.path.dirname(self.__filePath), '.lock')
[ "def", "set_lock_path", "(", "self", ",", "lockPath", ")", ":", "if", "lockPath", "is", "not", "None", ":", "assert", "isinstance", "(", "lockPath", ",", "basestring", ")", ",", "\"lockPath must be None or string\"", "lockPath", "=", "str", "(", "lockPath", ")", "self", ".", "__lockPath", "=", "lockPath", "if", "self", ".", "__lockPath", "is", "None", ":", "if", "self", ".", "__filePath", "is", "None", ":", "self", ".", "__lockPath", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "\".lock\"", ")", "else", ":", "self", ".", "__lockPath", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "self", ".", "__filePath", ")", ",", "'.lock'", ")" ]
Set the managing lock file path. :Parameters: #. lockPath (None, path): The locking file path. If None is given, the locking file will be automatically created as '.lock' in the filePath directory. If filePath is None, '.lock' will be created in the current working directory.
[ "Set", "the", "managing", "lock", "file", "path", "." ]
a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b
https://github.com/bachiraoun/pylocker/blob/a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b/Locker.py#L335-L352
train
bachiraoun/pylocker
Locker.py
Locker.set_timeout
def set_timeout(self, timeout): """ Set the timeout limit. :Parameters: #. timeout (number): The maximum delay or time allowed to successfully set the lock. When timeout is exhausted before successfully setting the lock, the lock ends up not acquired. """ try: timeout = float(timeout) assert timeout>=0 assert timeout>=self.__wait except: raise Exception('timeout must be a positive number bigger than wait') self.__timeout = timeout
python
def set_timeout(self, timeout): """ Set the timeout limit. :Parameters: #. timeout (number): The maximum delay or time allowed to successfully set the lock. When timeout is exhausted before successfully setting the lock, the lock ends up not acquired. """ try: timeout = float(timeout) assert timeout>=0 assert timeout>=self.__wait except: raise Exception('timeout must be a positive number bigger than wait') self.__timeout = timeout
[ "def", "set_timeout", "(", "self", ",", "timeout", ")", ":", "try", ":", "timeout", "=", "float", "(", "timeout", ")", "assert", "timeout", ">=", "0", "assert", "timeout", ">=", "self", ".", "__wait", "except", ":", "raise", "Exception", "(", "'timeout must be a positive number bigger than wait'", ")", "self", ".", "__timeout", "=", "timeout" ]
Set the timeout limit. :Parameters: #. timeout (number): The maximum delay or time allowed to successfully set the lock. When timeout is exhausted before successfully setting the lock, the lock ends up not acquired.
[ "set", "the", "timeout", "limit", "." ]
a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b
https://github.com/bachiraoun/pylocker/blob/a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b/Locker.py#L354-L369
train
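The validation couples timeout and wait: a timeout below the current wait is rejected. A short sketch, assuming an already constructed Locker instance named locker:

locker.set_wait(0.05)     # hypothetical instance
locker.set_timeout(10)    # accepted: 10 >= wait
locker.set_timeout(0.01)  # raises Exception: timeout must be a positive number bigger than wait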
bachiraoun/pylocker
Locker.py
Locker.set_wait
def set_wait(self, wait): """ Set the waiting time. :Parameters: #. wait (number): The time delay between each attempt to lock. By default it's set to 0 to keep the acquiring mechanism trying to acquire the lock without losing any time waiting. Setting wait to a higher value such as 0.05 seconds or higher can be very useful in special cases when many processes are trying to acquire the lock and one of them needs to hold and release it at a higher frequency or rate. """ try: wait = float(wait) assert wait>=0 except: raise Exception('wait must be a positive number') self.__wait = wait
python
def set_wait(self, wait): """ Set the waiting time. :Parameters: #. wait (number): The time delay between each attempt to lock. By default it's set to 0 to keep the acquiring mechanism trying to acquire the lock without losing any time waiting. Setting wait to a higher value such as 0.05 seconds or higher can be very useful in special cases when many processes are trying to acquire the lock and one of them needs to hold and release it at a higher frequency or rate. """ try: wait = float(wait) assert wait>=0 except: raise Exception('wait must be a positive number') self.__wait = wait
[ "def", "set_wait", "(", "self", ",", "wait", ")", ":", "try", ":", "wait", "=", "float", "(", "wait", ")", "assert", "wait", ">=", "0", "except", ":", "raise", "Exception", "(", "'wait must be a positive number'", ")", "self", ".", "__wait", "=", "wait" ]
Set the waiting time. :Parameters: #. wait (number): The time delay between each attempt to lock. By default it's set to 0 to keep the acquiring mechanism trying to acquire the lock without losing any time waiting. Setting wait to a higher value such as 0.05 seconds or higher can be very useful in special cases when many processes are trying to acquire the lock and one of them needs to hold and release it at a higher frequency or rate.
[ "set", "the", "waiting", "time", "." ]
a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b
https://github.com/bachiraoun/pylocker/blob/a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b/Locker.py#L371-L388
train
bachiraoun/pylocker
Locker.py
Locker.set_dead_lock
def set_dead_lock(self, deadLock): """ Set the dead lock time. :Parameters: #. deadLock (number): The time delay used to judge whether the lock was mistakenly left behind after a system crash or other unexpected reasons. Normally Locker is stable and takes care of not leaving any locking file hanging even if it crashes or is forced to stop by a user signal. """ try: deadLock = float(deadLock) assert deadLock>=0 except: raise Exception('deadLock must be a positive number') self.__deadLock = deadLock
python
def set_dead_lock(self, deadLock): """ Set the dead lock time. :Parameters: #. deadLock (number): The time delay used to judge whether the lock was mistakenly left behind after a system crash or other unexpected reasons. Normally Locker is stable and takes care of not leaving any locking file hanging even if it crashes or is forced to stop by a user signal. """ try: deadLock = float(deadLock) assert deadLock>=0 except: raise Exception('deadLock must be a positive number') self.__deadLock = deadLock
[ "def", "set_dead_lock", "(", "self", ",", "deadLock", ")", ":", "try", ":", "deadLock", "=", "float", "(", "deadLock", ")", "assert", "deadLock", ">=", "0", "except", ":", "raise", "Exception", "(", "'deadLock must be a positive number'", ")", "self", ".", "__deadLock", "=", "deadLock" ]
Set the dead lock time. :Parameters: #. deadLock (number): The time delay used to judge whether the lock was mistakenly left behind after a system crash or other unexpected reasons. Normally Locker is stable and takes care of not leaving any locking file hanging even if it crashes or is forced to stop by a user signal.
[ "Set", "the", "dead", "lock", "time", "." ]
a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b
https://github.com/bachiraoun/pylocker/blob/a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b/Locker.py#L390-L405
train
bachiraoun/pylocker
Locker.py
Locker.release_lock
def release_lock(self, verbose=VERBOSE, raiseError=RAISE_ERROR): """ Release the lock when set and close file descriptor if opened. :Parameters: #. verbose (bool): Whether to be verbose about errors when encountered #. raiseError (bool): Whether to raise error exception when encountered :Returns: #. result (boolean): Whether the lock is successfully released. #. code (integer, Exception): Integer code indicating how the lock was successfully or unsuccessfully released. When releasing the lock generates an error, this will be caught and returned in a message Exception code. * 0: Lock is not found, therefore successfully released * 1: Lock is found empty, therefore successfully released * 2: Lock is found owned by this locker and successfully released * 3: Lock is found owned by this locker and successfully released and locked file descriptor was successfully closed * 4: Lock is found owned by another locker, this locker has no permission to release it. Therefore unsuccessfully released * Exception: Lock was not successfully released because of an unexpected error. The error is caught and returned in this Exception. In this case result is False. """ if not os.path.isfile(self.__lockPath): released = True code = 0 else: try: with open(self.__lockPath, 'rb') as fd: lock = fd.readlines() except Exception as err: code = Exception( "Unable to read release lock file '%s' (%s)"%(self.__lockPath,str(err)) ) released = False if verbose: print(str(code)) if raiseError: raise code else: if not len(lock): code = 1 released = True elif lock[0].rstrip() == self.__lockPass.encode(): try: with open(self.__lockPath, 'wb') as f: #f.write( ''.encode('utf-8') ) f.write( ''.encode() ) f.flush() os.fsync(f.fileno()) except Exception as err: released = False code = Exception( "Unable to write release lock file '%s' (%s)"%(self.__lockPath,str(err)) ) if verbose: print(str(code)) if raiseError: raise code else: released = True code = 2 else: code = 4 released = False # close file descriptor if lock is released and descriptor is not None if released and self.__fd is not None: try: if not self.__fd.closed: self.__fd.flush() os.fsync(self.__fd.fileno()) self.__fd.close() except Exception as err: code = Exception( "Unable to close file descriptor of locked file '%s' (%s)"%(self.__filePath,str(err)) ) if verbose: print(str(code)) if raiseError: raise code else: code = 3 # return return released, code
python
def release_lock(self, verbose=VERBOSE, raiseError=RAISE_ERROR): """ Release the lock when set and close file descriptor if opened. :Parameters: #. verbose (bool): Whether to be verbose about errors when encountered #. raiseError (bool): Whether to raise error exception when encountered :Returns: #. result (boolean): Whether the lock is successfully released. #. code (integer, Exception): Integer code indicating how the lock was successfully or unsuccessfully released. When releasing the lock generates an error, this will be caught and returned in a message Exception code. * 0: Lock is not found, therefore successfully released * 1: Lock is found empty, therefore successfully released * 2: Lock is found owned by this locker and successfully released * 3: Lock is found owned by this locker and successfully released and locked file descriptor was successfully closed * 4: Lock is found owned by another locker, this locker has no permission to release it. Therefore unsuccessfully released * Exception: Lock was not successfully released because of an unexpected error. The error is caught and returned in this Exception. In this case result is False. """ if not os.path.isfile(self.__lockPath): released = True code = 0 else: try: with open(self.__lockPath, 'rb') as fd: lock = fd.readlines() except Exception as err: code = Exception( "Unable to read release lock file '%s' (%s)"%(self.__lockPath,str(err)) ) released = False if verbose: print(str(code)) if raiseError: raise code else: if not len(lock): code = 1 released = True elif lock[0].rstrip() == self.__lockPass.encode(): try: with open(self.__lockPath, 'wb') as f: #f.write( ''.encode('utf-8') ) f.write( ''.encode() ) f.flush() os.fsync(f.fileno()) except Exception as err: released = False code = Exception( "Unable to write release lock file '%s' (%s)"%(self.__lockPath,str(err)) ) if verbose: print(str(code)) if raiseError: raise code else: released = True code = 2 else: code = 4 released = False # close file descriptor if lock is released and descriptor is not None if released and self.__fd is not None: try: if not self.__fd.closed: self.__fd.flush() os.fsync(self.__fd.fileno()) self.__fd.close() except Exception as err: code = Exception( "Unable to close file descriptor of locked file '%s' (%s)"%(self.__filePath,str(err)) ) if verbose: print(str(code)) if raiseError: raise code else: code = 3 # return return released, code
[ "def", "release_lock", "(", "self", ",", "verbose", "=", "VERBOSE", ",", "raiseError", "=", "RAISE_ERROR", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "self", ".", "__lockPath", ")", ":", "released", "=", "True", "code", "=", "0", "else", ":", "try", ":", "with", "open", "(", "self", ".", "__lockPath", ",", "'rb'", ")", "as", "fd", ":", "lock", "=", "fd", ".", "readlines", "(", ")", "except", "Exception", "as", "err", ":", "code", "=", "Exception", "(", "\"Unable to read release lock file '%s' (%s)\"", "%", "(", "self", ".", "__lockPath", ",", "str", "(", "err", ")", ")", ")", "released", "=", "False", "if", "verbose", ":", "print", "(", "str", "(", "code", ")", ")", "if", "raiseError", ":", "raise", "code", "else", ":", "if", "not", "len", "(", "lock", ")", ":", "code", "=", "1", "released", "=", "True", "elif", "lock", "[", "0", "]", ".", "rstrip", "(", ")", "==", "self", ".", "__lockPass", ".", "encode", "(", ")", ":", "try", ":", "with", "open", "(", "self", ".", "__lockPath", ",", "'wb'", ")", "as", "f", ":", "#f.write( ''.encode('utf-8') )", "f", ".", "write", "(", "''", ".", "encode", "(", ")", ")", "f", ".", "flush", "(", ")", "os", ".", "fsync", "(", "f", ".", "fileno", "(", ")", ")", "except", "Exception", "as", "err", ":", "released", "=", "False", "code", "=", "Exception", "(", "\"Unable to write release lock file '%s' (%s)\"", "%", "(", "self", ".", "__lockPath", ",", "str", "(", "err", ")", ")", ")", "if", "verbose", ":", "print", "(", "str", "(", "code", ")", ")", "if", "raiseError", ":", "raise", "code", "else", ":", "released", "=", "True", "code", "=", "2", "else", ":", "code", "=", "4", "released", "=", "False", "# close file descriptor if lock is released and descriptor is not None", "if", "released", "and", "self", ".", "__fd", "is", "not", "None", ":", "try", ":", "if", "not", "self", ".", "__fd", ".", "closed", ":", "self", ".", "__fd", ".", "flush", "(", ")", "os", ".", "fsync", "(", "self", ".", "__fd", ".", "fileno", "(", ")", ")", "self", ".", "__fd", ".", "close", "(", ")", "except", "Exception", "as", "err", ":", "code", "=", "Exception", "(", "\"Unable to close file descriptor of locked file '%s' (%s)\"", "%", "(", "self", ".", "__filePath", ",", "str", "(", "err", ")", ")", ")", "if", "verbose", ":", "print", "(", "str", "(", "code", ")", ")", "if", "raiseError", ":", "raise", "code", "else", ":", "code", "=", "3", "# return", "return", "released", ",", "code" ]
Release the lock when set and close file descriptor if opened. :Parameters: #. verbose (bool): Whether to be verbose about errors when encountered #. raiseError (bool): Whether to raise error exception when encountered :Returns: #. result (boolean): Whether the lock is successfully released. #. code (integer, Exception): Integer code indicating how the lock was successfully or unsuccessfully released. When releasing the lock generates an error, this will be caught and returned in a message Exception code. * 0: Lock is not found, therefore successfully released * 1: Lock is found empty, therefore successfully released * 2: Lock is found owned by this locker and successfully released * 3: Lock is found owned by this locker and successfully released and locked file descriptor was successfully closed * 4: Lock is found owned by another locker, this locker has no permission to release it. Therefore unsuccessfully released * Exception: Lock was not successfully released because of an unexpected error. The error is caught and returned in this Exception. In this case result is False.
[ "Release", "the", "lock", "when", "set", "and", "close", "file", "descriptor", "if", "opened", "." ]
a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b
https://github.com/bachiraoun/pylocker/blob/a542e5ec2204f5a01d67f1d73ce68d3f4eb05d8b/Locker.py#L541-L614
train
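Since release_lock reports failure through its return value when raiseError is False, callers are expected to branch on the code; a sketch, again assuming a Locker instance named locker:

released, code = locker.release_lock(verbose=False, raiseError=False)
if not released:
    if code == 4:
        print('lock is owned by another locker; not released')
    elif isinstance(code, Exception):
        print('unexpected error while releasing: {}'.format(code))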
geophysics-ubonn/reda
lib/reda/containers/ERT.py
Importers.import_bert
def import_bert(self, filename, **kwargs): """BERT .ohm file import""" timestep = kwargs.get('timestep', None) if 'timestep' in kwargs: del (kwargs['timestep']) self.logger.info('Unified data format (BERT/pyGIMLi) file import') with LogDataChanges(self, filter_action='import', filter_query=os.path.basename(filename)): data, electrodes, topography = reda_bert_import.import_ohm( filename, **kwargs) if timestep is not None: data['timestep'] = timestep self._add_to_container(data) self.electrode_positions = electrodes # See issue #22 if kwargs.get('verbose', False): print('Summary:') self._describe_data(data)
python
def import_bert(self, filename, **kwargs): """BERT .ohm file import""" timestep = kwargs.get('timestep', None) if 'timestep' in kwargs: del (kwargs['timestep']) self.logger.info('Unified data format (BERT/pyGIMLi) file import') with LogDataChanges(self, filter_action='import', filter_query=os.path.basename(filename)): data, electrodes, topography = reda_bert_import.import_ohm( filename, **kwargs) if timestep is not None: data['timestep'] = timestep self._add_to_container(data) self.electrode_positions = electrodes # See issue #22 if kwargs.get('verbose', False): print('Summary:') self._describe_data(data)
[ "def", "import_bert", "(", "self", ",", "filename", ",", "*", "*", "kwargs", ")", ":", "timestep", "=", "kwargs", ".", "get", "(", "'timestep'", ",", "None", ")", "if", "'timestep'", "in", "kwargs", ":", "del", "(", "kwargs", "[", "'timestep'", "]", ")", "self", ".", "logger", ".", "info", "(", "'Unified data format (BERT/pyGIMLi) file import'", ")", "with", "LogDataChanges", "(", "self", ",", "filter_action", "=", "'import'", ",", "filter_query", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", ")", ":", "data", ",", "electrodes", ",", "topography", "=", "reda_bert_import", ".", "import_ohm", "(", "filename", ",", "*", "*", "kwargs", ")", "if", "timestep", "is", "not", "None", ":", "data", "[", "'timestep'", "]", "=", "timestep", "self", ".", "_add_to_container", "(", "data", ")", "self", ".", "electrode_positions", "=", "electrodes", "# See issue #22", "if", "kwargs", ".", "get", "(", "'verbose'", ",", "False", ")", ":", "print", "(", "'Summary:'", ")", "self", ".", "_describe_data", "(", "data", ")" ]
BERT .ohm file import
[ "BERT", ".", "ohm", "file", "import" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/ERT.py#L118-L135
train
geophysics-ubonn/reda
lib/reda/containers/ERT.py
ERT.to_ip
def to_ip(self): """Return a copy of the data inside a TDIP container """ if 'chargeability' in self.data.columns: tdip = reda.TDIP(data=self.data) else: raise Exception('Missing column "chargeability"') return tdip
python
def to_ip(self): """Return a copy of the data inside a TDIP container """ if 'chargeability' in self.data.columns: tdip = reda.TDIP(data=self.data) else: raise Exception('Missing column "chargeability"') return tdip
[ "def", "to_ip", "(", "self", ")", ":", "if", "'chargeability'", "in", "self", ".", "data", ".", "columns", ":", "tdip", "=", "reda", ".", "TDIP", "(", "data", "=", "self", ".", "data", ")", "else", ":", "raise", "Exception", "(", "'Missing column \"chargeability\"'", ")", "return", "tdip" ]
Return a copy of the data inside a TDIP container
[ "Return", "of", "copy", "of", "the", "data", "inside", "a", "TDIP", "container" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/ERT.py#L189-L196
train
geophysics-ubonn/reda
lib/reda/containers/ERT.py
ERT.sub_filter
def sub_filter(self, subset, filter, inplace=True): """Apply a filter to a subset of the data Examples -------- :: .sub_filter( 'timestep == 2', 'R > 4', ) """ # build the full query full_query = ''.join(('not (', subset, ') or not (', filter, ')')) with LogDataChanges(self, filter_action='filter', filter_query=filter): result = self.data.query(full_query, inplace=inplace) return result
python
def sub_filter(self, subset, filter, inplace=True): """Apply a filter to subset of the data Examples -------- :: .subquery( 'timestep == 2', 'R > 4', ) """ # build the full query full_query = ''.join(('not (', subset, ') or not (', filter, ')')) with LogDataChanges(self, filter_action='filter', filter_query=filter): result = self.data.query(full_query, inplace=inplace) return result
[ "def", "sub_filter", "(", "self", ",", "subset", ",", "filter", ",", "inplace", "=", "True", ")", ":", "# build the full query", "full_query", "=", "''", ".", "join", "(", "(", "'not ('", ",", "subset", ",", "') or not ('", ",", "filter", ",", "')'", ")", ")", "with", "LogDataChanges", "(", "self", ",", "filter_action", "=", "'filter'", ",", "filter_query", "=", "filter", ")", ":", "result", "=", "self", ".", "data", ".", "query", "(", "full_query", ",", "inplace", "=", "inplace", ")", "return", "result" ]
Apply a filter to subset of the data

Examples
--------

::

    .subquery(
        'timestep == 2',
        'R > 4',
    )
[ "Apply", "a", "filter", "to", "subset", "of", "the", "data" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/ERT.py#L216-L234
train
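The full_query built in the record above is De Morgan's law at work: removing rows that match both subset and filter is the same as keeping rows that fail at least one of them. A self-contained sketch of the same trick on a plain DataFrame (column names here are illustrative only, not from the record):

import pandas as pd

df = pd.DataFrame({'timestep': [1, 2, 2], 'R': [3.0, 5.0, 2.0]})
subset, filt = 'timestep == 2', 'R > 4'
full_query = ''.join(('not (', subset, ') or not (', filt, ')'))
# drops only the row with timestep == 2 and R == 5.0; the rest survive
print(df.query(full_query))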
geophysics-ubonn/reda
lib/reda/containers/ERT.py
ERT.filter
def filter(self, query, inplace=True):
    """Use a query statement to filter data. Note that you specify the
    data to be removed!

    Parameters
    ----------
    query : string
        The query string to be evaluated. Is directly provided to
        pandas.DataFrame.query
    inplace : bool
        if True, change the container dataframe in place (defaults to
        True)

    Returns
    -------
    result : :py:class:`pandas.DataFrame`
        DataFrame that contains the result of the filter application
    """
    with LogDataChanges(self, filter_action='filter', filter_query=query):
        result = self.data.query(
            'not ({0})'.format(query),
            inplace=inplace,
        )
    return result
python
def filter(self, query, inplace=True):
    """Use a query statement to filter data. Note that you specify the
    data to be removed!

    Parameters
    ----------
    query : string
        The query string to be evaluated. Is directly provided to
        pandas.DataFrame.query
    inplace : bool
        if True, change the container dataframe in place (defaults to
        True)

    Returns
    -------
    result : :py:class:`pandas.DataFrame`
        DataFrame that contains the result of the filter application
    """
    with LogDataChanges(self, filter_action='filter', filter_query=query):
        result = self.data.query(
            'not ({0})'.format(query),
            inplace=inplace,
        )
    return result
[ "def", "filter", "(", "self", ",", "query", ",", "inplace", "=", "True", ")", ":", "with", "LogDataChanges", "(", "self", ",", "filter_action", "=", "'filter'", ",", "filter_query", "=", "query", ")", ":", "result", "=", "self", ".", "data", ".", "query", "(", "'not ({0})'", ".", "format", "(", "query", ")", ",", "inplace", "=", "inplace", ",", ")", "return", "result" ]
Use a query statement to filter data. Note that you specify the data
to be removed!

Parameters
----------
query : string
    The query string to be evaluated. Is directly provided to
    pandas.DataFrame.query
inplace : bool
    if True, change the container dataframe in place (defaults to True)

Returns
-------
result : :py:class:`pandas.DataFrame`
    DataFrame that contains the result of the filter application
[ "Use", "a", "query", "statement", "to", "filter", "data", ".", "Note", "that", "you", "specify", "the", "data", "to", "be", "removed!" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/ERT.py#L236-L259
train
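filter() relies on the same inversion: the caller names the rows to remove, and wrapping the query in 'not (...)' turns it into the keep-expression that pandas.DataFrame.query expects. A minimal standalone illustration (hypothetical data):

import pandas as pd

df = pd.DataFrame({'r': [1.0, 250.0, 3.0]})
query = 'r > 100'  # rows to REMOVE
kept = df.query('not ({0})'.format(query))
print(kept)  # only the rows with r <= 100 remain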
geophysics-ubonn/reda
lib/reda/containers/ERT.py
ERT.compute_K_analytical
def compute_K_analytical(self, spacing):
    """Compute geometrical factors over the homogeneous half-space with a
    constant electrode spacing
    """
    K = redaK.compute_K_analytical(self.data, spacing=spacing)
    self.data = redaK.apply_K(self.data, K)
    redafixK.fix_sign_with_K(self.data)
python
def compute_K_analytical(self, spacing):
    """Compute geometrical factors over the homogeneous half-space with a
    constant electrode spacing
    """
    K = redaK.compute_K_analytical(self.data, spacing=spacing)
    self.data = redaK.apply_K(self.data, K)
    redafixK.fix_sign_with_K(self.data)
[ "def", "compute_K_analytical", "(", "self", ",", "spacing", ")", ":", "K", "=", "redaK", ".", "compute_K_analytical", "(", "self", ".", "data", ",", "spacing", "=", "spacing", ")", "self", ".", "data", "=", "redaK", ".", "apply_K", "(", "self", ".", "data", ",", "K", ")", "redafixK", ".", "fix_sign_with_K", "(", "self", ".", "data", ")" ]
Compute geometrical factors over the homogeneous half-space with a constant electrode spacing
[ "Compute", "geometrical", "factors", "over", "the", "homogeneous", "half", "-", "space", "with", "a", "constant", "electrode", "spacing" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/ERT.py#L261-L267
train
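For context on the record above: the geometric factor of a four-electrode surface spread over a homogeneous half-space follows from superposing point-source potentials, K = 2*pi / (1/AM - 1/AN - 1/BM + 1/BN). A standalone sketch under that standard formula (the reda helpers redaK/redafixK are not reproduced; the sign of K depends on electrode ordering, which is presumably why the record also calls fix_sign_with_K):

import math

def k_analytical(a, b, m, n):
    """Geometric factor for colinear surface electrodes at positions a, b, m, n."""
    term = (1.0 / abs(a - m) - 1.0 / abs(a - n)
            - 1.0 / abs(b - m) + 1.0 / abs(b - n))
    return 2.0 * math.pi / term

# dipole-dipole spread with unit spacing: A=0, B=1, M=2, N=3
K = k_analytical(0.0, 1.0, 2.0, 3.0)
print(K)  # |K| = 6*pi; apparent resistivity is then rho_a = K * (U / I)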
geophysics-ubonn/reda
lib/reda/containers/ERT.py
ERT.pseudosection
def pseudosection(self, column='r', filename=None, log10=False, **kwargs):
    """Plot a pseudosection of the given column. Note that this function
    only works with dipole-dipole data at the moment.

    Parameters
    ----------
    column : string, optional
        Column to plot into the pseudosection, default: r
    filename : string, optional
        if not None, save the resulting figure directory to disc
    log10 : bool, optional
        if True, then plot values in log10, default: False
    **kwargs : dict
        all additional parameters are directly provided to
        :py:func:`reda.plotters.pseudoplots.PS.plot_pseudosection_type2`

    Returns
    -------
    fig : :class:`matplotlib.Figure`
        matplotlib figure object
    ax : :class:`matplotlib.axes`
        matplotlib axes object
    cb : colorbar object
        matplotlib colorbar object
    """
    fig, ax, cb = PS.plot_pseudosection_type2(
        self.data, column=column, log10=log10, **kwargs
    )
    if filename is not None:
        fig.savefig(filename, dpi=300)
    return fig, ax, cb
python
def pseudosection(self, column='r', filename=None, log10=False, **kwargs):
    """Plot a pseudosection of the given column. Note that this function
    only works with dipole-dipole data at the moment.

    Parameters
    ----------
    column : string, optional
        Column to plot into the pseudosection, default: r
    filename : string, optional
        if not None, save the resulting figure directory to disc
    log10 : bool, optional
        if True, then plot values in log10, default: False
    **kwargs : dict
        all additional parameters are directly provided to
        :py:func:`reda.plotters.pseudoplots.PS.plot_pseudosection_type2`

    Returns
    -------
    fig : :class:`matplotlib.Figure`
        matplotlib figure object
    ax : :class:`matplotlib.axes`
        matplotlib axes object
    cb : colorbar object
        matplotlib colorbar object
    """
    fig, ax, cb = PS.plot_pseudosection_type2(
        self.data, column=column, log10=log10, **kwargs
    )
    if filename is not None:
        fig.savefig(filename, dpi=300)
    return fig, ax, cb
[ "def", "pseudosection", "(", "self", ",", "column", "=", "'r'", ",", "filename", "=", "None", ",", "log10", "=", "False", ",", "*", "*", "kwargs", ")", ":", "fig", ",", "ax", ",", "cb", "=", "PS", ".", "plot_pseudosection_type2", "(", "self", ".", "data", ",", "column", "=", "column", ",", "log10", "=", "log10", ",", "*", "*", "kwargs", ")", "if", "filename", "is", "not", "None", ":", "fig", ".", "savefig", "(", "filename", ",", "dpi", "=", "300", ")", "return", "fig", ",", "ax", ",", "cb" ]
Plot a pseudosection of the given column. Note that this function only
works with dipole-dipole data at the moment.

Parameters
----------
column : string, optional
    Column to plot into the pseudosection, default: r
filename : string, optional
    if not None, save the resulting figure directory to disc
log10 : bool, optional
    if True, then plot values in log10, default: False
**kwargs : dict
    all additional parameters are directly provided to
    :py:func:`reda.plotters.pseudoplots.PS.plot_pseudosection_type2`

Returns
-------
fig : :class:`matplotlib.Figure`
    matplotlib figure object
ax : :class:`matplotlib.axes`
    matplotlib axes object
cb : colorbar object
    matplotlib colorbar object
[ "Plot", "a", "pseudosection", "of", "the", "given", "column", ".", "Note", "that", "this", "function", "only", "works", "with", "dipole", "-", "dipole", "data", "at", "the", "moment", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/ERT.py#L328-L358
train
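A hedged usage sketch for the pseudosection record, assuming a reda ERT container populated with dipole-dipole data (per the docstring's stated limitation); the input file name is hypothetical:

import reda

ert = reda.ERT()
ert.import_bert('data.ohm')  # hypothetical input file
fig, ax, cb = ert.pseudosection(column='r', log10=True)
fig.savefig('pseudosection.png', dpi=300)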
geophysics-ubonn/reda
lib/reda/containers/ERT.py
ERT.histogram
def histogram(self, column='r', filename=None, log10=False, **kwargs):
    """Plot a histogram of one data column"""
    return_dict = HS.plot_histograms(self.data, column)
    if filename is not None:
        return_dict['all'].savefig(filename, dpi=300)
    return return_dict
python
def histogram(self, column='r', filename=None, log10=False, **kwargs):
    """Plot a histogram of one data column"""
    return_dict = HS.plot_histograms(self.data, column)
    if filename is not None:
        return_dict['all'].savefig(filename, dpi=300)
    return return_dict
[ "def", "histogram", "(", "self", ",", "column", "=", "'r'", ",", "filename", "=", "None", ",", "log10", "=", "False", ",", "*", "*", "kwargs", ")", ":", "return_dict", "=", "HS", ".", "plot_histograms", "(", "self", ".", "data", ",", "column", ")", "if", "filename", "is", "not", "None", ":", "return_dict", "[", "'all'", "]", ".", "savefig", "(", "filename", ",", "dpi", "=", "300", ")", "return", "return_dict" ]
Plot a histogram of one data column
[ "Plot", "a", "histogram", "of", "one", "data", "column" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/ERT.py#L360-L365
train
geophysics-ubonn/reda
lib/reda/containers/ERT.py
ERT.delete_measurements
def delete_measurements(self, row_or_rows):
    """Delete one or more measurements by index of the DataFrame.

    Resets the DataFrame index.

    Parameters
    ----------
    row_or_rows : int or list of ints
        Row numbers (starting with zero) of the data DataFrame (ert.data)
        to delete

    Returns
    -------
    None
    """
    self.data.drop(self.data.index[row_or_rows], inplace=True)
    self.data = self.data.reset_index()
python
def delete_measurements(self, row_or_rows):
    """Delete one or more measurements by index of the DataFrame.

    Resets the DataFrame index.

    Parameters
    ----------
    row_or_rows : int or list of ints
        Row numbers (starting with zero) of the data DataFrame (ert.data)
        to delete

    Returns
    -------
    None
    """
    self.data.drop(self.data.index[row_or_rows], inplace=True)
    self.data = self.data.reset_index()
[ "def", "delete_measurements", "(", "self", ",", "row_or_rows", ")", ":", "self", ".", "data", ".", "drop", "(", "self", ".", "data", ".", "index", "[", "row_or_rows", "]", ",", "inplace", "=", "True", ")", "self", ".", "data", "=", "self", ".", "data", ".", "reset_index", "(", ")" ]
Delete one or more measurements by index of the DataFrame.

Resets the DataFrame index.

Parameters
----------
row_or_rows : int or list of ints
    Row numbers (starting with zero) of the data DataFrame (ert.data)
    to delete

Returns
-------
None
[ "Delete", "one", "or", "more", "measurements", "by", "index", "of", "the", "DataFrame", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/ERT.py#L371-L388
train
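One pandas detail worth noting about the record above: reset_index() without drop=True moves the old index into a new 'index' column. A standalone variant of the same drop-by-position idiom that discards the old index entirely (a sketch, not the record's code):

import pandas as pd

df = pd.DataFrame({'r': [1.0, 2.0, 3.0, 4.0]})
rows = [1, 3]  # positional row numbers to delete
df = df.drop(df.index[rows]).reset_index(drop=True)
print(df)  # index runs 0..1 again, with no leftover 'index' column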
gtaylor/django-athumb
athumb/pial/engines/pil_engine.py
PILEngine.get_image
def get_image(self, source):
    """
    Given a file-like object, loads it up into a PIL.Image object
    and returns it.

    :param file source: A file-like object to load the image from.
    :rtype: PIL.Image
    :returns: The loaded image.
    """
    buf = StringIO(source.read())
    return Image.open(buf)
python
def get_image(self, source):
    """
    Given a file-like object, loads it up into a PIL.Image object
    and returns it.

    :param file source: A file-like object to load the image from.
    :rtype: PIL.Image
    :returns: The loaded image.
    """
    buf = StringIO(source.read())
    return Image.open(buf)
[ "def", "get_image", "(", "self", ",", "source", ")", ":", "buf", "=", "StringIO", "(", "source", ".", "read", "(", ")", ")", "return", "Image", ".", "open", "(", "buf", ")" ]
Given a file-like object, loads it up into a PIL.Image object
and returns it.

:param file source: A file-like object to load the image from.
:rtype: PIL.Image
:returns: The loaded image.
[ "Given", "a", "file", "-", "like", "object", "loads", "it", "up", "into", "a", "PIL", ".", "Image", "object", "and", "returns", "it", "." ]
69261ace0dff81e33156a54440874456a7b38dfb
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/pial/engines/pil_engine.py#L13-L23
train
gtaylor/django-athumb
athumb/pial/engines/pil_engine.py
PILEngine.is_valid_image
def is_valid_image(self, raw_data):
    """
    Checks if the supplied raw data is valid image data.

    :param str raw_data: A string representation of the image data.
    :rtype: bool
    :returns: ``True`` if ``raw_data`` is valid, ``False`` if not.
    """
    buf = StringIO(raw_data)
    try:
        trial_image = Image.open(buf)
        trial_image.verify()
    except Exception:
        # TODO: Get more specific with this exception handling.
        return False
    return True
python
def is_valid_image(self, raw_data):
    """
    Checks if the supplied raw data is valid image data.

    :param str raw_data: A string representation of the image data.
    :rtype: bool
    :returns: ``True`` if ``raw_data`` is valid, ``False`` if not.
    """
    buf = StringIO(raw_data)
    try:
        trial_image = Image.open(buf)
        trial_image.verify()
    except Exception:
        # TODO: Get more specific with this exception handling.
        return False
    return True
[ "def", "is_valid_image", "(", "self", ",", "raw_data", ")", ":", "buf", "=", "StringIO", "(", "raw_data", ")", "try", ":", "trial_image", "=", "Image", ".", "open", "(", "buf", ")", "trial_image", ".", "verify", "(", ")", "except", "Exception", ":", "# TODO: Get more specific with this exception handling.", "return", "False", "return", "True" ]
Checks if the supplied raw data is valid image data.

:param str raw_data: A string representation of the image data.
:rtype: bool
:returns: ``True`` if ``raw_data`` is valid, ``False`` if not.
[ "Checks", "if", "the", "supplied", "raw", "data", "is", "valid", "image", "data", "." ]
69261ace0dff81e33156a54440874456a7b38dfb
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/pial/engines/pil_engine.py#L35-L50
train
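PIL's Image.verify() only checks file integrity and leaves the Image object unusable afterwards, which is why validation and actual loading are separate opens in this engine. A Python 3 rendering of the same check (the record itself targets Python 2's StringIO):

from io import BytesIO
from PIL import Image

def is_valid_image(raw_data: bytes) -> bool:
    try:
        with Image.open(BytesIO(raw_data)) as trial_image:
            trial_image.verify()  # raises on truncated/corrupt data
    except Exception:
        return False
    return True

print(is_valid_image(b'not an image'))  # False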
gtaylor/django-athumb
athumb/pial/engines/pil_engine.py
PILEngine._colorspace
def _colorspace(self, image, colorspace):
    """
    Sets the image's colorspace. This is typical 'RGB' or 'GRAY', but
    may be other things, depending on your choice of Engine.

    :param PIL.Image image: The image whose colorspace to adjust.
    :param str colorspace: One of either 'RGB' or 'GRAY'.
    :rtype: PIL.Image
    :returns: The colorspace-adjusted image.
    """
    if colorspace == 'RGB':
        if image.mode == 'RGBA':
            # RGBA is just RGB + Alpha
            return image
        if image.mode == 'P' and 'transparency' in image.info:
            return image.convert('RGBA')
        return image.convert('RGB')
    if colorspace == 'GRAY':
        return image.convert('L')
    return image
python
def _colorspace(self, image, colorspace):
    """
    Sets the image's colorspace. This is typical 'RGB' or 'GRAY', but
    may be other things, depending on your choice of Engine.

    :param PIL.Image image: The image whose colorspace to adjust.
    :param str colorspace: One of either 'RGB' or 'GRAY'.
    :rtype: PIL.Image
    :returns: The colorspace-adjusted image.
    """
    if colorspace == 'RGB':
        if image.mode == 'RGBA':
            # RGBA is just RGB + Alpha
            return image
        if image.mode == 'P' and 'transparency' in image.info:
            return image.convert('RGBA')
        return image.convert('RGB')
    if colorspace == 'GRAY':
        return image.convert('L')
    return image
[ "def", "_colorspace", "(", "self", ",", "image", ",", "colorspace", ")", ":", "if", "colorspace", "==", "'RGB'", ":", "if", "image", ".", "mode", "==", "'RGBA'", ":", "# RGBA is just RGB + Alpha", "return", "image", "if", "image", ".", "mode", "==", "'P'", "and", "'transparency'", "in", "image", ".", "info", ":", "return", "image", ".", "convert", "(", "'RGBA'", ")", "return", "image", ".", "convert", "(", "'RGB'", ")", "if", "colorspace", "==", "'GRAY'", ":", "return", "image", ".", "convert", "(", "'L'", ")", "return", "image" ]
Sets the image's colorspace. This is typical 'RGB' or 'GRAY', but
may be other things, depending on your choice of Engine.

:param PIL.Image image: The image whose colorspace to adjust.
:param str colorspace: One of either 'RGB' or 'GRAY'.
:rtype: PIL.Image
:returns: The colorspace-adjusted image.
[ "Sets", "the", "image", "s", "colorspace", ".", "This", "is", "typical", "RGB", "or", "GRAY", "but", "may", "be", "other", "things", "depending", "on", "your", "choice", "of", "Engine", "." ]
69261ace0dff81e33156a54440874456a7b38dfb
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/pial/engines/pil_engine.py#L52-L71
train
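The mode checks in _colorspace matter because converting a palette ('P') image that carries transparency info straight to 'RGB' would discard the alpha information. A quick standalone demonstration of the conversions involved (behavior as commonly observed in Pillow; treat this as a sketch):

from PIL import Image

pal = Image.new('P', (4, 4))
pal.info['transparency'] = 0  # mark palette index 0 as transparent
print(pal.convert('RGBA').mode)  # 'RGBA' - an alpha channel is kept
print(Image.new('RGB', (4, 4)).convert('L').mode)  # 'L' (grayscale)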
gtaylor/django-athumb
athumb/pial/engines/pil_engine.py
PILEngine._get_raw_data
def _get_raw_data(self, image, format, quality):
    """
    Returns the raw data from the Image, which can be directly written
    to a something, be it a file-like object or a database.

    :param PIL.Image image: The image to get the raw data for.
    :param str format: The format to save to. If this value is ``None``,
        PIL will attempt to guess. You're almost always better off
        providing this yourself. For a full list of formats, see the PIL
        handbook at: http://www.pythonware.com/library/pil/handbook/index.htm
        The *Appendixes* section at the bottom, in particular.
    :param int quality: A quality level as a percent. The lower, the
        higher the compression, the worse the artifacts. Check the
        format's handbook page for what the different values for this
        mean. For example, JPEG's max quality level is 95, with 100
        completely disabling JPEG quantization.
    :rtype: str
    :returns: A string representation of the image.
    """
    ImageFile.MAXBLOCK = 1024 * 1024
    buf = StringIO()

    try:
        # optimize makes the encoder do a second pass over the image, if
        # the format supports it.
        image.save(buf, format=format, quality=quality, optimize=1)
    except IOError:
        # optimize is a no-go, omit it this attempt.
        image.save(buf, format=format, quality=quality)

    raw_data = buf.getvalue()
    buf.close()
    return raw_data
python
def _get_raw_data(self, image, format, quality):
    """
    Returns the raw data from the Image, which can be directly written
    to a something, be it a file-like object or a database.

    :param PIL.Image image: The image to get the raw data for.
    :param str format: The format to save to. If this value is ``None``,
        PIL will attempt to guess. You're almost always better off
        providing this yourself. For a full list of formats, see the PIL
        handbook at: http://www.pythonware.com/library/pil/handbook/index.htm
        The *Appendixes* section at the bottom, in particular.
    :param int quality: A quality level as a percent. The lower, the
        higher the compression, the worse the artifacts. Check the
        format's handbook page for what the different values for this
        mean. For example, JPEG's max quality level is 95, with 100
        completely disabling JPEG quantization.
    :rtype: str
    :returns: A string representation of the image.
    """
    ImageFile.MAXBLOCK = 1024 * 1024
    buf = StringIO()

    try:
        # optimize makes the encoder do a second pass over the image, if
        # the format supports it.
        image.save(buf, format=format, quality=quality, optimize=1)
    except IOError:
        # optimize is a no-go, omit it this attempt.
        image.save(buf, format=format, quality=quality)

    raw_data = buf.getvalue()
    buf.close()
    return raw_data
[ "def", "_get_raw_data", "(", "self", ",", "image", ",", "format", ",", "quality", ")", ":", "ImageFile", ".", "MAXBLOCK", "=", "1024", "*", "1024", "buf", "=", "StringIO", "(", ")", "try", ":", "# ptimize makes the encoder do a second pass over the image, if", "# the format supports it.", "image", ".", "save", "(", "buf", ",", "format", "=", "format", ",", "quality", "=", "quality", ",", "optimize", "=", "1", ")", "except", "IOError", ":", "# optimize is a no-go, omit it this attempt.", "image", ".", "save", "(", "buf", ",", "format", "=", "format", ",", "quality", "=", "quality", ")", "raw_data", "=", "buf", ".", "getvalue", "(", ")", "buf", ".", "close", "(", ")", "return", "raw_data" ]
Returns the raw data from the Image, which can be directly written
to a something, be it a file-like object or a database.

:param PIL.Image image: The image to get the raw data for.
:param str format: The format to save to. If this value is ``None``,
    PIL will attempt to guess. You're almost always better off
    providing this yourself. For a full list of formats, see the PIL
    handbook at: http://www.pythonware.com/library/pil/handbook/index.htm
    The *Appendixes* section at the bottom, in particular.
:param int quality: A quality level as a percent. The lower, the
    higher the compression, the worse the artifacts. Check the format's
    handbook page for what the different values for this mean. For
    example, JPEG's max quality level is 95, with 100 completely
    disabling JPEG quantization.
:rtype: str
:returns: A string representation of the image.
[ "Returns", "the", "raw", "data", "from", "the", "Image", "which", "can", "be", "directly", "written", "to", "a", "something", "be", "it", "a", "file", "-", "like", "object", "or", "a", "database", "." ]
69261ace0dff81e33156a54440874456a7b38dfb
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/pial/engines/pil_engine.py#L101-L134
train
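The try/except around optimize in the record above guards against encoders that reject the flag (the ImageFile.MAXBLOCK bump is the classic companion workaround for large optimized JPEGs in old PIL). A Python 3 sketch of the same save-with-fallback pattern:

from io import BytesIO
from PIL import Image

def raw_jpeg(image, quality=85):
    buf = BytesIO()
    try:
        # optimize asks the encoder for an extra pass when supported
        image.save(buf, format='JPEG', quality=quality, optimize=True)
    except IOError:
        # fall back without optimize
        image.save(buf, format='JPEG', quality=quality)
    return buf.getvalue()

data = raw_jpeg(Image.new('RGB', (32, 32), 'red'))
print(len(data))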
digidotcom/python-wvalib
wva/stream.py
WVAEventStream.enable
def enable(self):
    """Enable the stream thread

    This operation will ensure that the thread that is responsible for
    connecting to the WVA and triggering event callbacks is started.
    This thread will continue to run and do what it needs to do to
    maintain a connection to the WVA.

    The status of the thread can be monitored by calling
    :meth:`get_status`.
    """
    with self._lock:
        if self._event_listener_thread is None:
            self._event_listener_thread = WVAEventListenerThread(self, self._http_client)
            self._event_listener_thread.start()
python
def enable(self):
    """Enable the stream thread

    This operation will ensure that the thread that is responsible for
    connecting to the WVA and triggering event callbacks is started.
    This thread will continue to run and do what it needs to do to
    maintain a connection to the WVA.

    The status of the thread can be monitored by calling
    :meth:`get_status`.
    """
    with self._lock:
        if self._event_listener_thread is None:
            self._event_listener_thread = WVAEventListenerThread(self, self._http_client)
            self._event_listener_thread.start()
[ "def", "enable", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_event_listener_thread", "is", "None", ":", "self", ".", "_event_listener_thread", "=", "WVAEventListenerThread", "(", "self", ",", "self", ".", "_http_client", ")", "self", ".", "_event_listener_thread", ".", "start", "(", ")" ]
Enable the stream thread

This operation will ensure that the thread that is responsible for
connecting to the WVA and triggering event callbacks is started.  This
thread will continue to run and do what it needs to do to maintain a
connection to the WVA.

The status of the thread can be monitored by calling :meth:`get_status`.
[ "Enable", "the", "stream", "thread" ]
4252735e2775f80ebaffd813fbe84046d26906b3
https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/stream.py#L49-L62
train
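The lock-guarded None check in enable() is the standard idempotent-start pattern: a second call must not spawn a second worker. A generic standalone sketch of the same pattern (not WVA-specific):

import threading

class Service:
    def __init__(self):
        self._lock = threading.Lock()
        self._worker = None

    def enable(self):
        with self._lock:
            if self._worker is None:  # only the first call starts a thread
                self._worker = threading.Thread(target=self._run, daemon=True)
                self._worker.start()

    def _run(self):
        pass  # connect, poll, dispatch callbacks...

svc = Service()
svc.enable()
svc.enable()  # no-op: worker already running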
digidotcom/python-wvalib
wva/stream.py
WVAEventStream.disable
def disable(self):
    """Disconnect from the event stream"""
    with self._lock:
        if self._event_listener_thread is not None:
            self._event_listener_thread.stop()
            self._event_listener_thread = None
python
def disable(self):
    """Disconnect from the event stream"""
    with self._lock:
        if self._event_listener_thread is not None:
            self._event_listener_thread.stop()
            self._event_listener_thread = None
[ "def", "disable", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_event_listener_thread", "is", "not", "None", ":", "self", ".", "_event_listener_thread", ".", "stop", "(", ")", "self", ".", "_event_listener_thread", "=", "None" ]
Disconnect from the event stream
[ "Disconnect", "from", "the", "event", "stream" ]
4252735e2775f80ebaffd813fbe84046d26906b3
https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/stream.py#L64-L69
train
digidotcom/python-wvalib
wva/stream.py
WVAEventStream.get_status
def get_status(self):
    """Get the current status of the event stream system

    The status will be one of the following:

    - EVENT_STREAM_STATE_STOPPED: if the stream thread has not been enabled
    - EVENT_STREAM_STATE_CONNECTING: the stream thread is running and
      attempting to establish a connection to the WVA to receive events.
    - EVENT_STREAM_STATE_CONNECTED: We are connected to the WVA and
      receiving or ready to receive events.  If no events are being
      received, one should verify that vehicle data is being received
      and there is an appropriate set of subscriptions set up.
    """
    with self._lock:
        if self._event_listener_thread is None:
            return EVENT_STREAM_STATE_DISABLED
        else:
            return self._event_listener_thread.get_state()
python
def get_status(self):
    """Get the current status of the event stream system

    The status will be one of the following:

    - EVENT_STREAM_STATE_STOPPED: if the stream thread has not been enabled
    - EVENT_STREAM_STATE_CONNECTING: the stream thread is running and
      attempting to establish a connection to the WVA to receive events.
    - EVENT_STREAM_STATE_CONNECTED: We are connected to the WVA and
      receiving or ready to receive events.  If no events are being
      received, one should verify that vehicle data is being received
      and there is an appropriate set of subscriptions set up.
    """
    with self._lock:
        if self._event_listener_thread is None:
            return EVENT_STREAM_STATE_DISABLED
        else:
            return self._event_listener_thread.get_state()
[ "def", "get_status", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_event_listener_thread", "is", "None", ":", "return", "EVENT_STREAM_STATE_DISABLED", "else", ":", "return", "self", ".", "_event_listener_thread", ".", "get_state", "(", ")" ]
Get the current status of the event stream system

The status will be one of the following:

- EVENT_STREAM_STATE_STOPPED: if the stream thread has not been enabled
- EVENT_STREAM_STATE_CONNECTING: the stream thread is running and
  attempting to establish a connection to the WVA to receive events.
- EVENT_STREAM_STATE_CONNECTED: We are connected to the WVA and receiving
  or ready to receive events.  If no events are being received, one
  should verify that vehicle data is being received and there is an
  appropriate set of subscriptions set up.
[ "Get", "the", "current", "status", "of", "the", "event", "stream", "system" ]
4252735e2775f80ebaffd813fbe84046d26906b3
https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/stream.py#L71-L88
train
digidotcom/python-wvalib
wva/stream.py
WVAEventListenerThread._parse_one_event
def _parse_one_event(self):
    """Parse the stream buffer and return either a single event or None"""
    # WVA includes \r\n between messages which the parser doesn't like, so we
    # throw away any data before an opening brace
    try:
        open_brace_idx = self._buf.index('{')
    except ValueError:
        self._buf = six.u('')  # no brace found
    else:
        if open_brace_idx > 0:
            self._buf = self._buf[open_brace_idx:]

    try:
        event, idx = self._decoder.raw_decode(self._buf)
        self._buf = self._buf[idx:]
        return event
    except ValueError:
        return None
python
def _parse_one_event(self):
    """Parse the stream buffer and return either a single event or None"""
    # WVA includes \r\n between messages which the parser doesn't like, so we
    # throw away any data before an opening brace
    try:
        open_brace_idx = self._buf.index('{')
    except ValueError:
        self._buf = six.u('')  # no brace found
    else:
        if open_brace_idx > 0:
            self._buf = self._buf[open_brace_idx:]

    try:
        event, idx = self._decoder.raw_decode(self._buf)
        self._buf = self._buf[idx:]
        return event
    except ValueError:
        return None
[ "def", "_parse_one_event", "(", "self", ")", ":", "# WVA includes \\r\\n between messages which the parser doesn't like, so we", "# throw away any data before a opening brace", "try", ":", "open_brace_idx", "=", "self", ".", "_buf", ".", "index", "(", "'{'", ")", "except", "ValueError", ":", "self", ".", "_buf", "=", "six", ".", "u", "(", "''", ")", "# no brace found", "else", ":", "if", "open_brace_idx", ">", "0", ":", "self", ".", "_buf", "=", "self", ".", "_buf", "[", "open_brace_idx", ":", "]", "try", ":", "event", ",", "idx", "=", "self", ".", "_decoder", ".", "raw_decode", "(", "self", ".", "_buf", ")", "self", ".", "_buf", "=", "self", ".", "_buf", "[", "idx", ":", "]", "return", "event", "except", "ValueError", ":", "return", "None" ]
Parse the stream buffer and return either a single event or None
[ "Parse", "the", "stream", "buffer", "and", "return", "either", "a", "single", "event", "or", "None" ]
4252735e2775f80ebaffd813fbe84046d26906b3
https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/stream.py#L144-L161
train
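json.JSONDecoder.raw_decode is what makes the record above incremental: unlike json.loads it tolerates trailing bytes and reports how many characters it consumed, so one object can be peeled off a growing buffer at a time. A self-contained sketch of the same loop:

import json

decoder = json.JSONDecoder()
buf = '{"a": 1}\r\n{"b": 2}\r\n{"c"'  # last message still incomplete

while True:
    brace = buf.find('{')
    if brace < 0:
        break
    buf = buf[brace:]  # skip the \r\n separators
    try:
        event, idx = decoder.raw_decode(buf)
    except ValueError:
        break  # incomplete JSON - wait for more data
    buf = buf[idx:]
    print(event)  # {'a': 1} then {'b': 2}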
andy-z/ged4py
ged4py/parser.py
guess_codec
def guess_codec(file, errors="strict", require_char=False):
    """Look at file contents and guess its correct encoding.

    File must be open in binary mode and positioned at offset 0. If BOM
    record is present then it is assumed to be UTF-8 or UTF-16 encoded
    file. GEDCOM header is searched for CHAR record and encoding name
    is extracted from it, if BOM record is present then CHAR record
    must match BOM-defined encoding.

    :param file: File object, must be open in binary mode.
    :param str errors: Controls error handling behavior during string
        decoding, accepts same values as standard `codecs.decode` method.
    :param bool require_char: If True then exception is thrown if CHAR
        record is not found in a header, if False and CHAR is not in the
        header then codec determined from BOM or "gedcom" is returned.
    :returns: Tuple (codec_name, bom_size)
    :raises: :py:class:`CodecError` when codec name in file is unknown or
        when codec name in file contradicts codec determined from BOM.
    :raises: :py:class:`UnicodeDecodeError` when codec fails to decode
        input lines and `errors` is set to "strict" (default).
    """
    # mapping of gedcom character set specifiers to Python encoding names
    gedcom_char_to_codec = {
        'ansel': 'gedcom',
    }

    # check BOM first
    bom_codec = check_bom(file)
    bom_size = file.tell()
    codec = bom_codec or 'gedcom'

    # scan header until CHAR or end of header
    while True:
        # this stops at '\n'
        line = file.readline()
        if not line:
            raise IOError("Unexpected EOF while reading GEDCOM header")

        # do not decode bytes to strings here, reason is that some
        # stupid apps split CONC record at byte level (in middle of
        # of multi-byte characters). This implies that we can only
        # work with encodings that have ASCII as single-byte subset.
        line = line.lstrip().rstrip(b"\r\n")
        words = line.split()

        if len(words) >= 2 and words[0] == b"0" and words[1] != b"HEAD":
            # past header but have not seen CHAR
            if require_char:
                raise CodecError("GEDCOM header does not have CHAR record")
            else:
                break
        elif len(words) >= 3 and words[0] == b"1" and words[1] == b"CHAR":
            try:
                encoding = words[2].decode(codec, errors)
                encoding = gedcom_char_to_codec.get(encoding.lower(),
                                                    encoding.lower())
                new_codec = codecs.lookup(encoding).name
            except LookupError:
                raise CodecError("Unknown codec name {0}".format(encoding))
            if bom_codec is None:
                codec = new_codec
            elif new_codec != bom_codec:
                raise CodecError("CHAR codec {0} is different from BOM "
                                 "codec {1}".format(new_codec, bom_codec))
            break

    return codec, bom_size
python
def guess_codec(file, errors="strict", require_char=False):
    """Look at file contents and guess its correct encoding.

    File must be open in binary mode and positioned at offset 0. If BOM
    record is present then it is assumed to be UTF-8 or UTF-16 encoded
    file. GEDCOM header is searched for CHAR record and encoding name
    is extracted from it, if BOM record is present then CHAR record
    must match BOM-defined encoding.

    :param file: File object, must be open in binary mode.
    :param str errors: Controls error handling behavior during string
        decoding, accepts same values as standard `codecs.decode` method.
    :param bool require_char: If True then exception is thrown if CHAR
        record is not found in a header, if False and CHAR is not in the
        header then codec determined from BOM or "gedcom" is returned.
    :returns: Tuple (codec_name, bom_size)
    :raises: :py:class:`CodecError` when codec name in file is unknown or
        when codec name in file contradicts codec determined from BOM.
    :raises: :py:class:`UnicodeDecodeError` when codec fails to decode
        input lines and `errors` is set to "strict" (default).
    """
    # mapping of gedcom character set specifiers to Python encoding names
    gedcom_char_to_codec = {
        'ansel': 'gedcom',
    }

    # check BOM first
    bom_codec = check_bom(file)
    bom_size = file.tell()
    codec = bom_codec or 'gedcom'

    # scan header until CHAR or end of header
    while True:
        # this stops at '\n'
        line = file.readline()
        if not line:
            raise IOError("Unexpected EOF while reading GEDCOM header")

        # do not decode bytes to strings here, reason is that some
        # stupid apps split CONC record at byte level (in middle of
        # of multi-byte characters). This implies that we can only
        # work with encodings that have ASCII as single-byte subset.
        line = line.lstrip().rstrip(b"\r\n")
        words = line.split()

        if len(words) >= 2 and words[0] == b"0" and words[1] != b"HEAD":
            # past header but have not seen CHAR
            if require_char:
                raise CodecError("GEDCOM header does not have CHAR record")
            else:
                break
        elif len(words) >= 3 and words[0] == b"1" and words[1] == b"CHAR":
            try:
                encoding = words[2].decode(codec, errors)
                encoding = gedcom_char_to_codec.get(encoding.lower(),
                                                    encoding.lower())
                new_codec = codecs.lookup(encoding).name
            except LookupError:
                raise CodecError("Unknown codec name {0}".format(encoding))
            if bom_codec is None:
                codec = new_codec
            elif new_codec != bom_codec:
                raise CodecError("CHAR codec {0} is different from BOM "
                                 "codec {1}".format(new_codec, bom_codec))
            break

    return codec, bom_size
[ "def", "guess_codec", "(", "file", ",", "errors", "=", "\"strict\"", ",", "require_char", "=", "False", ")", ":", "# mapping of gedcom character set specifiers to Python encoding names", "gedcom_char_to_codec", "=", "{", "'ansel'", ":", "'gedcom'", ",", "}", "# check BOM first", "bom_codec", "=", "check_bom", "(", "file", ")", "bom_size", "=", "file", ".", "tell", "(", ")", "codec", "=", "bom_codec", "or", "'gedcom'", "# scan header until CHAR or end of header", "while", "True", ":", "# this stops at '\\n'", "line", "=", "file", ".", "readline", "(", ")", "if", "not", "line", ":", "raise", "IOError", "(", "\"Unexpected EOF while reading GEDCOM header\"", ")", "# do not decode bytes to strings here, reason is that some", "# stupid apps split CONC record at byte level (in middle of", "# of multi-byte characters). This implies that we can only", "# work with encodings that have ASCII as single-byte subset.", "line", "=", "line", ".", "lstrip", "(", ")", ".", "rstrip", "(", "b\"\\r\\n\"", ")", "words", "=", "line", ".", "split", "(", ")", "if", "len", "(", "words", ")", ">=", "2", "and", "words", "[", "0", "]", "==", "b\"0\"", "and", "words", "[", "1", "]", "!=", "b\"HEAD\"", ":", "# past header but have not seen CHAR", "if", "require_char", ":", "raise", "CodecError", "(", "\"GEDCOM header does not have CHAR record\"", ")", "else", ":", "break", "elif", "len", "(", "words", ")", ">=", "3", "and", "words", "[", "0", "]", "==", "b\"1\"", "and", "words", "[", "1", "]", "==", "b\"CHAR\"", ":", "try", ":", "encoding", "=", "words", "[", "2", "]", ".", "decode", "(", "codec", ",", "errors", ")", "encoding", "=", "gedcom_char_to_codec", ".", "get", "(", "encoding", ".", "lower", "(", ")", ",", "encoding", ".", "lower", "(", ")", ")", "new_codec", "=", "codecs", ".", "lookup", "(", "encoding", ")", ".", "name", "except", "LookupError", ":", "raise", "CodecError", "(", "\"Unknown codec name {0}\"", ".", "format", "(", "encoding", ")", ")", "if", "bom_codec", "is", "None", ":", "codec", "=", "new_codec", "elif", "new_codec", "!=", "bom_codec", ":", "raise", "CodecError", "(", "\"CHAR codec {0} is different from BOM \"", "\"codec {1}\"", ".", "format", "(", "new_codec", ",", "bom_codec", ")", ")", "break", "return", "codec", ",", "bom_size" ]
Look at file contents and guess its correct encoding.

File must be open in binary mode and positioned at offset 0. If BOM
record is present then it is assumed to be UTF-8 or UTF-16 encoded
file. GEDCOM header is searched for CHAR record and encoding name
is extracted from it, if BOM record is present then CHAR record
must match BOM-defined encoding.

:param file: File object, must be open in binary mode.
:param str errors: Controls error handling behavior during string
    decoding, accepts same values as standard `codecs.decode` method.
:param bool require_char: If True then exception is thrown if CHAR
    record is not found in a header, if False and CHAR is not in the
    header then codec determined from BOM or "gedcom" is returned.
:returns: Tuple (codec_name, bom_size)
:raises: :py:class:`CodecError` when codec name in file is unknown or
    when codec name in file contradicts codec determined from BOM.
:raises: :py:class:`UnicodeDecodeError` when codec fails to decode
    input lines and `errors` is set to "strict" (default).
[ "Look", "at", "file", "contents", "and", "guess", "its", "correct", "encoding", "." ]
d0e0cceaadf0a84cbf052705e3c27303b12e1757
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/parser.py#L60-L129
train
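The check_bom() helper called in the record above is not shown here; a minimal sketch of what such a BOM sniffer typically does (compare leading bytes against known signatures and leave the file positioned just past any BOM; the real ged4py helper is not reproduced):

import codecs
import io

def check_bom(file):
    """Return a codec name if a BOM is present, else None."""
    head = file.read(4)
    for bom, name in ((codecs.BOM_UTF8, 'utf-8'),
                      (codecs.BOM_UTF16_LE, 'utf-16-le'),
                      (codecs.BOM_UTF16_BE, 'utf-16-be')):
        if head.startswith(bom):
            file.seek(len(bom))
            return name
    file.seek(0)
    return None

print(check_bom(io.BytesIO(codecs.BOM_UTF8 + b'0 HEAD')))  # utf-8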
andy-z/ged4py
ged4py/parser.py
GedcomReader.records0
def records0(self, tag=None):
    """Iterator over all level=0 records.

    :param str tag: If ``None`` is given (default) then return all level=0
        records, otherwise return level=0 records with the given tag.
    """
    _log.debug("in records0")
    for offset, xtag in self.index0:
        _log.debug("  records0: offset: %s; xtag: %s", offset, xtag)
        if tag is None or tag == xtag:
            yield self.read_record(offset)
python
def records0(self, tag=None):
    """Iterator over all level=0 records.

    :param str tag: If ``None`` is given (default) then return all level=0
        records, otherwise return level=0 records with the given tag.
    """
    _log.debug("in records0")
    for offset, xtag in self.index0:
        _log.debug("  records0: offset: %s; xtag: %s", offset, xtag)
        if tag is None or tag == xtag:
            yield self.read_record(offset)
[ "def", "records0", "(", "self", ",", "tag", "=", "None", ")", ":", "_log", ".", "debug", "(", "\"in records0\"", ")", "for", "offset", ",", "xtag", "in", "self", ".", "index0", ":", "_log", ".", "debug", "(", "\" records0: offset: %s; xtag: %s\"", ",", "offset", ",", "xtag", ")", "if", "tag", "is", "None", "or", "tag", "==", "xtag", ":", "yield", "self", ".", "read_record", "(", "offset", ")" ]
Iterator over all level=0 records.

:param str tag: If ``None`` is given (default) then return all level=0
    records, otherwise return level=0 records with the given tag.
[ "Iterator", "over", "all", "level", "=", "0", "records", "." ]
d0e0cceaadf0a84cbf052705e3c27303b12e1757
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/parser.py#L319-L329
train
andy-z/ged4py
ged4py/parser.py
GedcomReader.read_record
def read_record(self, offset):
    """Read next complete record from a file starting at given position.

    Reads the record at given position and all its sub-records. Stops
    reading at EOF or next record with the same or higher (smaller) level
    number. File position after return from this method is not specified,
    re-position file if you want to read other records.

    :param int offset: Position in file to start reading from.
    :return: :py:class:`model.Record` instance or None if offset points
        past EOF.
    :raises: :py:exc:`ParserError` if `offsets` does not point to the
        beginning of a record or for any parsing errors.
    """
    _log.debug("in read_record(%s)", offset)
    stack = []  # stores per-level current records
    reclevel = None
    for gline in self.gedcom_lines(offset):
        _log.debug("  read_record, gline: %s", gline)
        level = gline.level

        if reclevel is None:
            # this is the first record, remember its level
            reclevel = level
        elif level <= reclevel:
            # stop at the record of the same or higher (smaller) level
            break

        # All previously seen records at this level and below can
        # be finalized now
        for rec in reversed(stack[level:]):
            # decode bytes value into string
            if rec:
                if rec.value is not None:
                    rec.value = rec.value.decode(self._encoding,
                                                 self._errors)
                rec.freeze()
                # _log.debug("    read_record, rec: %s", rec)
        del stack[level + 1:]

        # extend stack to fit this level (and make parent levels if needed)
        stack.extend([None] * (level + 1 - len(stack)))

        # make Record out of it (it can be updated later)
        parent = stack[level - 1] if level > 0 else None
        rec = self._make_record(parent, gline)

        # store as current record at this level
        stack[level] = rec

    for rec in reversed(stack[reclevel:]):
        if rec:
            if rec.value is not None:
                rec.value = rec.value.decode(self._encoding, self._errors)
            rec.freeze()
            _log.debug("    read_record, rec: %s", rec)

    return stack[reclevel] if stack else None
python
def read_record(self, offset):
    """Read next complete record from a file starting at given position.

    Reads the record at given position and all its sub-records. Stops
    reading at EOF or next record with the same or higher (smaller) level
    number. File position after return from this method is not specified,
    re-position file if you want to read other records.

    :param int offset: Position in file to start reading from.
    :return: :py:class:`model.Record` instance or None if offset points
        past EOF.
    :raises: :py:exc:`ParserError` if `offsets` does not point to the
        beginning of a record or for any parsing errors.
    """
    _log.debug("in read_record(%s)", offset)
    stack = []  # stores per-level current records
    reclevel = None
    for gline in self.gedcom_lines(offset):
        _log.debug("  read_record, gline: %s", gline)
        level = gline.level

        if reclevel is None:
            # this is the first record, remember its level
            reclevel = level
        elif level <= reclevel:
            # stop at the record of the same or higher (smaller) level
            break

        # All previously seen records at this level and below can
        # be finalized now
        for rec in reversed(stack[level:]):
            # decode bytes value into string
            if rec:
                if rec.value is not None:
                    rec.value = rec.value.decode(self._encoding,
                                                 self._errors)
                rec.freeze()
                # _log.debug("    read_record, rec: %s", rec)
        del stack[level + 1:]

        # extend stack to fit this level (and make parent levels if needed)
        stack.extend([None] * (level + 1 - len(stack)))

        # make Record out of it (it can be updated later)
        parent = stack[level - 1] if level > 0 else None
        rec = self._make_record(parent, gline)

        # store as current record at this level
        stack[level] = rec

    for rec in reversed(stack[reclevel:]):
        if rec:
            if rec.value is not None:
                rec.value = rec.value.decode(self._encoding, self._errors)
            rec.freeze()
            _log.debug("    read_record, rec: %s", rec)

    return stack[reclevel] if stack else None
[ "def", "read_record", "(", "self", ",", "offset", ")", ":", "_log", ".", "debug", "(", "\"in read_record(%s)\"", ",", "offset", ")", "stack", "=", "[", "]", "# stores per-level current records", "reclevel", "=", "None", "for", "gline", "in", "self", ".", "gedcom_lines", "(", "offset", ")", ":", "_log", ".", "debug", "(", "\" read_record, gline: %s\"", ",", "gline", ")", "level", "=", "gline", ".", "level", "if", "reclevel", "is", "None", ":", "# this is the first record, remember its level", "reclevel", "=", "level", "elif", "level", "<=", "reclevel", ":", "# stop at the record of the same or higher (smaller) level", "break", "# All previously seen records at this level and below can", "# be finalized now", "for", "rec", "in", "reversed", "(", "stack", "[", "level", ":", "]", ")", ":", "# decode bytes value into string", "if", "rec", ":", "if", "rec", ".", "value", "is", "not", "None", ":", "rec", ".", "value", "=", "rec", ".", "value", ".", "decode", "(", "self", ".", "_encoding", ",", "self", ".", "_errors", ")", "rec", ".", "freeze", "(", ")", "# _log.debug(\" read_record, rec: %s\", rec)", "del", "stack", "[", "level", "+", "1", ":", "]", "# extend stack to fit this level (and make parent levels if needed)", "stack", ".", "extend", "(", "[", "None", "]", "*", "(", "level", "+", "1", "-", "len", "(", "stack", ")", ")", ")", "# make Record out of it (it can be updated later)", "parent", "=", "stack", "[", "level", "-", "1", "]", "if", "level", ">", "0", "else", "None", "rec", "=", "self", ".", "_make_record", "(", "parent", ",", "gline", ")", "# store as current record at this level", "stack", "[", "level", "]", "=", "rec", "for", "rec", "in", "reversed", "(", "stack", "[", "reclevel", ":", "]", ")", ":", "if", "rec", ":", "if", "rec", ".", "value", "is", "not", "None", ":", "rec", ".", "value", "=", "rec", ".", "value", ".", "decode", "(", "self", ".", "_encoding", ",", "self", ".", "_errors", ")", "rec", ".", "freeze", "(", ")", "_log", ".", "debug", "(", "\" read_record, rec: %s\"", ",", "rec", ")", "return", "stack", "[", "reclevel", "]", "if", "stack", "else", "None" ]
Read next complete record from a file starting at given position.

Reads the record at given position and all its sub-records. Stops
reading at EOF or next record with the same or higher (smaller) level
number. File position after return from this method is not specified,
re-position file if you want to read other records.

:param int offset: Position in file to start reading from.
:return: :py:class:`model.Record` instance or None if offset points
    past EOF.
:raises: :py:exc:`ParserError` if `offsets` does not point to the
    beginning of a record or for any parsing errors.
[ "Read", "next", "complete", "record", "from", "a", "file", "starting", "at", "given", "position", "." ]
d0e0cceaadf0a84cbf052705e3c27303b12e1757
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/parser.py#L331-L388
train
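The per-level stack in read_record() is a general recipe for turning a flat, level-numbered line stream into a tree. A stripped-down sketch of the same bookkeeping on plain tuples (GEDCOM-flavored tags, but none of ged4py's types):

lines = [(0, 'INDI'), (1, 'NAME'), (2, 'GIVN'), (1, 'BIRT'), (2, 'DATE')]

stack = []
root = None
for level, tag in lines:
    node = {'tag': tag, 'children': []}
    del stack[level:]  # finalize anything at this level or deeper
    if stack:
        stack[-1]['children'].append(node)
    else:
        root = node
    stack.append(node)

print(root)  # INDI -> [NAME -> [GIVN], BIRT -> [DATE]]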
andy-z/ged4py
ged4py/parser.py
GedcomReader._make_record
def _make_record(self, parent, gline):
    """Process next record.

    This method created new record from the line read from file if needed
    and/or updates its parent record. If the parent record tag is ``BLOB``
    and new record tag is ``CONT`` then record is skipped entirely and None
    is returned. Otherwise if new record tag is ``CONT`` or ``CONC`` its
    value is added to parent value. For all other tags new record is made
    and it is added to parent sub_records attribute.

    Parameters
    ----------
    parent : `model.Record`
        Parent record of the new record
    gline : `gedcom_line`
        Current parsed line

    Returns
    -------
    `model.Record` or None
    """
    if parent and gline.tag in ("CONT", "CONC"):
        # concatenate, only for non-BLOBs
        if parent.tag != "BLOB":
            # have to be careful concatenating empty/None values
            value = gline.value
            if gline.tag == "CONT":
                value = b"\n" + (value or b"")
            if value is not None:
                parent.value = (parent.value or b"") + value
        return None

    # avoid infinite cycle
    dialect = model.DIALECT_DEFAULT
    if not (gline.level == 0 and gline.tag == "HEAD") and self._header:
        dialect = self.dialect
    rec = model.make_record(level=gline.level, xref_id=gline.xref_id,
                            tag=gline.tag, value=gline.value,
                            sub_records=[], offset=gline.offset,
                            dialect=dialect, parser=self)

    # add to parent's sub-records list
    if parent:
        parent.sub_records.append(rec)

    return rec
python
def _make_record(self, parent, gline):
    """Process next record.

    This method created new record from the line read from file if needed
    and/or updates its parent record. If the parent record tag is ``BLOB``
    and new record tag is ``CONT`` then record is skipped entirely and None
    is returned. Otherwise if new record tag is ``CONT`` or ``CONC`` its
    value is added to parent value. For all other tags new record is made
    and it is added to parent sub_records attribute.

    Parameters
    ----------
    parent : `model.Record`
        Parent record of the new record
    gline : `gedcom_line`
        Current parsed line

    Returns
    -------
    `model.Record` or None
    """
    if parent and gline.tag in ("CONT", "CONC"):
        # concatenate, only for non-BLOBs
        if parent.tag != "BLOB":
            # have to be careful concatenating empty/None values
            value = gline.value
            if gline.tag == "CONT":
                value = b"\n" + (value or b"")
            if value is not None:
                parent.value = (parent.value or b"") + value
        return None

    # avoid infinite cycle
    dialect = model.DIALECT_DEFAULT
    if not (gline.level == 0 and gline.tag == "HEAD") and self._header:
        dialect = self.dialect
    rec = model.make_record(level=gline.level, xref_id=gline.xref_id,
                            tag=gline.tag, value=gline.value,
                            sub_records=[], offset=gline.offset,
                            dialect=dialect, parser=self)

    # add to parent's sub-records list
    if parent:
        parent.sub_records.append(rec)

    return rec
[ "def", "_make_record", "(", "self", ",", "parent", ",", "gline", ")", ":", "if", "parent", "and", "gline", ".", "tag", "in", "(", "\"CONT\"", ",", "\"CONC\"", ")", ":", "# concatenate, only for non-BLOBs", "if", "parent", ".", "tag", "!=", "\"BLOB\"", ":", "# have to be careful concatenating empty/None values", "value", "=", "gline", ".", "value", "if", "gline", ".", "tag", "==", "\"CONT\"", ":", "value", "=", "b\"\\n\"", "+", "(", "value", "or", "b\"\"", ")", "if", "value", "is", "not", "None", ":", "parent", ".", "value", "=", "(", "parent", ".", "value", "or", "b\"\"", ")", "+", "value", "return", "None", "# avoid infinite cycle", "dialect", "=", "model", ".", "DIALECT_DEFAULT", "if", "not", "(", "gline", ".", "level", "==", "0", "and", "gline", ".", "tag", "==", "\"HEAD\"", ")", "and", "self", ".", "_header", ":", "dialect", "=", "self", ".", "dialect", "rec", "=", "model", ".", "make_record", "(", "level", "=", "gline", ".", "level", ",", "xref_id", "=", "gline", ".", "xref_id", ",", "tag", "=", "gline", ".", "tag", ",", "value", "=", "gline", ".", "value", ",", "sub_records", "=", "[", "]", ",", "offset", "=", "gline", ".", "offset", ",", "dialect", "=", "dialect", ",", "parser", "=", "self", ")", "# add to parent's sub-records list", "if", "parent", ":", "parent", ".", "sub_records", ".", "append", "(", "rec", ")", "return", "rec" ]
Process next record.

This method created new record from the line read from file if needed
and/or updates its parent record. If the parent record tag is ``BLOB``
and new record tag is ``CONT`` then record is skipped entirely and None
is returned. Otherwise if new record tag is ``CONT`` or ``CONC`` its
value is added to parent value. For all other tags new record is made
and it is added to parent sub_records attribute.

Parameters
----------
parent : `model.Record`
    Parent record of the new record
gline : `gedcom_line`
    Current parsed line

Returns
-------
`model.Record` or None
[ "Process", "next", "record", "." ]
d0e0cceaadf0a84cbf052705e3c27303b12e1757
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/parser.py#L390-L436
train
newfies-dialer/python-msspeak
msspeak/command_line.py
validate_options
def validate_options(subscription_key, text):
    """
    Perform sanity checks on threshold values
    """
    if not subscription_key or len(subscription_key) == 0:
        print 'Error: Warning the option subscription_key should contain a string.'
        print USAGE
        sys.exit(3)

    if not text or len(text) == 0:
        print 'Error: Warning the option text should contain a string.'
        print USAGE
        sys.exit(3)
python
def validate_options(subscription_key, text):
    """
    Perform sanity checks on threshold values
    """
    if not subscription_key or len(subscription_key) == 0:
        print 'Error: Warning the option subscription_key should contain a string.'
        print USAGE
        sys.exit(3)

    if not text or len(text) == 0:
        print 'Error: Warning the option text should contain a string.'
        print USAGE
        sys.exit(3)
[ "def", "validate_options", "(", "subscription_key", ",", "text", ")", ":", "if", "not", "subscription_key", "or", "len", "(", "subscription_key", ")", "==", "0", ":", "print", "'Error: Warning the option subscription_key should contain a string.'", "print", "USAGE", "sys", ".", "exit", "(", "3", ")", "if", "not", "text", "or", "len", "(", "text", ")", "==", "0", ":", "print", "'Error: Warning the option text should contain a string.'", "print", "USAGE", "sys", ".", "exit", "(", "3", ")" ]
Perform sanity checks on threshold values
[ "Perform", "sanity", "checks", "on", "threshold", "values" ]
106475122be73df152865c4fe6e9388caf974085
https://github.com/newfies-dialer/python-msspeak/blob/106475122be73df152865c4fe6e9388caf974085/msspeak/command_line.py#L40-L51
train
newfies-dialer/python-msspeak
msspeak/command_line.py
main
def main():
    """
    Parse options and process text to Microsoft Translate
    """
    # Parse arguments
    parser = OptionParser()
    parser.add_option('-n', '--subscription_key', dest='subscription_key',
                      help='subscription_key for authentication')
    parser.add_option('-t', '--text', dest='text',
                      help='text to synthesize')
    parser.add_option('-l', '--language', dest='language',
                      help='language')
    parser.add_option('-g', '--gender', dest='gender',
                      help='gender')
    parser.add_option('-d', '--directory', dest='directory',
                      help='directory to store the file')

    (options, args) = parser.parse_args()
    subscription_key = options.subscription_key
    text = options.text
    language = options.language
    gender = options.gender
    directory = options.directory

    # Perform sanity checks on options
    validate_options(subscription_key, text)

    if not directory:
        directory = default_directory

    if not language:
        language = default_language

    if not gender:
        gender = default_gender

    # format = 'riff-16khz-16bit-mono-pcm'
    format = 'riff-8khz-8bit-mono-mulaw'
    # lang = 'en-AU'
    # gender = 'Female'

    tts_msspeak = MSSpeak(subscription_key, '/tmp/')
    tts_msspeak.set_cache(False)
    output_filename = tts_msspeak.speak(text, language, gender, format)

    print 'Recorded TTS to %s%s' % (directory, output_filename)
python
def main():
    """
    Parse options and process text to Microsoft Translate
    """
    # Parse arguments
    parser = OptionParser()
    parser.add_option('-n', '--subscription_key', dest='subscription_key',
                      help='subscription_key for authentication')
    parser.add_option('-t', '--text', dest='text',
                      help='text to synthesize')
    parser.add_option('-l', '--language', dest='language',
                      help='language')
    parser.add_option('-g', '--gender', dest='gender',
                      help='gender')
    parser.add_option('-d', '--directory', dest='directory',
                      help='directory to store the file')

    (options, args) = parser.parse_args()
    subscription_key = options.subscription_key
    text = options.text
    language = options.language
    gender = options.gender
    directory = options.directory

    # Perform sanity checks on options
    validate_options(subscription_key, text)

    if not directory:
        directory = default_directory

    if not language:
        language = default_language

    if not gender:
        gender = default_gender

    # format = 'riff-16khz-16bit-mono-pcm'
    format = 'riff-8khz-8bit-mono-mulaw'
    # lang = 'en-AU'
    # gender = 'Female'

    tts_msspeak = MSSpeak(subscription_key, '/tmp/')
    tts_msspeak.set_cache(False)
    output_filename = tts_msspeak.speak(text, language, gender, format)

    print 'Recorded TTS to %s%s' % (directory, output_filename)
[ "def", "main", "(", ")", ":", "# Parse arguments", "parser", "=", "OptionParser", "(", ")", "parser", ".", "add_option", "(", "'-n'", ",", "'--subscription_key'", ",", "dest", "=", "'subscription_key'", ",", "help", "=", "'subscription_key for authentication'", ")", "parser", ".", "add_option", "(", "'-t'", ",", "'--text'", ",", "dest", "=", "'text'", ",", "help", "=", "'text to synthesize'", ")", "parser", ".", "add_option", "(", "'-l'", ",", "'--language'", ",", "dest", "=", "'language'", ",", "help", "=", "'language'", ")", "parser", ".", "add_option", "(", "'-g'", ",", "'--gender'", ",", "dest", "=", "'gender'", ",", "help", "=", "'gender'", ")", "parser", ".", "add_option", "(", "'-d'", ",", "'--directory'", ",", "dest", "=", "'directory'", ",", "help", "=", "'directory to store the file'", ")", "(", "options", ",", "args", ")", "=", "parser", ".", "parse_args", "(", ")", "subscription_key", "=", "options", ".", "subscription_key", "text", "=", "options", ".", "text", "language", "=", "options", ".", "language", "gender", "=", "options", ".", "gender", "directory", "=", "options", ".", "directory", "# Perform sanity checks on options", "validate_options", "(", "subscription_key", ",", "text", ")", "if", "not", "directory", ":", "directory", "=", "default_directory", "if", "not", "language", ":", "language", "=", "default_language", "if", "not", "gender", ":", "gender", "=", "default_gender", "# format = 'riff-16khz-16bit-mono-pcm'", "format", "=", "'riff-8khz-8bit-mono-mulaw'", "# lang = 'en-AU'", "# gender = 'Female'", "tts_msspeak", "=", "MSSpeak", "(", "subscription_key", ",", "'/tmp/'", ")", "tts_msspeak", ".", "set_cache", "(", "False", ")", "output_filename", "=", "tts_msspeak", ".", "speak", "(", "text", ",", "language", ",", "gender", ",", "format", ")", "print", "'Recorded TTS to %s%s'", "%", "(", "directory", ",", "output_filename", ")" ]
Parse options and process text to Microsoft Translate
[ "Parse", "options", "and", "process", "text", "to", "Microsoft", "Translate" ]
106475122be73df152865c4fe6e9388caf974085
https://github.com/newfies-dialer/python-msspeak/blob/106475122be73df152865c4fe6e9388caf974085/msspeak/command_line.py#L54-L99
train
digidotcom/python-wvalib
wva/vehicle.py
VehicleDataElement.sample
def sample(self):
    """Get the current value of this vehicle data element

    The returned value will be a namedtuple with 'value' and 'timestamp'
    elements.

    Example::

        speed_el = wva.get_vehicle_data_element('VehicleSpeed')
        for i in xrange(10):
            speed = speed_el.sample()
            print("Speed: %0.2f @ %s" % (speed.value, speed.timestamp))
            time.sleep(1)
    """
    # Response: {'VehicleSpeed': {'timestamp': '2015-03-20T18:00:49Z', 'value': 223.368515}}
    data = self._http_client.get("vehicle/data/{}".format(self.name))[self.name]
    dt = arrow.get(data["timestamp"]).datetime
    value = data["value"]
    return VehicleDataSample(value, dt)
python
def sample(self):
    """Get the current value of this vehicle data element

    The returned value will be a namedtuple with 'value' and 'timestamp'
    elements.

    Example::

        speed_el = wva.get_vehicle_data_element('VehicleSpeed')
        for i in xrange(10):
            speed = speed_el.sample()
            print("Speed: %0.2f @ %s" % (speed.value, speed.timestamp))
            time.sleep(1)
    """
    # Response: {'VehicleSpeed': {'timestamp': '2015-03-20T18:00:49Z', 'value': 223.368515}}
    data = self._http_client.get("vehicle/data/{}".format(self.name))[self.name]
    dt = arrow.get(data["timestamp"]).datetime
    value = data["value"]
    return VehicleDataSample(value, dt)
[ "def", "sample", "(", "self", ")", ":", "# Response: {'VehicleSpeed': {'timestamp': '2015-03-20T18:00:49Z', 'value': 223.368515}}", "data", "=", "self", ".", "_http_client", ".", "get", "(", "\"vehicle/data/{}\"", ".", "format", "(", "self", ".", "name", ")", ")", "[", "self", ".", "name", "]", "dt", "=", "arrow", ".", "get", "(", "data", "[", "\"timestamp\"", "]", ")", ".", "datetime", "value", "=", "data", "[", "\"value\"", "]", "return", "VehicleDataSample", "(", "value", ",", "dt", ")" ]
Get the current value of this vehicle data element

    The returned value will be a namedtuple with 'value' and 'timestamp'
    elements.

    Example::

        speed_el = wva.get_vehicle_data_element('VehicleSpeed')
        for i in xrange(10):
            speed = speed_el.sample()
            print("Speed: %0.2f @ %s" % (speed.value, speed.timestamp))
            time.sleep(1)
[ "Get", "the", "current", "value", "of", "this", "vehicle", "data", "element" ]
4252735e2775f80ebaffd813fbe84046d26906b3
https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/vehicle.py#L20-L36
train
gtaylor/django-athumb
athumb/backends/s3boto.py
S3BotoStorage._get_or_create_bucket
def _get_or_create_bucket(self, name):
    """Retrieves a bucket if it exists, otherwise creates it."""
    try:
        return self.connection.get_bucket(name)
    except S3ResponseError, e:
        if AUTO_CREATE_BUCKET:
            return self.connection.create_bucket(name)
        raise ImproperlyConfigured, ("Bucket specified by "
            "AWS_STORAGE_BUCKET_NAME does not exist. Buckets can be "
            "automatically created by setting AWS_AUTO_CREATE_BUCKET=True")
python
def _get_or_create_bucket(self, name):
    """Retrieves a bucket if it exists, otherwise creates it."""
    try:
        return self.connection.get_bucket(name)
    except S3ResponseError, e:
        if AUTO_CREATE_BUCKET:
            return self.connection.create_bucket(name)
        raise ImproperlyConfigured, ("Bucket specified by "
            "AWS_STORAGE_BUCKET_NAME does not exist. Buckets can be "
            "automatically created by setting AWS_AUTO_CREATE_BUCKET=True")
[ "def", "_get_or_create_bucket", "(", "self", ",", "name", ")", ":", "try", ":", "return", "self", ".", "connection", ".", "get_bucket", "(", "name", ")", "except", "S3ResponseError", ",", "e", ":", "if", "AUTO_CREATE_BUCKET", ":", "return", "self", ".", "connection", ".", "create_bucket", "(", "name", ")", "raise", "ImproperlyConfigured", ",", "(", "\"Bucket specified by \"", "\"AWS_STORAGE_BUCKET_NAME does not exist. Buckets can be \"", "\"automatically created by setting AWS_AUTO_CREATE_BUCKET=True\"", ")" ]
Retrieves a bucket if it exists, otherwise creates it.
[ "Retrieves", "a", "bucket", "if", "it", "exists", "otherwise", "creates", "it", "." ]
69261ace0dff81e33156a54440874456a7b38dfb
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/backends/s3boto.py#L128-L137
train
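The method above is an instance of the get-or-create pattern: try the cheap lookup first and create only on failure. A minimal, library-independent sketch of the same idea (fetch and create are hypothetical callables standing in for the boto calls, and LookupError stands in for S3ResponseError; none of these names are part of django-athumb):

def get_or_create(fetch, create, auto_create=True):
    # Try the cheap lookup first; fall back to creation only when allowed.
    try:
        return fetch()
    except LookupError:
        if auto_create:
            return create()
        raise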
gtaylor/django-athumb
athumb/backends/s3boto.py
S3BotoStorage._compress_content
def _compress_content(self, content):
    """Gzip a given string."""
    zbuf = StringIO()
    zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    zfile.write(content.read())
    zfile.close()
    content.file = zbuf
    return content
python
def _compress_content(self, content):
    """Gzip a given string."""
    zbuf = StringIO()
    zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    zfile.write(content.read())
    zfile.close()
    content.file = zbuf
    return content
[ "def", "_compress_content", "(", "self", ",", "content", ")", ":", "zbuf", "=", "StringIO", "(", ")", "zfile", "=", "GzipFile", "(", "mode", "=", "'wb'", ",", "compresslevel", "=", "6", ",", "fileobj", "=", "zbuf", ")", "zfile", ".", "write", "(", "content", ".", "read", "(", ")", ")", "zfile", ".", "close", "(", ")", "content", ".", "file", "=", "zbuf", "return", "content" ]
Gzip a given string.
[ "Gzip", "a", "given", "string", "." ]
69261ace0dff81e33156a54440874456a7b38dfb
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/backends/s3boto.py#L143-L150
train
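The same in-memory gzip round-trip can be sketched with io.BytesIO, the Python 3 counterpart of the StringIO buffer used above. This is a standalone illustration, not part of django-athumb:

import gzip
import io

def gzip_bytes(payload, level=6):
    # Compress an in-memory payload into a fresh buffer, as
    # _compress_content does for uploaded file content.
    buf = io.BytesIO()
    with gzip.GzipFile(mode='wb', compresslevel=level, fileobj=buf) as zfile:
        zfile.write(payload)
    return buf.getvalue()

assert gzip.decompress(gzip_bytes(b'hello')) == b'hello'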
gtaylor/django-athumb
athumb/backends/s3boto.py
S3BotoStorage_AllPublic.url
def url(self, name):
    """
    Since we assume all public storage with no authorization keys, we can
    just simply dump out a URL rather than having to query S3 for new keys.
    """
    name = urllib.quote_plus(self._clean_name(name), safe='/')

    if self.bucket_cname:
        return "http://%s/%s" % (self.bucket_cname, name)
    elif self.host:
        return "http://%s/%s/%s" % (self.host, self.bucket_name, name)
    # No host ? Then it's the default region
    return "http://s3.amazonaws.com/%s/%s" % (self.bucket_name, name)
python
def url(self, name):
    """
    Since we assume all public storage with no authorization keys, we can
    just simply dump out a URL rather than having to query S3 for new keys.
    """
    name = urllib.quote_plus(self._clean_name(name), safe='/')

    if self.bucket_cname:
        return "http://%s/%s" % (self.bucket_cname, name)
    elif self.host:
        return "http://%s/%s/%s" % (self.host, self.bucket_name, name)
    # No host ? Then it's the default region
    return "http://s3.amazonaws.com/%s/%s" % (self.bucket_name, name)
[ "def", "url", "(", "self", ",", "name", ")", ":", "name", "=", "urllib", ".", "quote_plus", "(", "self", ".", "_clean_name", "(", "name", ")", ",", "safe", "=", "'/'", ")", "if", "self", ".", "bucket_cname", ":", "return", "\"http://%s/%s\"", "%", "(", "self", ".", "bucket_cname", ",", "name", ")", "elif", "self", ".", "host", ":", "return", "\"http://%s/%s/%s\"", "%", "(", "self", ".", "host", ",", "self", ".", "bucket_name", ",", "name", ")", "# No host ? Then it's the default region", "return", "\"http://s3.amazonaws.com/%s/%s\"", "%", "(", "self", ".", "bucket_name", ",", "name", ")" ]
Since we assume all public storage with no authorization keys, we can just simply dump out a URL rather than having to query S3 for new keys.
[ "Since", "we", "assume", "all", "public", "storage", "with", "no", "authorization", "keys", "we", "can", "just", "simply", "dump", "out", "a", "URL", "rather", "than", "having", "to", "query", "S3", "for", "new", "keys", "." ]
69261ace0dff81e33156a54440874456a7b38dfb
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/backends/s3boto.py#L260-L272
train
pennlabs/penn-sdk-python
penn/dining.py
normalize_weekly
def normalize_weekly(data):
    """Normalization for dining menu data"""
    if "tblMenu" not in data["result_data"]["Document"]:
        data["result_data"]["Document"]["tblMenu"] = []
    if isinstance(data["result_data"]["Document"]["tblMenu"], dict):
        data["result_data"]["Document"]["tblMenu"] = [data["result_data"]["Document"]["tblMenu"]]
    for day in data["result_data"]["Document"]["tblMenu"]:
        if "tblDayPart" not in day:
            continue
        if isinstance(day["tblDayPart"], dict):
            day["tblDayPart"] = [day["tblDayPart"]]
        for meal in day["tblDayPart"]:
            if isinstance(meal["tblStation"], dict):
                meal["tblStation"] = [meal["tblStation"]]
            for station in meal["tblStation"]:
                if isinstance(station["tblItem"], dict):
                    station["tblItem"] = [station["tblItem"]]
    return data
python
def normalize_weekly(data):
    """Normalization for dining menu data"""
    if "tblMenu" not in data["result_data"]["Document"]:
        data["result_data"]["Document"]["tblMenu"] = []
    if isinstance(data["result_data"]["Document"]["tblMenu"], dict):
        data["result_data"]["Document"]["tblMenu"] = [data["result_data"]["Document"]["tblMenu"]]
    for day in data["result_data"]["Document"]["tblMenu"]:
        if "tblDayPart" not in day:
            continue
        if isinstance(day["tblDayPart"], dict):
            day["tblDayPart"] = [day["tblDayPart"]]
        for meal in day["tblDayPart"]:
            if isinstance(meal["tblStation"], dict):
                meal["tblStation"] = [meal["tblStation"]]
            for station in meal["tblStation"]:
                if isinstance(station["tblItem"], dict):
                    station["tblItem"] = [station["tblItem"]]
    return data
[ "def", "normalize_weekly", "(", "data", ")", ":", "if", "\"tblMenu\"", "not", "in", "data", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", ":", "data", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", "[", "\"tblMenu\"", "]", "=", "[", "]", "if", "isinstance", "(", "data", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", "[", "\"tblMenu\"", "]", ",", "dict", ")", ":", "data", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", "[", "\"tblMenu\"", "]", "=", "[", "data", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", "[", "\"tblMenu\"", "]", "]", "for", "day", "in", "data", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", "[", "\"tblMenu\"", "]", ":", "if", "\"tblDayPart\"", "not", "in", "day", ":", "continue", "if", "isinstance", "(", "day", "[", "\"tblDayPart\"", "]", ",", "dict", ")", ":", "day", "[", "\"tblDayPart\"", "]", "=", "[", "day", "[", "\"tblDayPart\"", "]", "]", "for", "meal", "in", "day", "[", "\"tblDayPart\"", "]", ":", "if", "isinstance", "(", "meal", "[", "\"tblStation\"", "]", ",", "dict", ")", ":", "meal", "[", "\"tblStation\"", "]", "=", "[", "meal", "[", "\"tblStation\"", "]", "]", "for", "station", "in", "meal", "[", "\"tblStation\"", "]", ":", "if", "isinstance", "(", "station", "[", "\"tblItem\"", "]", ",", "dict", ")", ":", "station", "[", "\"tblItem\"", "]", "=", "[", "station", "[", "\"tblItem\"", "]", "]", "return", "data" ]
Normalization for dining menu data
[ "Normalization", "for", "dining", "menu", "data" ]
31ff12c20d69438d63bc7a796f83ce4f4c828396
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/dining.py#L29-L46
train
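normalize_weekly applies one idiom at every nesting level: XML-derived JSON collapses a single child element into a bare dict, so each level is wrapped back into a list before iteration. The idiom in isolation (ensure_list is a hypothetical helper, not part of the SDK):

def ensure_list(value):
    # A single XML child arrives as a dict; several arrive as a list.
    return [value] if isinstance(value, dict) else value

day = {"tblDayPart": {"txtDayPartDescription": "Lunch"}}
day["tblDayPart"] = ensure_list(day["tblDayPart"])
assert isinstance(day["tblDayPart"], list)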
pennlabs/penn-sdk-python
penn/dining.py
get_meals
def get_meals(v2_response, building_id):
    """Extract meals into old format from a DiningV2 JSON response"""
    result_data = v2_response["result_data"]
    meals = []
    day_parts = result_data["days"][0]["cafes"][building_id]["dayparts"][0]
    for meal in day_parts:
        stations = []
        for station in meal["stations"]:
            items = []
            for item_id in station["items"]:
                item = result_data["items"][item_id]
                new_item = {}
                new_item["txtTitle"] = item["label"]
                new_item["txtPrice"] = ""
                new_item["txtNutritionInfo"] = ""
                new_item["txtDescription"] = item["description"]
                new_item["tblSide"] = ""
                new_item["tblFarmToFork"] = ""
                attrs = [{"description": item["cor_icon"][attr]} for attr in item["cor_icon"]]
                if len(attrs) == 1:
                    new_item["tblAttributes"] = {"txtAttribute": attrs[0]}
                elif len(attrs) > 1:
                    new_item["tblAttributes"] = {"txtAttribute": attrs}
                else:
                    new_item["tblAttributes"] = ""
                if isinstance(item["options"], list):
                    item["options"] = {}
                if "values" in item["options"]:
                    for side in item["options"]["values"]:
                        new_item["tblSide"] = {"txtSideName": side["label"]}
                items.append(new_item)
            stations.append({"tblItem": items, "txtStationDescription": station["label"]})
        meals.append({"tblStation": stations, "txtDayPartDescription": meal["label"]})
    return meals
python
def get_meals(v2_response, building_id):
    """Extract meals into old format from a DiningV2 JSON response"""
    result_data = v2_response["result_data"]
    meals = []
    day_parts = result_data["days"][0]["cafes"][building_id]["dayparts"][0]
    for meal in day_parts:
        stations = []
        for station in meal["stations"]:
            items = []
            for item_id in station["items"]:
                item = result_data["items"][item_id]
                new_item = {}
                new_item["txtTitle"] = item["label"]
                new_item["txtPrice"] = ""
                new_item["txtNutritionInfo"] = ""
                new_item["txtDescription"] = item["description"]
                new_item["tblSide"] = ""
                new_item["tblFarmToFork"] = ""
                attrs = [{"description": item["cor_icon"][attr]} for attr in item["cor_icon"]]
                if len(attrs) == 1:
                    new_item["tblAttributes"] = {"txtAttribute": attrs[0]}
                elif len(attrs) > 1:
                    new_item["tblAttributes"] = {"txtAttribute": attrs}
                else:
                    new_item["tblAttributes"] = ""
                if isinstance(item["options"], list):
                    item["options"] = {}
                if "values" in item["options"]:
                    for side in item["options"]["values"]:
                        new_item["tblSide"] = {"txtSideName": side["label"]}
                items.append(new_item)
            stations.append({"tblItem": items, "txtStationDescription": station["label"]})
        meals.append({"tblStation": stations, "txtDayPartDescription": meal["label"]})
    return meals
[ "def", "get_meals", "(", "v2_response", ",", "building_id", ")", ":", "result_data", "=", "v2_response", "[", "\"result_data\"", "]", "meals", "=", "[", "]", "day_parts", "=", "result_data", "[", "\"days\"", "]", "[", "0", "]", "[", "\"cafes\"", "]", "[", "building_id", "]", "[", "\"dayparts\"", "]", "[", "0", "]", "for", "meal", "in", "day_parts", ":", "stations", "=", "[", "]", "for", "station", "in", "meal", "[", "\"stations\"", "]", ":", "items", "=", "[", "]", "for", "item_id", "in", "station", "[", "\"items\"", "]", ":", "item", "=", "result_data", "[", "\"items\"", "]", "[", "item_id", "]", "new_item", "=", "{", "}", "new_item", "[", "\"txtTitle\"", "]", "=", "item", "[", "\"label\"", "]", "new_item", "[", "\"txtPrice\"", "]", "=", "\"\"", "new_item", "[", "\"txtNutritionInfo\"", "]", "=", "\"\"", "new_item", "[", "\"txtDescription\"", "]", "=", "item", "[", "\"description\"", "]", "new_item", "[", "\"tblSide\"", "]", "=", "\"\"", "new_item", "[", "\"tblFarmToFork\"", "]", "=", "\"\"", "attrs", "=", "[", "{", "\"description\"", ":", "item", "[", "\"cor_icon\"", "]", "[", "attr", "]", "}", "for", "attr", "in", "item", "[", "\"cor_icon\"", "]", "]", "if", "len", "(", "attrs", ")", "==", "1", ":", "new_item", "[", "\"tblAttributes\"", "]", "=", "{", "\"txtAttribute\"", ":", "attrs", "[", "0", "]", "}", "elif", "len", "(", "attrs", ")", ">", "1", ":", "new_item", "[", "\"tblAttributes\"", "]", "=", "{", "\"txtAttribute\"", ":", "attrs", "}", "else", ":", "new_item", "[", "\"tblAttributes\"", "]", "=", "\"\"", "if", "isinstance", "(", "item", "[", "\"options\"", "]", ",", "list", ")", ":", "item", "[", "\"options\"", "]", "=", "{", "}", "if", "\"values\"", "in", "item", "[", "\"options\"", "]", ":", "for", "side", "in", "item", "[", "\"options\"", "]", "[", "\"values\"", "]", ":", "new_item", "[", "\"tblSide\"", "]", "=", "{", "\"txtSideName\"", ":", "side", "[", "\"label\"", "]", "}", "items", ".", "append", "(", "new_item", ")", "stations", ".", "append", "(", "{", "\"tblItem\"", ":", "items", ",", "\"txtStationDescription\"", ":", "station", "[", "\"label\"", "]", "}", ")", "meals", ".", "append", "(", "{", "\"tblStation\"", ":", "stations", ",", "\"txtDayPartDescription\"", ":", "meal", "[", "\"label\"", "]", "}", ")", "return", "meals" ]
Extract meals into old format from a DiningV2 JSON response
[ "Extract", "meals", "into", "old", "format", "from", "a", "DiningV2", "JSON", "response" ]
31ff12c20d69438d63bc7a796f83ce4f4c828396
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/dining.py#L49-L82
train
pennlabs/penn-sdk-python
penn/dining.py
DiningV2.menu
def menu(self, venue_id, date):
    """Get the menu for the venue corresponding to venue_id, on date.

    :param venue_id:
        A string representing the id of a venue, e.g. "abc".
    :param date:
        A string representing the date of a venue's menu, e.g. "2015-09-20".

    >>> commons_menu = din.menu("593", "2015-09-20")
    """
    query = "&date=" + date
    response = self._request(V2_ENDPOINTS['MENUS'] + venue_id + query)
    return response
python
def menu(self, venue_id, date):
    """Get the menu for the venue corresponding to venue_id, on date.

    :param venue_id:
        A string representing the id of a venue, e.g. "abc".
    :param date:
        A string representing the date of a venue's menu, e.g. "2015-09-20".

    >>> commons_menu = din.menu("593", "2015-09-20")
    """
    query = "&date=" + date
    response = self._request(V2_ENDPOINTS['MENUS'] + venue_id + query)
    return response
[ "def", "menu", "(", "self", ",", "venue_id", ",", "date", ")", ":", "query", "=", "\"&date=\"", "+", "date", "response", "=", "self", ".", "_request", "(", "V2_ENDPOINTS", "[", "'MENUS'", "]", "+", "venue_id", "+", "query", ")", "return", "response" ]
Get the menu for the venue corresponding to venue_id, on date.

    :param venue_id:
        A string representing the id of a venue, e.g. "abc".
    :param date:
        A string representing the date of a venue's menu, e.g. "2015-09-20".

    >>> commons_menu = din.menu("593", "2015-09-20")
[ "Get", "the", "menu", "for", "the", "venue", "corresponding", "to", "venue_id", "on", "date", "." ]
31ff12c20d69438d63bc7a796f83ce4f4c828396
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/dining.py#L117-L131
train
pennlabs/penn-sdk-python
penn/dining.py
Dining.venues
def venues(self):
    """Get a list of all venue objects.

    >>> venues = din.venues()
    """
    response = self._request(V2_ENDPOINTS['VENUES'])

    # Normalize `dateHours` to array
    for venue in response["result_data"]["document"]["venue"]:
        if venue.get("id") in VENUE_NAMES:
            venue["name"] = VENUE_NAMES[venue.get("id")]
        if isinstance(venue.get("dateHours"), dict):
            venue["dateHours"] = [venue["dateHours"]]
        if "dateHours" in venue:
            for dh in venue["dateHours"]:
                if isinstance(dh.get("meal"), dict):
                    dh["meal"] = [dh["meal"]]
    return response
python
def venues(self):
    """Get a list of all venue objects.

    >>> venues = din.venues()
    """
    response = self._request(V2_ENDPOINTS['VENUES'])

    # Normalize `dateHours` to array
    for venue in response["result_data"]["document"]["venue"]:
        if venue.get("id") in VENUE_NAMES:
            venue["name"] = VENUE_NAMES[venue.get("id")]
        if isinstance(venue.get("dateHours"), dict):
            venue["dateHours"] = [venue["dateHours"]]
        if "dateHours" in venue:
            for dh in venue["dateHours"]:
                if isinstance(dh.get("meal"), dict):
                    dh["meal"] = [dh["meal"]]
    return response
[ "def", "venues", "(", "self", ")", ":", "response", "=", "self", ".", "_request", "(", "V2_ENDPOINTS", "[", "'VENUES'", "]", ")", "# Normalize `dateHours` to array", "for", "venue", "in", "response", "[", "\"result_data\"", "]", "[", "\"document\"", "]", "[", "\"venue\"", "]", ":", "if", "venue", ".", "get", "(", "\"id\"", ")", "in", "VENUE_NAMES", ":", "venue", "[", "\"name\"", "]", "=", "VENUE_NAMES", "[", "venue", ".", "get", "(", "\"id\"", ")", "]", "if", "isinstance", "(", "venue", ".", "get", "(", "\"dateHours\"", ")", ",", "dict", ")", ":", "venue", "[", "\"dateHours\"", "]", "=", "[", "venue", "[", "\"dateHours\"", "]", "]", "if", "\"dateHours\"", "in", "venue", ":", "for", "dh", "in", "venue", "[", "\"dateHours\"", "]", ":", "if", "isinstance", "(", "dh", ".", "get", "(", "\"meal\"", ")", ",", "dict", ")", ":", "dh", "[", "\"meal\"", "]", "=", "[", "dh", "[", "\"meal\"", "]", "]", "return", "response" ]
Get a list of all venue objects.

    >>> venues = din.venues()
[ "Get", "a", "list", "of", "all", "venue", "objects", "." ]
31ff12c20d69438d63bc7a796f83ce4f4c828396
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/dining.py#L157-L173
train
pennlabs/penn-sdk-python
penn/dining.py
Dining.menu_daily
def menu_daily(self, building_id):
    """Get a menu object corresponding to the daily menu for the
    venue with building_id.

    :param building_id:
        A string representing the id of a building, e.g. "abc".

    >>> commons_today = din.menu_daily("593")
    """
    today = str(datetime.date.today())
    v2_response = DiningV2(self.bearer, self.token).menu(building_id, today)
    response = {'result_data': {'Document': {}}}
    response["result_data"]["Document"]["menudate"] = datetime.datetime.strptime(today, '%Y-%m-%d').strftime('%-m/%d/%Y')
    if building_id in VENUE_NAMES:
        response["result_data"]["Document"]["location"] = VENUE_NAMES[building_id]
    else:
        response["result_data"]["Document"]["location"] = v2_response["result_data"]["days"][0]["cafes"][building_id]["name"]
    response["result_data"]["Document"]["tblMenu"] = {"tblDayPart": get_meals(v2_response, building_id)}
    return response
python
def menu_daily(self, building_id):
    """Get a menu object corresponding to the daily menu for the
    venue with building_id.

    :param building_id:
        A string representing the id of a building, e.g. "abc".

    >>> commons_today = din.menu_daily("593")
    """
    today = str(datetime.date.today())
    v2_response = DiningV2(self.bearer, self.token).menu(building_id, today)
    response = {'result_data': {'Document': {}}}
    response["result_data"]["Document"]["menudate"] = datetime.datetime.strptime(today, '%Y-%m-%d').strftime('%-m/%d/%Y')
    if building_id in VENUE_NAMES:
        response["result_data"]["Document"]["location"] = VENUE_NAMES[building_id]
    else:
        response["result_data"]["Document"]["location"] = v2_response["result_data"]["days"][0]["cafes"][building_id]["name"]
    response["result_data"]["Document"]["tblMenu"] = {"tblDayPart": get_meals(v2_response, building_id)}
    return response
[ "def", "menu_daily", "(", "self", ",", "building_id", ")", ":", "today", "=", "str", "(", "datetime", ".", "date", ".", "today", "(", ")", ")", "v2_response", "=", "DiningV2", "(", "self", ".", "bearer", ",", "self", ".", "token", ")", ".", "menu", "(", "building_id", ",", "today", ")", "response", "=", "{", "'result_data'", ":", "{", "'Document'", ":", "{", "}", "}", "}", "response", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", "[", "\"menudate\"", "]", "=", "datetime", ".", "datetime", ".", "strptime", "(", "today", ",", "'%Y-%m-%d'", ")", ".", "strftime", "(", "'%-m/%d/%Y'", ")", "if", "building_id", "in", "VENUE_NAMES", ":", "response", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", "[", "\"location\"", "]", "=", "VENUE_NAMES", "[", "building_id", "]", "else", ":", "response", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", "[", "\"location\"", "]", "=", "v2_response", "[", "\"result_data\"", "]", "[", "\"days\"", "]", "[", "0", "]", "[", "\"cafes\"", "]", "[", "building_id", "]", "[", "\"name\"", "]", "response", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", "[", "\"tblMenu\"", "]", "=", "{", "\"tblDayPart\"", ":", "get_meals", "(", "v2_response", ",", "building_id", ")", "}", "return", "response" ]
Get a menu object corresponding to the daily menu for the
    venue with building_id.

    :param building_id:
        A string representing the id of a building, e.g. "abc".

    >>> commons_today = din.menu_daily("593")
[ "Get", "a", "menu", "object", "corresponding", "to", "the", "daily", "menu", "for", "the", "venue", "with", "building_id", "." ]
31ff12c20d69438d63bc7a796f83ce4f4c828396
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/dining.py#L175-L194
train
pennlabs/penn-sdk-python
penn/dining.py
Dining.menu_weekly
def menu_weekly(self, building_id):
    """Get an array of menu objects corresponding to the weekly menu for
    the venue with building_id.

    :param building_id:
        A string representing the id of a building, e.g. "abc".

    >>> commons_week = din.menu_weekly("593")
    """
    din = DiningV2(self.bearer, self.token)
    response = {'result_data': {'Document': {}}}
    days = []
    for i in range(7):
        date = str(datetime.date.today() + datetime.timedelta(days=i))
        v2_response = din.menu(building_id, date)
        if building_id in VENUE_NAMES:
            response["result_data"]["Document"]["location"] = VENUE_NAMES[building_id]
        else:
            response["result_data"]["Document"]["location"] = v2_response["result_data"]["days"][0]["cafes"][building_id]["name"]
        formatted_date = datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%-m/%d/%Y')
        days.append({"tblDayPart": get_meals(v2_response, building_id), "menudate": formatted_date})
    response["result_data"]["Document"]["tblMenu"] = days
    return normalize_weekly(response)
python
def menu_weekly(self, building_id):
    """Get an array of menu objects corresponding to the weekly menu for
    the venue with building_id.

    :param building_id:
        A string representing the id of a building, e.g. "abc".

    >>> commons_week = din.menu_weekly("593")
    """
    din = DiningV2(self.bearer, self.token)
    response = {'result_data': {'Document': {}}}
    days = []
    for i in range(7):
        date = str(datetime.date.today() + datetime.timedelta(days=i))
        v2_response = din.menu(building_id, date)
        if building_id in VENUE_NAMES:
            response["result_data"]["Document"]["location"] = VENUE_NAMES[building_id]
        else:
            response["result_data"]["Document"]["location"] = v2_response["result_data"]["days"][0]["cafes"][building_id]["name"]
        formatted_date = datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%-m/%d/%Y')
        days.append({"tblDayPart": get_meals(v2_response, building_id), "menudate": formatted_date})
    response["result_data"]["Document"]["tblMenu"] = days
    return normalize_weekly(response)
[ "def", "menu_weekly", "(", "self", ",", "building_id", ")", ":", "din", "=", "DiningV2", "(", "self", ".", "bearer", ",", "self", ".", "token", ")", "response", "=", "{", "'result_data'", ":", "{", "'Document'", ":", "{", "}", "}", "}", "days", "=", "[", "]", "for", "i", "in", "range", "(", "7", ")", ":", "date", "=", "str", "(", "datetime", ".", "date", ".", "today", "(", ")", "+", "datetime", ".", "timedelta", "(", "days", "=", "i", ")", ")", "v2_response", "=", "din", ".", "menu", "(", "building_id", ",", "date", ")", "if", "building_id", "in", "VENUE_NAMES", ":", "response", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", "[", "\"location\"", "]", "=", "VENUE_NAMES", "[", "building_id", "]", "else", ":", "response", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", "[", "\"location\"", "]", "=", "v2_response", "[", "\"result_data\"", "]", "[", "\"days\"", "]", "[", "0", "]", "[", "\"cafes\"", "]", "[", "building_id", "]", "[", "\"name\"", "]", "formatted_date", "=", "datetime", ".", "datetime", ".", "strptime", "(", "date", ",", "'%Y-%m-%d'", ")", ".", "strftime", "(", "'%-m/%d/%Y'", ")", "days", ".", "append", "(", "{", "\"tblDayPart\"", ":", "get_meals", "(", "v2_response", ",", "building_id", ")", ",", "\"menudate\"", ":", "formatted_date", "}", ")", "response", "[", "\"result_data\"", "]", "[", "\"Document\"", "]", "[", "\"tblMenu\"", "]", "=", "days", "return", "normalize_weekly", "(", "response", ")" ]
Get an array of menu objects corresponding to the weekly menu for
    the venue with building_id.

    :param building_id:
        A string representing the id of a building, e.g. "abc".

    >>> commons_week = din.menu_weekly("593")
[ "Get", "an", "array", "of", "menu", "objects", "corresponding", "to", "the", "weekly", "menu", "for", "the", "venue", "with", "building_id", "." ]
31ff12c20d69438d63bc7a796f83ce4f4c828396
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/dining.py#L196-L218
train
geophysics-ubonn/reda
lib/reda/containers/TDIP.py
TDIP.to_cr
def to_cr(self):
    """Convert container to a complex resistivity container, using the
    CPA-conversion.

    Kemna, 2000

    Van Voorhis, G. D.; Nelson, P. H.; Drake, T. L.: Complex resistivity
    spectra of porphyry copper mineralization. Geophysics (1973 Jan 1)
    38 (1): 49-60.

    Kemna, A.; Räkers, E.; Binley, A.: Application of complex resistivity
    tomography to field data from a kerosene-contaminated site (Gold Open
    Access). DOI: 10.3997/2214-4609.201407300

    Fiandaca, G.; Auken, E.; Christiansen, A. V.; Gazoty, A. (2012):
    "Time-domain-induced polarization: Full-decay forward modeling and 1D
    laterally constrained inversion of Cole-Cole parameters." GEOPHYSICS,
    77(3), E213-E225. https://doi.org/10.1190/geo2011-0217.1
    """
    data_new = self.data.copy()
    data_new['rpha'] = -1.5 * data_new['chargeability']
    # now that we have magnitude and phase, compute the impedance Zt
    data_new['Zt'] = data_new['r'] * np.exp(data_new['rpha'] * 1j / 1000.0)

    cr = reda.CR(data=data_new)
    return cr
python
def to_cr(self):
    """Convert container to a complex resistivity container, using the
    CPA-conversion.

    Kemna, 2000

    Van Voorhis, G. D.; Nelson, P. H.; Drake, T. L.: Complex resistivity
    spectra of porphyry copper mineralization. Geophysics (1973 Jan 1)
    38 (1): 49-60.

    Kemna, A.; Räkers, E.; Binley, A.: Application of complex resistivity
    tomography to field data from a kerosene-contaminated site (Gold Open
    Access). DOI: 10.3997/2214-4609.201407300

    Fiandaca, G.; Auken, E.; Christiansen, A. V.; Gazoty, A. (2012):
    "Time-domain-induced polarization: Full-decay forward modeling and 1D
    laterally constrained inversion of Cole-Cole parameters." GEOPHYSICS,
    77(3), E213-E225. https://doi.org/10.1190/geo2011-0217.1
    """
    data_new = self.data.copy()
    data_new['rpha'] = -1.5 * data_new['chargeability']
    # now that we have magnitude and phase, compute the impedance Zt
    data_new['Zt'] = data_new['r'] * np.exp(data_new['rpha'] * 1j / 1000.0)

    cr = reda.CR(data=data_new)
    return cr
[ "def", "to_cr", "(", "self", ")", ":", "data_new", "=", "self", ".", "data", ".", "copy", "(", ")", "data_new", "[", "'rpha'", "]", "=", "-", "1.5", "*", "data_new", "[", "'chargeability'", "]", "# now that we have magnitude and phase, compute the impedance Zt", "data_new", "[", "'Zt'", "]", "=", "data_new", "[", "'r'", "]", "*", "np", ".", "exp", "(", "data_new", "[", "'rpha'", "]", "*", "1j", "/", "1000.0", ")", "cr", "=", "reda", ".", "CR", "(", "data", "=", "data_new", ")", "return", "cr" ]
Convert container to a complex resistivity container, using the
    CPA-conversion.

    Kemna, 2000

    Van Voorhis, G. D.; Nelson, P. H.; Drake, T. L.: Complex resistivity
    spectra of porphyry copper mineralization. Geophysics (1973 Jan 1)
    38 (1): 49-60.

    Kemna, A.; Räkers, E.; Binley, A.: Application of complex resistivity
    tomography to field data from a kerosene-contaminated site (Gold Open
    Access). DOI: 10.3997/2214-4609.201407300

    Fiandaca, G.; Auken, E.; Christiansen, A. V.; Gazoty, A. (2012):
    "Time-domain-induced polarization: Full-decay forward modeling and 1D
    laterally constrained inversion of Cole-Cole parameters." GEOPHYSICS,
    77(3), E213-E225. https://doi.org/10.1190/geo2011-0217.1
[ "Convert", "container", "to", "a", "complex", "resistivity", "container", "using", "the", "CPA", "-", "conversion", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/TDIP.py#L174-L200
train
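The two lines of arithmetic in to_cr are the whole CPA conversion: the phase in milliradians is a fixed multiple of the chargeability, and magnitude and phase are then recombined into a complex impedance. With chargeability $m$ and resistance magnitude $R$, as in the code:

    \varphi\,[\mathrm{mrad}] = -1.5\,m, \qquad Z_t = R\,e^{\,i\varphi/1000}

For example, $m = 10$ mV/V gives $\varphi = -15$ mrad and $Z_t = R\,e^{-0.015\,i}$.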
frasertweedale/ledgertools
ltlib/config.py
apply
def apply(filter):
    """Manufacture decorator that filters return value with given function.

    ``filter``:
        Callable that takes a single parameter.
    """
    def decorator(callable):
        return lambda *args, **kwargs: filter(callable(*args, **kwargs))
    return decorator
python
def apply(filter):
    """Manufacture decorator that filters return value with given function.

    ``filter``:
        Callable that takes a single parameter.
    """
    def decorator(callable):
        return lambda *args, **kwargs: filter(callable(*args, **kwargs))
    return decorator
[ "def", "apply", "(", "filter", ")", ":", "def", "decorator", "(", "callable", ")", ":", "return", "lambda", "*", "args", ",", "*", "*", "kwargs", ":", "filter", "(", "callable", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "return", "decorator" ]
Manufacture decorator that filters return value with given function.

    ``filter``:
        Callable that takes a single parameter.
[ "Manufacture", "decorator", "that", "filters", "return", "value", "with", "given", "function", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/config.py#L22-L30
train
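A usage sketch of the decorator above, assuming the apply from ltlib/config.py is in scope: the manufactured decorator post-processes every return value through the given filter.

@apply(list)
def naturals(n):
    # The decorator feeds the returned generator through list().
    return (i for i in range(n))

assert naturals(3) == [0, 1, 2]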
frasertweedale/ledgertools
ltlib/config.py
format_outpat
def format_outpat(outpat, xn):
    """
    Format an outpat for the given transaction.

    Format the given output filename pattern.  The pattern should be a
    format string with any combination of the following named fields:

    ``year``
      The year of the transaction.
    ``month``
      The month of the transaction, with leading zero for single-digit
      months.
    ``fy``
      The financial year of the transaction (being the year in which the
      financial year of the transaction *ends*).  A financial year runs
      from 1 July to 30 June.
    ``date``
      The date object itself.  The format string may specify any
      attribute of the date object, e.g. ``{date.day}``.  This field is
      deprecated.
    """
    return outpat.format(
        year=str(xn.date.year),
        month='{:02}'.format(xn.date.month),
        fy=str(xn.date.year if xn.date.month < 7 else xn.date.year + 1),
        date=xn.date
    )
python
def format_outpat(outpat, xn):
    """
    Format an outpat for the given transaction.

    Format the given output filename pattern.  The pattern should be a
    format string with any combination of the following named fields:

    ``year``
      The year of the transaction.
    ``month``
      The month of the transaction, with leading zero for single-digit
      months.
    ``fy``
      The financial year of the transaction (being the year in which the
      financial year of the transaction *ends*).  A financial year runs
      from 1 July to 30 June.
    ``date``
      The date object itself.  The format string may specify any
      attribute of the date object, e.g. ``{date.day}``.  This field is
      deprecated.
    """
    return outpat.format(
        year=str(xn.date.year),
        month='{:02}'.format(xn.date.month),
        fy=str(xn.date.year if xn.date.month < 7 else xn.date.year + 1),
        date=xn.date
    )
[ "def", "format_outpat", "(", "outpat", ",", "xn", ")", ":", "return", "outpat", ".", "format", "(", "year", "=", "str", "(", "xn", ".", "date", ".", "year", ")", ",", "month", "=", "'{:02}'", ".", "format", "(", "xn", ".", "date", ".", "month", ")", ",", "fy", "=", "str", "(", "xn", ".", "date", ".", "year", "if", "xn", ".", "date", ".", "month", "<", "7", "else", "xn", ".", "date", ".", "year", "+", "1", ")", ",", "date", "=", "xn", ".", "date", ")" ]
Format an outpat for the given transaction.

    Format the given output filename pattern.  The pattern should be a
    format string with any combination of the following named fields:

    ``year``
      The year of the transaction.
    ``month``
      The month of the transaction, with leading zero for single-digit
      months.
    ``fy``
      The financial year of the transaction (being the year in which the
      financial year of the transaction *ends*).  A financial year runs
      from 1 July to 30 June.
    ``date``
      The date object itself.  The format string may specify any
      attribute of the date object, e.g. ``{date.day}``.  This field is
      deprecated.
[ "Format", "an", "outpat", "for", "the", "given", "transaction", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/config.py#L33-L60
train
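The ``fy`` field is the only non-obvious computation: January to June belong to the financial year ending in the same calendar year, July to December to the one ending the following year. A usage sketch, assuming format_outpat is in scope (Xn is a hypothetical stand-in for a ledgertools transaction with a date attribute):

import datetime

class Xn(object):
    def __init__(self, date):
        self.date = date

outpat = '{fy}/{year}-{month}.txt'
print(format_outpat(outpat, Xn(datetime.date(2011, 6, 30))))  # 2011/2011-06.txt
print(format_outpat(outpat, Xn(datetime.date(2011, 7, 1))))   # 2012/2011-07.txt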
frasertweedale/ledgertools
ltlib/config.py
Config.get
def get(self, name, acc=None, default=None):
    """Return the named config for the given account.

    If an account is given, first checks the account space for the name.
    If no account given, or if the name not found in the account space,
    look for the name in the global config space.  If still not found,
    return the default, if given, otherwise ``None``.
    """
    if acc in self.data['accounts'] and name in self.data['accounts'][acc]:
        return self.data['accounts'][acc][name]
    if name in self.data:
        return self.data[name]
    return default
python
def get(self, name, acc=None, default=None):
    """Return the named config for the given account.

    If an account is given, first checks the account space for the name.
    If no account given, or if the name not found in the account space,
    look for the name in the global config space.  If still not found,
    return the default, if given, otherwise ``None``.
    """
    if acc in self.data['accounts'] and name in self.data['accounts'][acc]:
        return self.data['accounts'][acc][name]
    if name in self.data:
        return self.data[name]
    return default
[ "def", "get", "(", "self", ",", "name", ",", "acc", "=", "None", ",", "default", "=", "None", ")", ":", "if", "acc", "in", "self", ".", "data", "[", "'accounts'", "]", "and", "name", "in", "self", ".", "data", "[", "'accounts'", "]", "[", "acc", "]", ":", "return", "self", ".", "data", "[", "'accounts'", "]", "[", "acc", "]", "[", "name", "]", "if", "name", "in", "self", ".", "data", ":", "return", "self", ".", "data", "[", "name", "]", "return", "default" ]
Return the named config for the given account. If an account is given, first checks the account space for the name. If no account given, or if the name not found in the account space, look for the name in the global config space. If still not found, return the default, if given, otherwise ``None``.
[ "Return", "the", "named", "config", "for", "the", "given", "account", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/config.py#L84-L96
train
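The lookup precedence in Config.get is: account section first, then the global space, then the caller's default. A self-contained restatement of that precedence (lookup is a hypothetical helper mirroring the method body, not part of the ledgertools API):

def lookup(data, name, acc=None, default=None):
    # Account-level values shadow global ones; the default comes last.
    acct = data.get('accounts', {}).get(acc, {})
    if name in acct:
        return acct[name]
    return data.get(name, default)

data = {'accounts': {'savings': {'outpat': 'sav.txt'}}, 'outpat': 'all.txt'}
assert lookup(data, 'outpat', acc='savings') == 'sav.txt'
assert lookup(data, 'outpat') == 'all.txt'
assert lookup(data, 'missing', default=0) == 0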
frasertweedale/ledgertools
ltlib/config.py
Config.outdir
def outdir(self, acc=None):
    """Return the outdir for the given account.

    Attempts to create the directory if it does not exist.
    """
    rootdir = self.rootdir()
    outdir = self.get('outdir', acc=acc)
    dir = os.path.join(rootdir, outdir) if rootdir and outdir else None
    if not os.path.exists(dir):
        os.makedirs(dir)
    return dir
python
def outdir(self, acc=None):
    """Return the outdir for the given account.

    Attempts to create the directory if it does not exist.
    """
    rootdir = self.rootdir()
    outdir = self.get('outdir', acc=acc)
    dir = os.path.join(rootdir, outdir) if rootdir and outdir else None
    if not os.path.exists(dir):
        os.makedirs(dir)
    return dir
[ "def", "outdir", "(", "self", ",", "acc", "=", "None", ")", ":", "rootdir", "=", "self", ".", "rootdir", "(", ")", "outdir", "=", "self", ".", "get", "(", "'outdir'", ",", "acc", "=", "acc", ")", "dir", "=", "os", ".", "path", ".", "join", "(", "rootdir", ",", "outdir", ")", "if", "rootdir", "and", "outdir", "else", "None", "if", "not", "os", ".", "path", ".", "exists", "(", "dir", ")", ":", "os", ".", "makedirs", "(", "dir", ")", "return", "dir" ]
Return the outdir for the given account. Attempts to create the directory if it does not exist.
[ "Return", "the", "outdir", "for", "the", "given", "account", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/config.py#L104-L114
train
frasertweedale/ledgertools
ltlib/config.py
Config.outpat
def outpat(self, acc=None):
    """
    Determine the full outfile pattern for the given account.

    Return None if not specified.
    """
    outdir = self.outdir(acc)
    outpat = self.get('outpat', acc=acc)
    return os.path.join(outdir, outpat) if outdir and outpat else None
python
def outpat(self, acc=None):
    """
    Determine the full outfile pattern for the given account.

    Return None if not specified.
    """
    outdir = self.outdir(acc)
    outpat = self.get('outpat', acc=acc)
    return os.path.join(outdir, outpat) if outdir and outpat else None
[ "def", "outpat", "(", "self", ",", "acc", "=", "None", ")", ":", "outdir", "=", "self", ".", "outdir", "(", "acc", ")", "outpat", "=", "self", ".", "get", "(", "'outpat'", ",", "acc", "=", "acc", ")", "return", "os", ".", "path", ".", "join", "(", "outdir", ",", "outpat", ")", "if", "outdir", "and", "outpat", "else", "None" ]
Determine the full outfile pattern for the given account. Return None if not specified.
[ "Determine", "the", "full", "outfile", "pattern", "for", "the", "given", "account", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/config.py#L116-L124
train
frasertweedale/ledgertools
ltlib/config.py
Config.rulesdir
def rulesdir(self, acc=None):
    """
    Determine the rulesdir for the given account.

    Return None if not specified.
    """
    rootdir = self.rootdir()
    rulesdir = self.get('rulesdir', acc=acc, default=[])
    return os.path.join(rootdir, rulesdir) \
        if rootdir and rulesdir else None
python
def rulesdir(self, acc=None):
    """
    Determine the rulesdir for the given account.

    Return None if not specified.
    """
    rootdir = self.rootdir()
    rulesdir = self.get('rulesdir', acc=acc, default=[])
    return os.path.join(rootdir, rulesdir) \
        if rootdir and rulesdir else None
[ "def", "rulesdir", "(", "self", ",", "acc", "=", "None", ")", ":", "rootdir", "=", "self", ".", "rootdir", "(", ")", "rulesdir", "=", "self", ".", "get", "(", "'rulesdir'", ",", "acc", "=", "acc", ",", "default", "=", "[", "]", ")", "return", "os", ".", "path", ".", "join", "(", "rootdir", ",", "rulesdir", ")", "if", "rootdir", "and", "rulesdir", "else", "None" ]
Determine the rulesdir for the given account. Return None if not specified.
[ "Determine", "the", "rulesdir", "for", "the", "given", "account", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/config.py#L127-L136
train
frasertweedale/ledgertools
ltlib/config.py
Config.rulefiles
def rulefiles(self, acc=None):
    """Return a list of rulefiles for the given account.

    Returns an empty list if none specified.
    """
    rulesdir = self.rulesdir(acc)
    rules = [os.path.join(rulesdir, x) for x in self.get('rules', acc, [])]
    if acc is not None:
        rules += self.rulefiles(acc=None)
    return rules
python
def rulefiles(self, acc=None):
    """Return a list of rulefiles for the given account.

    Returns an empty list if none specified.
    """
    rulesdir = self.rulesdir(acc)
    rules = [os.path.join(rulesdir, x) for x in self.get('rules', acc, [])]
    if acc is not None:
        rules += self.rulefiles(acc=None)
    return rules
[ "def", "rulefiles", "(", "self", ",", "acc", "=", "None", ")", ":", "rulesdir", "=", "self", ".", "rulesdir", "(", "acc", ")", "rules", "=", "[", "os", ".", "path", ".", "join", "(", "rulesdir", ",", "x", ")", "for", "x", "in", "self", ".", "get", "(", "'rules'", ",", "acc", ",", "[", "]", ")", "]", "if", "acc", "is", "not", "None", ":", "rules", "+=", "self", ".", "rulefiles", "(", "acc", "=", "None", ")", "return", "rules" ]
Return a list of rulefiles for the given account. Returns an empty list if none specified.
[ "Return", "a", "list", "of", "rulefiles", "for", "the", "given", "account", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/config.py#L138-L147
train
geophysics-ubonn/reda
lib/reda/utils/data.py
download_data
def download_data(identifier, outdir):
    """Download data from a separate data repository for testing.

    Parameters
    ----------
    identifier: string
        The identifier used to find the data set
    outdir: string
        unzip the data in this directory
    """
    # determine target
    if use_local_data_repository is not None:
        url_base = 'file:' + request.pathname2url(
            use_local_data_repository + os.sep)
    else:
        url_base = repository_url

    print('url_base: {}'.format(url_base))
    url = url_base + inventory_filename

    # download inventory file
    filename, headers = request.urlretrieve(url)

    df = pd.read_csv(
        filename,
        delim_whitespace=True,
        comment='#',
        header=None,
        names=['identifier', 'rel_path'],
    )

    # find relative path to data file
    rel_path_query = df.query('identifier == "{}"'.format(identifier))
    if rel_path_query.shape[0] == 0:
        raise Exception('identifier not found')
    rel_path = rel_path_query['rel_path'].values[0]

    # download the file
    url = url_base + rel_path
    print('data url: {}'.format(url))
    filename, headers = request.urlretrieve(url)

    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    zip_obj = zipfile.ZipFile(filename)
    zip_obj.extractall(outdir)
python
def download_data(identifier, outdir):
    """Download data from a separate data repository for testing.

    Parameters
    ----------
    identifier: string
        The identifier used to find the data set
    outdir: string
        unzip the data in this directory
    """
    # determine target
    if use_local_data_repository is not None:
        url_base = 'file:' + request.pathname2url(
            use_local_data_repository + os.sep)
    else:
        url_base = repository_url

    print('url_base: {}'.format(url_base))
    url = url_base + inventory_filename

    # download inventory file
    filename, headers = request.urlretrieve(url)

    df = pd.read_csv(
        filename,
        delim_whitespace=True,
        comment='#',
        header=None,
        names=['identifier', 'rel_path'],
    )

    # find relative path to data file
    rel_path_query = df.query('identifier == "{}"'.format(identifier))
    if rel_path_query.shape[0] == 0:
        raise Exception('identifier not found')
    rel_path = rel_path_query['rel_path'].values[0]

    # download the file
    url = url_base + rel_path
    print('data url: {}'.format(url))
    filename, headers = request.urlretrieve(url)

    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    zip_obj = zipfile.ZipFile(filename)
    zip_obj.extractall(outdir)
[ "def", "download_data", "(", "identifier", ",", "outdir", ")", ":", "# determine target", "if", "use_local_data_repository", "is", "not", "None", ":", "url_base", "=", "'file:'", "+", "request", ".", "pathname2url", "(", "use_local_data_repository", "+", "os", ".", "sep", ")", "else", ":", "url_base", "=", "repository_url", "print", "(", "'url_base: {}'", ".", "format", "(", "url_base", ")", ")", "url", "=", "url_base", "+", "inventory_filename", "# download inventory file", "filename", ",", "headers", "=", "request", ".", "urlretrieve", "(", "url", ")", "df", "=", "pd", ".", "read_csv", "(", "filename", ",", "delim_whitespace", "=", "True", ",", "comment", "=", "'#'", ",", "header", "=", "None", ",", "names", "=", "[", "'identifier'", ",", "'rel_path'", "]", ",", ")", "# find relative path to data file", "rel_path_query", "=", "df", ".", "query", "(", "'identifier == \"{}\"'", ".", "format", "(", "identifier", ")", ")", "if", "rel_path_query", ".", "shape", "[", "0", "]", "==", "0", ":", "raise", "Exception", "(", "'identifier not found'", ")", "rel_path", "=", "rel_path_query", "[", "'rel_path'", "]", ".", "values", "[", "0", "]", "# download the file", "url", "=", "url_base", "+", "rel_path", "print", "(", "'data url: {}'", ".", "format", "(", "url", ")", ")", "filename", ",", "headers", "=", "request", ".", "urlretrieve", "(", "url", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "outdir", ")", ":", "os", ".", "makedirs", "(", "outdir", ")", "zip_obj", "=", "zipfile", ".", "ZipFile", "(", "filename", ")", "zip_obj", ".", "extractall", "(", "outdir", ")" ]
Download data from a separate data repository for testing.

    Parameters
    ----------
    identifier: string
        The identifier used to find the data set
    outdir: string
        unzip the data in this directory
[ "Download", "data", "from", "a", "separate", "data", "repository", "for", "testing", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/data.py#L21-L66
train
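Stripped of the inventory lookup, the download step reduces to urlretrieve followed by ZipFile.extractall. A minimal standalone sketch (fetch_and_unzip is hypothetical, not part of reda):

import os
import zipfile
from urllib import request

def fetch_and_unzip(url, outdir):
    # Download to a temporary file, then unpack into outdir.
    filename, _headers = request.urlretrieve(url)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    with zipfile.ZipFile(filename) as zip_obj:
        zip_obj.extractall(outdir)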
frasertweedale/ledgertools
ltlib/score.py
ScoreSet.append
def append(self, item):
    """Append an item to the score set.

    item is a pair tuple, the first element of which is a valid dict key
    and the second of which is a numeric value.
    """
    if item in self:
        self.items[item[0]].append(item[1])
    else:
        self.items[item[0]] = [item[1]]
python
def append(self, item):
    """Append an item to the score set.

    item is a pair tuple, the first element of which is a valid dict key
    and the second of which is a numeric value.
    """
    if item in self:
        self.items[item[0]].append(item[1])
    else:
        self.items[item[0]] = [item[1]]
[ "def", "append", "(", "self", ",", "item", ")", ":", "if", "item", "in", "self", ":", "self", ".", "items", "[", "item", "[", "0", "]", "]", ".", "append", "(", "item", "[", "1", "]", ")", "else", ":", "self", ".", "items", "[", "item", "[", "0", "]", "]", "=", "[", "item", "[", "1", "]", "]" ]
Append an item to the score set. item is a pair tuple, the first element of which is a valid dict key and the second of which is a numeric value.
[ "Append", "an", "item", "to", "the", "score", "set", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/score.py#L34-L43
train
frasertweedale/ledgertools
ltlib/score.py
ScoreSet.scores
def scores(self):
    """Return a list of the items with their final scores.

    The final score of each item is its average score multiplied by the
    square root of its length.  This reduces to sum * len^(-1/2).
    """
    return map(
        lambda x: (x[0], sum(x[1]) * len(x[1]) ** -.5),
        iter(self.items.viewitems())
    )
python
def scores(self):
    """Return a list of the items with their final scores.

    The final score of each item is its average score multiplied by the
    square root of its length.  This reduces to sum * len^(-1/2).
    """
    return map(
        lambda x: (x[0], sum(x[1]) * len(x[1]) ** -.5),
        iter(self.items.viewitems())
    )
[ "def", "scores", "(", "self", ")", ":", "return", "map", "(", "lambda", "x", ":", "(", "x", "[", "0", "]", ",", "sum", "(", "x", "[", "1", "]", ")", "*", "len", "(", "x", "[", "1", "]", ")", "**", "-", ".5", ")", ",", "iter", "(", "self", ".", "items", ".", "viewitems", "(", ")", ")", ")" ]
Return a list of the items with their final scores. The final score of each item is its average score multiplied by the square root of its length. This reduces to sum * len^(-1/2).
[ "Return", "a", "list", "of", "the", "items", "with", "their", "final", "scores", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/score.py#L45-L54
train
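The identity in the docstring deserves a worked line: the final score is the mean multiplied by the square root of the count, which rewards keys backed by many observations without letting the count dominate. For a key $k$ with $n$ recorded scores $s_{k,1}, \ldots, s_{k,n}$:

    \mathrm{final}(k) = \bar{s}_k \sqrt{n} = \frac{\sqrt{n}}{n}\sum_{i=1}^{n} s_{k,i} = \Big(\sum_{i=1}^{n} s_{k,i}\Big)\, n^{-1/2}

Two scores of 4 therefore yield $8/\sqrt{2} \approx 5.66$ and outrank a single score of 5.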
frasertweedale/ledgertools
ltlib/score.py
ScoreSet.highest
def highest(self):
    """Return the items with the highest score.

    If this ScoreSet is empty, returns None.
    """
    scores = self.scores()
    if not scores:
        return None
    maxscore = max(map(score, scores))
    return filter(lambda x: score(x) == maxscore, scores)
python
def highest(self):
    """Return the items with the highest score.

    If this ScoreSet is empty, returns None.
    """
    scores = self.scores()
    if not scores:
        return None
    maxscore = max(map(score, scores))
    return filter(lambda x: score(x) == maxscore, scores)
[ "def", "highest", "(", "self", ")", ":", "scores", "=", "self", ".", "scores", "(", ")", "if", "not", "scores", ":", "return", "None", "maxscore", "=", "max", "(", "map", "(", "score", ",", "scores", ")", ")", "return", "filter", "(", "lambda", "x", ":", "score", "(", "x", ")", "==", "maxscore", ",", "scores", ")" ]
Return the items with the highest score.

    If this ScoreSet is empty, returns None.
[ "Return", "the", "items", "with", "the", "highest", "score", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/score.py#L56-L65
train
shexSpec/grammar
parsers/python/pyshexc/parser_impl/parser_context.py
ParserContext.is_empty_shape
def is_empty_shape(sh: ShExJ.Shape) -> bool:
    """ Determine whether sh has any value """
    return sh.closed is None and sh.expression is None and sh.extra is None and \
        sh.semActs is None
python
def is_empty_shape(sh: ShExJ.Shape) -> bool:
    """ Determine whether sh has any value """
    return sh.closed is None and sh.expression is None and sh.extra is None and \
        sh.semActs is None
[ "def", "is_empty_shape", "(", "sh", ":", "ShExJ", ".", "Shape", ")", "->", "bool", ":", "return", "sh", ".", "closed", "is", "None", "and", "sh", ".", "expression", "is", "None", "and", "sh", ".", "extra", "is", "None", "and", "sh", ".", "semActs", "is", "None" ]
Determine whether sh has any value
[ "Determine", "whether", "sh", "has", "any", "value" ]
4497cd1f73fa6703bca6e2cb53ba9c120f22e48c
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/parser_context.py#L188-L191
train
shexSpec/grammar
parsers/python/pyshexc/parser_impl/parser_context.py
ParserContext.fix_text_escapes
def fix_text_escapes(self, txt: str, quote_char: str) -> str:
    """ Fix the various text escapes """
    def _subf(matchobj):
        return matchobj.group(0).translate(self.re_trans_table)

    if quote_char:
        txt = re.sub(r'\\' + quote_char, quote_char, txt)
    return re.sub(r'\\.', _subf, txt, flags=re.MULTILINE + re.DOTALL + re.UNICODE)
python
def fix_text_escapes(self, txt: str, quote_char: str) -> str:
    """ Fix the various text escapes """
    def _subf(matchobj):
        return matchobj.group(0).translate(self.re_trans_table)

    if quote_char:
        txt = re.sub(r'\\' + quote_char, quote_char, txt)
    return re.sub(r'\\.', _subf, txt, flags=re.MULTILINE + re.DOTALL + re.UNICODE)
[ "def", "fix_text_escapes", "(", "self", ",", "txt", ":", "str", ",", "quote_char", ":", "str", ")", "->", "str", ":", "def", "_subf", "(", "matchobj", ")", ":", "return", "matchobj", ".", "group", "(", "0", ")", ".", "translate", "(", "self", ".", "re_trans_table", ")", "if", "quote_char", ":", "txt", "=", "re", ".", "sub", "(", "r'\\\\'", "+", "quote_char", ",", "quote_char", ",", "txt", ")", "return", "re", ".", "sub", "(", "r'\\\\.'", ",", "_subf", ",", "txt", ",", "flags", "=", "re", ".", "MULTILINE", "+", "re", ".", "DOTALL", "+", "re", ".", "UNICODE", ")" ]
Fix the various text escapes
[ "Fix", "the", "various", "text", "escapes" ]
4497cd1f73fa6703bca6e2cb53ba9c120f22e48c
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/parser_context.py#L195-L201
train
shexSpec/grammar
parsers/python/pyshexc/parser_impl/parser_context.py
ParserContext.fix_re_escapes
def fix_re_escapes(self, txt: str) -> str:
    """ The ShEx RE engine allows escaping any character. We have to remove
    that escape for everything except those that CAN be legitimately escaped

    :param txt: text to be escaped
    """
    def _subf(matchobj):
        # o = self.fix_text_escapes(matchobj.group(0))
        o = matchobj.group(0).translate(self.re_trans_table)
        if o[1] in '\b\f\n\t\r':
            return o[0] + 'bfntr'['\b\f\n\t\r'.index(o[1])]
        else:
            return o if o[1] in '\\.?*+^$()[]{|}' else o[1]

    return re.sub(r'\\.', _subf, txt, flags=re.MULTILINE + re.DOTALL + re.UNICODE)
python
def fix_re_escapes(self, txt: str) -> str:
    """ The ShEx RE engine allows escaping any character. We have to remove
    that escape for everything except those that CAN be legitimately escaped

    :param txt: text to be escaped
    """
    def _subf(matchobj):
        # o = self.fix_text_escapes(matchobj.group(0))
        o = matchobj.group(0).translate(self.re_trans_table)
        if o[1] in '\b\f\n\t\r':
            return o[0] + 'bfntr'['\b\f\n\t\r'.index(o[1])]
        else:
            return o if o[1] in '\\.?*+^$()[]{|}' else o[1]

    return re.sub(r'\\.', _subf, txt, flags=re.MULTILINE + re.DOTALL + re.UNICODE)
[ "def", "fix_re_escapes", "(", "self", ",", "txt", ":", "str", ")", "->", "str", ":", "def", "_subf", "(", "matchobj", ")", ":", "# o = self.fix_text_escapes(matchobj.group(0))", "o", "=", "matchobj", ".", "group", "(", "0", ")", ".", "translate", "(", "self", ".", "re_trans_table", ")", "if", "o", "[", "1", "]", "in", "'\\b\\f\\n\\t\\r'", ":", "return", "o", "[", "0", "]", "+", "'bfntr'", "[", "'\\b\\f\\n\\t\\r'", ".", "index", "(", "o", "[", "1", "]", ")", "]", "else", ":", "return", "o", "if", "o", "[", "1", "]", "in", "'\\\\.?*+^$()[]{|}'", "else", "o", "[", "1", "]", "return", "re", ".", "sub", "(", "r'\\\\.'", ",", "_subf", ",", "txt", ",", "flags", "=", "re", ".", "MULTILINE", "+", "re", ".", "DOTALL", "+", "re", ".", "UNICODE", ")" ]
The ShEx RE engine allows escaping any character. We have to remove
    that escape for everything except those that CAN be legitimately escaped

    :param txt: text to be escaped
[ "The", "ShEx", "RE", "engine", "allows", "escaping", "any", "character", ".", "We", "have", "to", "remove", "that", "escape", "for", "everything", "except", "those", "that", "CAN", "be", "legitimately", "escaped" ]
4497cd1f73fa6703bca6e2cb53ba9c120f22e48c
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/parser_context.py#L203-L217
train
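Both escape fixers build on re.sub with a callable replacement, where each match object chooses its own substitution. The mechanism in isolation (upper_escapes is a toy example, not part of pyshexc):

import re

def upper_escapes(txt):
    # Replace every backslash escape with the upper-cased escaped character.
    return re.sub(r'\\.', lambda m: m.group(0)[1].upper(), txt)

assert upper_escapes(r'a\bc') == 'aBc'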
pennlabs/penn-sdk-python
penn/registrar.py
Registrar._iter_response
def _iter_response(self, url, params=None):
    """Return an enumerable that iterates through a multi-page API request"""
    if params is None:
        params = {}
    params['page_number'] = 1

    # Last page lists itself as next page
    while True:
        response = self._request(url, params)
        for item in response['result_data']:
            yield item

        # Last page lists itself as next page
        if response['service_meta']['next_page_number'] == params['page_number']:
            break

        params['page_number'] += 1
python
def _iter_response(self, url, params=None):
    """Return an enumerable that iterates through a multi-page API request"""
    if params is None:
        params = {}
    params['page_number'] = 1

    # Last page lists itself as next page
    while True:
        response = self._request(url, params)
        for item in response['result_data']:
            yield item

        # Last page lists itself as next page
        if response['service_meta']['next_page_number'] == params['page_number']:
            break

        params['page_number'] += 1
[ "def", "_iter_response", "(", "self", ",", "url", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "params", "[", "'page_number'", "]", "=", "1", "# Last page lists itself as next page", "while", "True", ":", "response", "=", "self", ".", "_request", "(", "url", ",", "params", ")", "for", "item", "in", "response", "[", "'result_data'", "]", ":", "yield", "item", "# Last page lists itself as next page", "if", "response", "[", "'service_meta'", "]", "[", "'next_page_number'", "]", "==", "params", "[", "'page_number'", "]", ":", "break", "params", "[", "'page_number'", "]", "+=", "1" ]
Return an enumerable that iterates through a multi-page API request
[ "Return", "an", "enumerable", "that", "iterates", "through", "a", "multi", "-", "page", "API", "request" ]
31ff12c20d69438d63bc7a796f83ce4f4c828396
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/registrar.py#L28-L45
train
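The stop condition is the interesting part: this API signals the last page by listing itself as its own next page. A self-contained sketch of that contract, with a stubbed fetch standing in for the authenticated `_request` call (the stub data is invented for illustration):

PAGES = {
    1: {'result_data': ['a', 'b'], 'service_meta': {'next_page_number': 2}},
    2: {'result_data': ['c'], 'service_meta': {'next_page_number': 2}},  # last page points at itself
}

def fake_request(url, params):
    return PAGES[params['page_number']]

def iter_response(url, params=None):
    params = dict(params or {}, page_number=1)
    while True:
        response = fake_request(url, params)
        for item in response['result_data']:
            yield item
        if response['service_meta']['next_page_number'] == params['page_number']:
            break
        params['page_number'] += 1

print(list(iter_response('/search')))  # ['a', 'b', 'c']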
pennlabs/penn-sdk-python
penn/registrar.py
Registrar.search
def search(self, params, validate=False):
    """Return a generator of section objects for the given search params.

    :param params: Dictionary of course search parameters.
    :param validate: Optional. Set to true to enable request validation.

    >>> cis100s = r.search({'course_id': 'cis', 'course_level_at_or_below': '200'})
    """
    if self.val_info is None:
        self.val_info = self.search_params()

    if validate:
        errors = self.validate(self.val_info, params)

    if not validate or len(errors) == 0:
        return self._iter_response(ENDPOINTS['SEARCH'], params)
    else:
        return {'Errors': errors}
python
def search(self, params, validate=False):
    """Return a generator of section objects for the given search params.

    :param params: Dictionary of course search parameters.
    :param validate: Optional. Set to true to enable request validation.

    >>> cis100s = r.search({'course_id': 'cis', 'course_level_at_or_below': '200'})
    """
    if self.val_info is None:
        self.val_info = self.search_params()

    if validate:
        errors = self.validate(self.val_info, params)

    if not validate or len(errors) == 0:
        return self._iter_response(ENDPOINTS['SEARCH'], params)
    else:
        return {'Errors': errors}
[ "def", "search", "(", "self", ",", "params", ",", "validate", "=", "False", ")", ":", "if", "self", ".", "val_info", "is", "None", ":", "self", ".", "val_info", "=", "self", ".", "search_params", "(", ")", "if", "validate", ":", "errors", "=", "self", ".", "validate", "(", "self", ".", "val_info", ",", "params", ")", "if", "not", "validate", "or", "len", "(", "errors", ")", "==", "0", ":", "return", "self", ".", "_iter_response", "(", "ENDPOINTS", "[", "'SEARCH'", "]", ",", "params", ")", "else", ":", "return", "{", "'Errors'", ":", "errors", "}" ]
Return a generator of section objects for the given search params.

:param params: Dictionary of course search parameters.
:param validate: Optional. Set to true to enable request validation.

>>> cis100s = r.search({'course_id': 'cis', 'course_level_at_or_below': '200'})
[ "Return", "a", "generator", "of", "section", "objects", "for", "the", "given", "search", "params", "." ]
31ff12c20d69438d63bc7a796f83ce4f4c828396
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/registrar.py#L47-L63
train
pennlabs/penn-sdk-python
penn/registrar.py
Registrar.course
def course(self, dept, course_number):
    """Return an object of semester-independent course info. All arguments should be strings.

    >>> cis120 = r.course('cis', '120')
    """
    response = self._request(path.join(ENDPOINTS['CATALOG'], dept, course_number))
    return response['result_data'][0]
python
def course(self, dept, course_number):
    """Return an object of semester-independent course info. All arguments should be strings.

    >>> cis120 = r.course('cis', '120')
    """
    response = self._request(path.join(ENDPOINTS['CATALOG'], dept, course_number))
    return response['result_data'][0]
[ "def", "course", "(", "self", ",", "dept", ",", "course_number", ")", ":", "response", "=", "self", ".", "_request", "(", "path", ".", "join", "(", "ENDPOINTS", "[", "'CATALOG'", "]", ",", "dept", ",", "course_number", ")", ")", "return", "response", "[", "'result_data'", "]", "[", "0", "]" ]
Return an object of semester-independent course info. All arguments should be strings.

>>> cis120 = r.course('cis', '120')
[ "Return", "an", "object", "of", "semester", "-", "independent", "course", "info", ".", "All", "arguments", "should", "be", "strings", "." ]
31ff12c20d69438d63bc7a796f83ce4f4c828396
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/registrar.py#L65-L72
train
pennlabs/penn-sdk-python
penn/registrar.py
Registrar.section
def section(self, dept, course_number, sect_number):
    """Return a single section object for the given section. All arguments
    should be strings. Throws a `ValueError` if the section is not found.

    >>> lgst101_bfs = r.section('lgst', '101', '301')
    """
    section_id = dept + course_number + sect_number
    sections = self.search({'course_id': section_id})
    try:
        return next(sections)
    except StopIteration:
        raise ValueError('Section %s not found' % section_id)
python
def section(self, dept, course_number, sect_number):
    """Return a single section object for the given section. All arguments
    should be strings. Throws a `ValueError` if the section is not found.

    >>> lgst101_bfs = r.section('lgst', '101', '301')
    """
    section_id = dept + course_number + sect_number
    sections = self.search({'course_id': section_id})
    try:
        return next(sections)
    except StopIteration:
        raise ValueError('Section %s not found' % section_id)
[ "def", "section", "(", "self", ",", "dept", ",", "course_number", ",", "sect_number", ")", ":", "section_id", "=", "dept", "+", "course_number", "+", "sect_number", "sections", "=", "self", ".", "search", "(", "{", "'course_id'", ":", "section_id", "}", ")", "try", ":", "return", "next", "(", "sections", ")", "except", "StopIteration", ":", "raise", "ValueError", "(", "'Section %s not found'", "%", "section_id", ")" ]
Return a single section object for the given section. All arguments should be strings.
Throws a `ValueError` if the section is not found.

>>> lgst101_bfs = r.section('lgst', '101', '301')
[ "Return", "a", "single", "section", "object", "for", "the", "given", "section", ".", "All", "arguments", "should", "be", "strings", ".", "Throws", "a", "ValueError", "if", "the", "section", "is", "not", "found", "." ]
31ff12c20d69438d63bc7a796f83ce4f4c828396
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/registrar.py#L80-L91
train
Metatab/geoid
geoid/core.py
parse_to_gvid
def parse_to_gvid(v):
    """Parse an ACS Geoid or a GVID to a GVID"""
    from geoid.civick import GVid
    from geoid.acs import AcsGeoid

    m1 = ''

    try:
        return GVid.parse(v)
    except ValueError as e:
        m1 = str(e)

    try:
        return AcsGeoid.parse(v).convert(GVid)
    except ValueError as e:
        raise ValueError("Failed to parse to either ACS or GVid: {}; {}".format(m1, str(e)))
python
def parse_to_gvid(v):
    """Parse an ACS Geoid or a GVID to a GVID"""
    from geoid.civick import GVid
    from geoid.acs import AcsGeoid

    m1 = ''

    try:
        return GVid.parse(v)
    except ValueError as e:
        m1 = str(e)

    try:
        return AcsGeoid.parse(v).convert(GVid)
    except ValueError as e:
        raise ValueError("Failed to parse to either ACS or GVid: {}; {}".format(m1, str(e)))
[ "def", "parse_to_gvid", "(", "v", ")", ":", "from", "geoid", ".", "civick", "import", "GVid", "from", "geoid", ".", "acs", "import", "AcsGeoid", "m1", "=", "''", "try", ":", "return", "GVid", ".", "parse", "(", "v", ")", "except", "ValueError", "as", "e", ":", "m1", "=", "str", "(", "e", ")", "try", ":", "return", "AcsGeoid", ".", "parse", "(", "v", ")", ".", "convert", "(", "GVid", ")", "except", "ValueError", "as", "e", ":", "raise", "ValueError", "(", "\"Failed to parse to either ACS or GVid: {}; {}\"", ".", "format", "(", "m1", ",", "str", "(", "e", ")", ")", ")" ]
Parse an ACS Geoid or a GVID to a GVID
[ "Parse", "an", "ACS", "Geoid", "or", "a", "GVID", "to", "a", "GVID" ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L342-L357
train
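The try/except cascade above generalizes to any ordered list of parsers; a self-contained sketch of the pattern (the example parsers here are just builtins, not geoid classes):

def parse_with_fallback(value, parsers):
    """Try each parser in turn, keeping every failure message for the final error."""
    messages = []
    for parse in parsers:
        try:
            return parse(value)
        except ValueError as e:
            messages.append(str(e))
    raise ValueError('all parsers failed: ' + '; '.join(messages))

print(parse_with_fallback('12', [int, float]))   # 12
print(parse_with_fallback('1.5', [int, float]))  # 1.5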
Metatab/geoid
geoid/core.py
base62_decode
def base62_decode(string):
    """Decode a Base X encoded string into the number

    Arguments:
    - `string`: The encoded string
    - `alphabet`: The alphabet to use for encoding

    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'

    base = len(alphabet)
    strlen = len(string)
    num = 0

    idx = 0
    for char in string:
        power = (strlen - (idx + 1))
        num += alphabet.index(char) * (base ** power)
        idx += 1

    return int(num)
python
def base62_decode(string):
    """Decode a Base X encoded string into the number

    Arguments:
    - `string`: The encoded string
    - `alphabet`: The alphabet to use for encoding

    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'

    base = len(alphabet)
    strlen = len(string)
    num = 0

    idx = 0
    for char in string:
        power = (strlen - (idx + 1))
        num += alphabet.index(char) * (base ** power)
        idx += 1

    return int(num)
[ "def", "base62_decode", "(", "string", ")", ":", "alphabet", "=", "'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'", "base", "=", "len", "(", "alphabet", ")", "strlen", "=", "len", "(", "string", ")", "num", "=", "0", "idx", "=", "0", "for", "char", "in", "string", ":", "power", "=", "(", "strlen", "-", "(", "idx", "+", "1", ")", ")", "num", "+=", "alphabet", ".", "index", "(", "char", ")", "*", "(", "base", "**", "power", ")", "idx", "+=", "1", "return", "int", "(", "num", ")" ]
Decode a Base X encoded string into the number

Arguments:
- `string`: The encoded string
- `alphabet`: The alphabet to use for encoding

Stolen from: http://stackoverflow.com/a/1119769/1144479
[ "Decode", "a", "Base", "X", "encoded", "string", "into", "the", "number" ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L384-L405
train
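The record only ships the decoder; the matching encoder is the usual divmod loop over the same alphabet. A sketch (not part of this module) that round-trips with `base62_decode`:

ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'

def base62_encode(num):
    """Encode a non-negative integer with the same 62-character alphabet."""
    if num == 0:
        return ALPHABET[0]
    digits = []
    while num:
        num, rem = divmod(num, 62)
        digits.append(ALPHABET[rem])
    return ''.join(reversed(digits))

print(base62_encode(12345))  # '3d7', and base62_decode('3d7') == 12345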
Metatab/geoid
geoid/core.py
make_classes
def make_classes(base_class, module):
    """Create derived classes and put them into the same module as the base class.

    This function is called at the end of each of the derived class modules: acs, census, civick and tiger.
    It will create a set of new derived classes in the module, one for each of the entries in the
    `summary_levels` dict.
    """
    from functools import partial

    for k in names:
        cls = base_class.class_factory(k.capitalize())
        cls.augment()
        setattr(module, k.capitalize(), cls)

    setattr(module, 'get_class', partial(get_class, module))
python
def make_classes(base_class, module):
    """Create derived classes and put them into the same module as the base class.

    This function is called at the end of each of the derived class modules: acs, census, civick and tiger.
    It will create a set of new derived classes in the module, one for each of the entries in the
    `summary_levels` dict.
    """
    from functools import partial

    for k in names:
        cls = base_class.class_factory(k.capitalize())
        cls.augment()
        setattr(module, k.capitalize(), cls)

    setattr(module, 'get_class', partial(get_class, module))
[ "def", "make_classes", "(", "base_class", ",", "module", ")", ":", "from", "functools", "import", "partial", "for", "k", "in", "names", ":", "cls", "=", "base_class", ".", "class_factory", "(", "k", ".", "capitalize", "(", ")", ")", "cls", ".", "augment", "(", ")", "setattr", "(", "module", ",", "k", ".", "capitalize", "(", ")", ",", "cls", ")", "setattr", "(", "module", ",", "'get_class'", ",", "partial", "(", "get_class", ",", "module", ")", ")" ]
Create derived classes and put them into the same module as the base class.

This function is called at the end of each of the derived class modules: acs, census, civick and tiger.
It will create a set of new derived classes in the module, one for each of the entries in the
`summary_levels` dict.
[ "Create", "derived", "classes", "and", "put", "them", "into", "the", "same", "module", "as", "the", "base", "class", "." ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L428-L446
train
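A self-contained sketch of the same class-factory idea, using `type()` for the factory and a throwaway module object as the install target (the `names` table here is invented for illustration):

import types

names = {'state': 40, 'county': 50}  # hypothetical summary levels

class Base(object):
    sl = None

def make_classes(base_class, module):
    # derive one class per name and install it on the target module
    for name, sl in names.items():
        cls = type(name.capitalize(), (base_class,), {'sl': sl})
        setattr(module, name.capitalize(), cls)

ns = types.ModuleType('derived')  # stand-in for the real calling module
make_classes(Base, ns)
print(ns.State.sl, ns.County.sl)  # 40 50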
Metatab/geoid
geoid/core.py
generate_all
def generate_all(sumlevel, d):
    """Generate a dict that includes all of the available geoid values, with keys
    for the most common names for those values.
    """
    from geoid.civick import GVid
    from geoid.tiger import TigerGeoid
    from geoid.acs import AcsGeoid

    sumlevel = int(sumlevel)
    d = dict(d.items())

    # Map common name variants
    if 'cousub' in d:
        d['cosub'] = d['cousub']
        del d['cousub']

    if 'blkgrp' in d:
        d['blockgroup'] = d['blkgrp']
        del d['blkgrp']

    if 'zcta5' in d:
        d['zcta'] = d['zcta5']
        del d['zcta5']

    gvid_class = GVid.resolve_summary_level(sumlevel)

    if not gvid_class:
        return {}

    geoidt_class = TigerGeoid.resolve_summary_level(sumlevel)
    geoid_class = AcsGeoid.resolve_summary_level(sumlevel)

    try:
        return dict(
            gvid=str(gvid_class(**d)),
            geoid=str(geoid_class(**d)),
            geoidt=str(geoidt_class(**d))
        )
    except:
        raise
python
def generate_all(sumlevel, d):
    """Generate a dict that includes all of the available geoid values, with keys
    for the most common names for those values.
    """
    from geoid.civick import GVid
    from geoid.tiger import TigerGeoid
    from geoid.acs import AcsGeoid

    sumlevel = int(sumlevel)
    d = dict(d.items())

    # Map common name variants
    if 'cousub' in d:
        d['cosub'] = d['cousub']
        del d['cousub']

    if 'blkgrp' in d:
        d['blockgroup'] = d['blkgrp']
        del d['blkgrp']

    if 'zcta5' in d:
        d['zcta'] = d['zcta5']
        del d['zcta5']

    gvid_class = GVid.resolve_summary_level(sumlevel)

    if not gvid_class:
        return {}

    geoidt_class = TigerGeoid.resolve_summary_level(sumlevel)
    geoid_class = AcsGeoid.resolve_summary_level(sumlevel)

    try:
        return dict(
            gvid=str(gvid_class(**d)),
            geoid=str(geoid_class(**d)),
            geoidt=str(geoidt_class(**d))
        )
    except:
        raise
[ "def", "generate_all", "(", "sumlevel", ",", "d", ")", ":", "from", "geoid", ".", "civick", "import", "GVid", "from", "geoid", ".", "tiger", "import", "TigerGeoid", "from", "geoid", ".", "acs", "import", "AcsGeoid", "sumlevel", "=", "int", "(", "sumlevel", ")", "d", "=", "dict", "(", "d", ".", "items", "(", ")", ")", "# Map common name variants", "if", "'cousub'", "in", "d", ":", "d", "[", "'cosub'", "]", "=", "d", "[", "'cousub'", "]", "del", "d", "[", "'cousub'", "]", "if", "'blkgrp'", "in", "d", ":", "d", "[", "'blockgroup'", "]", "=", "d", "[", "'blkgrp'", "]", "del", "d", "[", "'blkgrp'", "]", "if", "'zcta5'", "in", "d", ":", "d", "[", "'zcta'", "]", "=", "d", "[", "'zcta5'", "]", "del", "d", "[", "'zcta5'", "]", "gvid_class", "=", "GVid", ".", "resolve_summary_level", "(", "sumlevel", ")", "if", "not", "gvid_class", ":", "return", "{", "}", "geoidt_class", "=", "TigerGeoid", ".", "resolve_summary_level", "(", "sumlevel", ")", "geoid_class", "=", "AcsGeoid", ".", "resolve_summary_level", "(", "sumlevel", ")", "try", ":", "return", "dict", "(", "gvid", "=", "str", "(", "gvid_class", "(", "*", "*", "d", ")", ")", ",", "geoid", "=", "str", "(", "geoid_class", "(", "*", "*", "d", ")", ")", ",", "geoidt", "=", "str", "(", "geoidt_class", "(", "*", "*", "d", ")", ")", ")", "except", ":", "raise" ]
Generate a dict that includes all of the available geoid values, with keys for the most common names for those values.
[ "Generate", "a", "dict", "that", "includes", "all", "of", "the", "available", "geoid", "values", "with", "keys", "for", "the", "most", "common", "names", "for", "those", "values", "." ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L887-L927
train
Metatab/geoid
geoid/core.py
_generate_names
def _generate_names():
    """ Code to generate the state and county names

    >>> python -c 'import geoid; geoid._generate_names()'
    """
    from ambry import get_library

    l = get_library()

    counties = l.partition('census.gov-acs-geofile-2009-geofile50-20095-50')
    states = l.partition('census.gov-acs-geofile-2009-geofile40-20095-40')

    names = {}

    for row in counties.remote_datafile.reader:
        names[(row.state, row.county)] = row.name

    for row in states.remote_datafile.reader:
        if row.component == '00':
            names[(row.state, 0)] = row.name

    pprint.pprint(names)
python
def _generate_names():
    """ Code to generate the state and county names

    >>> python -c 'import geoid; geoid._generate_names()'
    """
    from ambry import get_library

    l = get_library()

    counties = l.partition('census.gov-acs-geofile-2009-geofile50-20095-50')
    states = l.partition('census.gov-acs-geofile-2009-geofile40-20095-40')

    names = {}

    for row in counties.remote_datafile.reader:
        names[(row.state, row.county)] = row.name

    for row in states.remote_datafile.reader:
        if row.component == '00':
            names[(row.state, 0)] = row.name

    pprint.pprint(names)
[ "def", "_generate_names", "(", ")", ":", "from", "ambry", "import", "get_library", "l", "=", "get_library", "(", ")", "counties", "=", "l", ".", "partition", "(", "'census.gov-acs-geofile-2009-geofile50-20095-50'", ")", "states", "=", "l", ".", "partition", "(", "'census.gov-acs-geofile-2009-geofile40-20095-40'", ")", "names", "=", "{", "}", "for", "row", "in", "counties", ".", "remote_datafile", ".", "reader", ":", "names", "[", "(", "row", ".", "state", ",", "row", ".", "county", ")", "]", "=", "row", ".", "name", "for", "row", "in", "states", ".", "remote_datafile", ".", "reader", ":", "if", "row", ".", "component", "==", "'00'", ":", "names", "[", "(", "row", ".", "state", ",", "0", ")", "]", "=", "row", ".", "name", "pprint", ".", "pprint", "(", "names", ")" ]
Code to generate the state and county names

>>> python -c 'import geoid; geoid._generate_names()'
[ "Code", "to", "generate", "the", "state", "and", "county", "names" ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L930-L952
train
Metatab/geoid
geoid/core.py
CountyName.division_name
def division_name(self):
    """The type designation for the county or county equivalent, such as 'County', 'Parish' or 'Borough'"""
    try:
        return next(e for e in self.type_names_re.search(self.name).groups() if e is not None)
    except AttributeError:
        # The search will fail for 'District of Columbia'
        return ''
python
def division_name(self):
    """The type designation for the county or county equivalent, such as 'County', 'Parish' or 'Borough'"""
    try:
        return next(e for e in self.type_names_re.search(self.name).groups() if e is not None)
    except AttributeError:
        # The search will fail for 'District of Columbia'
        return ''
[ "def", "division_name", "(", "self", ")", ":", "try", ":", "return", "next", "(", "e", "for", "e", "in", "self", ".", "type_names_re", ".", "search", "(", "self", ".", "name", ")", ".", "groups", "(", ")", "if", "e", "is", "not", "None", ")", "except", "AttributeError", ":", "# The search will fail for 'District of Columbia'", "return", "''" ]
The type designation for the county or county equivalent, such as 'County', 'Parish' or 'Borough'
[ "The", "type", "designation", "for", "the", "county", "or", "county", "equivalent", "such", "as", "County", "Parish", "or", "Borough" ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L489-L495
train
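The `type_names_re` pattern itself is defined elsewhere in the class; a plausible stand-in (an assumption, not the real pattern) shows why a failed search falls through to `AttributeError`:

import re

# Hypothetical pattern: capture a trailing division designation, if any.
type_names_re = re.compile(r'\b(County|Parish|Borough|Municipio|Census Area)$')

def division_name(name):
    try:
        # the iterable is built eagerly, so a None match raises AttributeError here
        return next(e for e in type_names_re.search(name).groups() if e is not None)
    except AttributeError:  # .search() returned None, as for 'District of Columbia'
        return ''

print(division_name('San Diego County'))      # County
print(division_name('District of Columbia'))  # (empty string)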
Metatab/geoid
geoid/core.py
Geoid.augment
def augment(cls):
    """Augment the class with computed formats, regexes, and other things. This caches
    these values so they don't have to be created for every instance.
    """
    import re

    level_name = cls.__name__.lower()

    cls.sl = names[level_name]

    cls.class_map[cls.__name__.lower()] = cls
    cls.sl_map[cls.sl] = cls

    cls.fmt = cls.make_format_string(cls.__name__.lower())
    cls.regex_str = cls.make_regex(cls.__name__.lower())
    cls.regex = re.compile(cls.regex_str)

    # List of field names
    cls.level = level_name
    cls.fields = segments[cls.sl]
python
def augment(cls):
    """Augment the class with computed formats, regexes, and other things. This caches
    these values so they don't have to be created for every instance.
    """
    import re

    level_name = cls.__name__.lower()

    cls.sl = names[level_name]

    cls.class_map[cls.__name__.lower()] = cls
    cls.sl_map[cls.sl] = cls

    cls.fmt = cls.make_format_string(cls.__name__.lower())
    cls.regex_str = cls.make_regex(cls.__name__.lower())
    cls.regex = re.compile(cls.regex_str)

    # List of field names
    cls.level = level_name
    cls.fields = segments[cls.sl]
[ "def", "augment", "(", "cls", ")", ":", "import", "re", "level_name", "=", "cls", ".", "__name__", ".", "lower", "(", ")", "cls", ".", "sl", "=", "names", "[", "level_name", "]", "cls", ".", "class_map", "[", "cls", ".", "__name__", ".", "lower", "(", ")", "]", "=", "cls", "cls", ".", "sl_map", "[", "cls", ".", "sl", "]", "=", "cls", "cls", ".", "fmt", "=", "cls", ".", "make_format_string", "(", "cls", ".", "__name__", ".", "lower", "(", ")", ")", "cls", ".", "regex_str", "=", "cls", ".", "make_regex", "(", "cls", ".", "__name__", ".", "lower", "(", ")", ")", "cls", ".", "regex", "=", "re", ".", "compile", "(", "cls", ".", "regex_str", ")", "# List of field names", "cls", ".", "level", "=", "level_name", "cls", ".", "fields", "=", "segments", "[", "cls", ".", "sl", "]" ]
Augment the class with computed formats, regexes, and other things. This caches these values so they don't have to be created for every instance.
[ "Augment", "the", "class", "with", "computed", "formats", "regexes", "and", "other", "things", ".", "This", "caches", "these", "values", "so", "they", "don", "t", "have", "to", "be", "created", "for", "every", "instance", "." ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L562-L583
train
Metatab/geoid
geoid/core.py
Geoid.get_class
def get_class(cls, name_or_sl):
    """Return a derived class based on the class name or the summary_level"""
    try:
        return cls.sl_map[int(name_or_sl)]
    except TypeError as e:
        raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
    except ValueError:
        try:
            return cls.class_map[name_or_sl.lower()]
        except (KeyError, ValueError):
            raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))
python
def get_class(cls, name_or_sl):
    """Return a derived class based on the class name or the summary_level"""
    try:
        return cls.sl_map[int(name_or_sl)]
    except TypeError as e:
        raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
    except ValueError:
        try:
            return cls.class_map[name_or_sl.lower()]
        except (KeyError, ValueError):
            raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))
[ "def", "get_class", "(", "cls", ",", "name_or_sl", ")", ":", "try", ":", "return", "cls", ".", "sl_map", "[", "int", "(", "name_or_sl", ")", "]", "except", "TypeError", "as", "e", ":", "raise", "TypeError", "(", "\"Bad name or sl: {} : {}\"", ".", "format", "(", "name_or_sl", ",", "e", ")", ")", "except", "ValueError", ":", "try", ":", "return", "cls", ".", "class_map", "[", "name_or_sl", ".", "lower", "(", ")", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "NotASummaryName", "(", "\"Value '{}' is not a valid summary level\"", ".", "format", "(", "name_or_sl", ")", ")" ]
Return a derived class based on the class name or the summary_level
[ "Return", "a", "derived", "class", "based", "on", "the", "class", "name", "or", "the", "summary_level" ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L587-L598
train
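The method accepts either an integer summary level or a level name; the int-first dispatch is a small reusable pattern, sketched standalone here (both lookup tables are invented):

SL_MAP = {40: 'State', 50: 'County'}
NAME_MAP = {'state': 'State', 'county': 'County'}

def get_class(name_or_sl):
    try:
        return SL_MAP[int(name_or_sl)]
    except ValueError:  # not an integer, so treat it as a level name
        try:
            return NAME_MAP[name_or_sl.lower()]
        except KeyError:
            raise ValueError("'{}' is not a valid summary level".format(name_or_sl))

print(get_class('40'), get_class('county'))  # State County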
Metatab/geoid
geoid/core.py
Geoid.geo_name
def geo_name(self):
    """
    Return a name of the state or county, or, for other lower levels, the name of
    the level type in the county.
    :return:
    """
    if self.level == 'county':
        return str(self.county_name)
    elif self.level == 'state':
        return self.state_name
    else:
        if hasattr(self, 'county'):
            return "{} in {}".format(self.level, str(self.county_name))
        elif hasattr(self, 'state'):
            return "{} in {}".format(self.level, self.state_name)
        else:
            return "a {}".format(self.level)
python
def geo_name(self):
    """
    Return a name of the state or county, or, for other lower levels, the name of
    the level type in the county.
    :return:
    """
    if self.level == 'county':
        return str(self.county_name)
    elif self.level == 'state':
        return self.state_name
    else:
        if hasattr(self, 'county'):
            return "{} in {}".format(self.level, str(self.county_name))
        elif hasattr(self, 'state'):
            return "{} in {}".format(self.level, self.state_name)
        else:
            return "a {}".format(self.level)
[ "def", "geo_name", "(", "self", ")", ":", "if", "self", ".", "level", "==", "'county'", ":", "return", "str", "(", "self", ".", "county_name", ")", "elif", "self", ".", "level", "==", "'state'", ":", "return", "self", ".", "state_name", "else", ":", "if", "hasattr", "(", "self", ",", "'county'", ")", ":", "return", "\"{} in {}\"", ".", "format", "(", "self", ".", "level", ",", "str", "(", "self", ".", "county_name", ")", ")", "elif", "hasattr", "(", "self", ",", "'state'", ")", ":", "return", "\"{} in {}\"", ".", "format", "(", "self", ".", "level", ",", "self", ".", "state_name", ")", "else", ":", "return", "\"a {}\"", ".", "format", "(", "self", ".", "level", ")" ]
Return a name of the state or county, or, for other lower levels, the name of
the level type in the county.

:return:
[ "Return", "a", "name", "of", "the", "state", "or", "county", "or", "for", "other", "lowever", "levels", "the", "name", "of", "the", "level", "type", "in", "the", "county", "." ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L668-L689
train
Metatab/geoid
geoid/core.py
Geoid.parse
def parse(cls, gvid, exception=True):
    """
    Parse a string value into the geoid of this class.

    :param gvid: String value to parse.
    :param exception: If true (default) raise an exception on parse errors.
        If False, return a 'null' geoid.
    :return:
    """
    if gvid == 'invalid':
        return cls.get_class('null')(0)

    if not bool(gvid):
        return None

    if not isinstance(gvid, six.string_types):
        raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))

    try:
        if not cls.sl:
            # Civick and ACS include the SL, so can call from base type.
            if six.PY3:
                fn = cls.decode
            else:
                fn = cls.decode.__func__
            sl = fn(gvid[0:cls.sl_width])
        else:
            sl = cls.sl  # Otherwise must use derived class.
    except ValueError as e:
        if exception:
            raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
        else:
            return cls.get_class('null')(0)

    try:
        cls = cls.sl_map[sl]
    except KeyError:
        if exception:
            raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
        else:
            return cls.get_class('null')(0)

    m = cls.regex.match(gvid)

    if not m:
        raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))

    d = m.groupdict()

    if not d:
        return None

    if six.PY3:
        fn = cls.decode
    else:
        fn = cls.decode.__func__

    d = {k: fn(v) for k, v in d.items()}

    try:
        del d['sl']
    except KeyError:
        pass

    return cls(**d)
python
def parse(cls, gvid, exception=True):
    """
    Parse a string value into the geoid of this class.

    :param gvid: String value to parse.
    :param exception: If true (default) raise an exception on parse errors.
        If False, return a 'null' geoid.
    :return:
    """
    if gvid == 'invalid':
        return cls.get_class('null')(0)

    if not bool(gvid):
        return None

    if not isinstance(gvid, six.string_types):
        raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))

    try:
        if not cls.sl:
            # Civick and ACS include the SL, so can call from base type.
            if six.PY3:
                fn = cls.decode
            else:
                fn = cls.decode.__func__
            sl = fn(gvid[0:cls.sl_width])
        else:
            sl = cls.sl  # Otherwise must use derived class.
    except ValueError as e:
        if exception:
            raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
        else:
            return cls.get_class('null')(0)

    try:
        cls = cls.sl_map[sl]
    except KeyError:
        if exception:
            raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
        else:
            return cls.get_class('null')(0)

    m = cls.regex.match(gvid)

    if not m:
        raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))

    d = m.groupdict()

    if not d:
        return None

    if six.PY3:
        fn = cls.decode
    else:
        fn = cls.decode.__func__

    d = {k: fn(v) for k, v in d.items()}

    try:
        del d['sl']
    except KeyError:
        pass

    return cls(**d)
[ "def", "parse", "(", "cls", ",", "gvid", ",", "exception", "=", "True", ")", ":", "if", "gvid", "==", "'invalid'", ":", "return", "cls", ".", "get_class", "(", "'null'", ")", "(", "0", ")", "if", "not", "bool", "(", "gvid", ")", ":", "return", "None", "if", "not", "isinstance", "(", "gvid", ",", "six", ".", "string_types", ")", ":", "raise", "TypeError", "(", "\"Can't parse; not a string. Got a '{}' \"", ".", "format", "(", "type", "(", "gvid", ")", ")", ")", "try", ":", "if", "not", "cls", ".", "sl", ":", "# Civick and ACS include the SL, so can call from base type.", "if", "six", ".", "PY3", ":", "fn", "=", "cls", ".", "decode", "else", ":", "fn", "=", "cls", ".", "decode", ".", "__func__", "sl", "=", "fn", "(", "gvid", "[", "0", ":", "cls", ".", "sl_width", "]", ")", "else", ":", "sl", "=", "cls", ".", "sl", "# Otherwise must use derived class.", "except", "ValueError", "as", "e", ":", "if", "exception", ":", "raise", "ValueError", "(", "\"Failed to parse gvid '{}': {}\"", ".", "format", "(", "gvid", ",", "str", "(", "e", ")", ")", ")", "else", ":", "return", "cls", ".", "get_class", "(", "'null'", ")", "(", "0", ")", "try", ":", "cls", "=", "cls", ".", "sl_map", "[", "sl", "]", "except", "KeyError", ":", "if", "exception", ":", "raise", "ValueError", "(", "\"Failed to parse gvid '{}': Unknown summary level '{}' \"", ".", "format", "(", "gvid", ",", "sl", ")", ")", "else", ":", "return", "cls", ".", "get_class", "(", "'null'", ")", "(", "0", ")", "m", "=", "cls", ".", "regex", ".", "match", "(", "gvid", ")", "if", "not", "m", ":", "raise", "ValueError", "(", "\"Failed to match '{}' to '{}' \"", ".", "format", "(", "gvid", ",", "cls", ".", "regex_str", ")", ")", "d", "=", "m", ".", "groupdict", "(", ")", "if", "not", "d", ":", "return", "None", "if", "six", ".", "PY3", ":", "fn", "=", "cls", ".", "decode", "else", ":", "fn", "=", "cls", ".", "decode", ".", "__func__", "d", "=", "{", "k", ":", "fn", "(", "v", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", "}", "try", ":", "del", "d", "[", "'sl'", "]", "except", "KeyError", ":", "pass", "return", "cls", "(", "*", "*", "d", ")" ]
Parse a string value into the geoid of this class.

:param gvid: String value to parse.
:param exception: If true (default) raise an exception on parse errors.
    If False, return a 'null' geoid.
:return:
[ "Parse", "a", "string", "value", "into", "the", "geoid", "of", "this", "class", "." ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L714-L781
train
Metatab/geoid
geoid/core.py
Geoid.convert
def convert(self, root_cls):
    """Convert to another derived class. cls is the base class for the derived type,
    ie AcsGeoid, TigerGeoid, etc.
    """
    d = self.__dict__
    d['sl'] = self.sl

    try:
        cls = root_cls.get_class(root_cls.sl)
    except (AttributeError, TypeError):
        # Hopefully because root_cls is a module
        cls = root_cls.get_class(self.sl)

    return cls(**d)
python
def convert(self, root_cls):
    """Convert to another derived class. cls is the base class for the derived type,
    ie AcsGeoid, TigerGeoid, etc.
    """
    d = self.__dict__
    d['sl'] = self.sl

    try:
        cls = root_cls.get_class(root_cls.sl)
    except (AttributeError, TypeError):
        # Hopefully because root_cls is a module
        cls = root_cls.get_class(self.sl)

    return cls(**d)
[ "def", "convert", "(", "self", ",", "root_cls", ")", ":", "d", "=", "self", ".", "__dict__", "d", "[", "'sl'", "]", "=", "self", ".", "sl", "try", ":", "cls", "=", "root_cls", ".", "get_class", "(", "root_cls", ".", "sl", ")", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "# Hopefully because root_cls is a module", "cls", "=", "root_cls", ".", "get_class", "(", "self", ".", "sl", ")", "return", "cls", "(", "*", "*", "d", ")" ]
Convert to another derived class. cls is the base class for the derived type, ie AcsGeoid, TigerGeoid, etc.
[ "Convert", "to", "another", "derived", "class", ".", "cls", "is", "the", "base", "class", "for", "the", "derived", "type", "ie", "AcsGeoid", "TigerGeoid", "etc", "." ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L783-L796
train
Metatab/geoid
geoid/core.py
Geoid.promote
def promote(self, level=None):
    """Convert to the next higher level summary level"""
    if level is None:
        if len(self.fields) < 2:
            if self.level in ('region', 'division', 'state', 'ua'):
                cls = self.get_class('us')
            else:
                return None
        else:
            cls = self.get_class(self.fields[-2])
    else:
        cls = self.get_class(level)

    d = dict(self.__dict__.items())
    d['sl'] = self.sl

    return cls(**d)
python
def promote(self, level=None):
    """Convert to the next higher level summary level"""
    if level is None:
        if len(self.fields) < 2:
            if self.level in ('region', 'division', 'state', 'ua'):
                cls = self.get_class('us')
            else:
                return None
        else:
            cls = self.get_class(self.fields[-2])
    else:
        cls = self.get_class(level)

    d = dict(self.__dict__.items())
    d['sl'] = self.sl

    return cls(**d)
[ "def", "promote", "(", "self", ",", "level", "=", "None", ")", ":", "if", "level", "is", "None", ":", "if", "len", "(", "self", ".", "fields", ")", "<", "2", ":", "if", "self", ".", "level", "in", "(", "'region'", ",", "'division'", ",", "'state'", ",", "'ua'", ")", ":", "cls", "=", "self", ".", "get_class", "(", "'us'", ")", "else", ":", "return", "None", "else", ":", "cls", "=", "self", ".", "get_class", "(", "self", ".", "fields", "[", "-", "2", "]", ")", "else", ":", "cls", "=", "self", ".", "get_class", "(", "level", ")", "d", "=", "dict", "(", "self", ".", "__dict__", ".", "items", "(", ")", ")", "d", "[", "'sl'", "]", "=", "self", ".", "sl", "return", "cls", "(", "*", "*", "d", ")" ]
Convert to the next higher level summary level
[ "Convert", "to", "the", "next", "higher", "level", "summary", "level" ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L810-L828
train
Metatab/geoid
geoid/core.py
Geoid.allval
def allval(self):
    """Convert the last value to zero. This form represents the entire higher summary level
    at the granularity of the lower summary level. For example, for a county, it means
    'All counties in the state'
    """
    d = dict(self.__dict__.items())
    d['sl'] = self.sl
    d[self.level] = 0

    cls = self.get_class(self.sl)

    return cls(**d)
python
def allval(self):
    """Convert the last value to zero. This form represents the entire higher summary level
    at the granularity of the lower summary level. For example, for a county, it means
    'All counties in the state'
    """
    d = dict(self.__dict__.items())
    d['sl'] = self.sl
    d[self.level] = 0

    cls = self.get_class(self.sl)

    return cls(**d)
[ "def", "allval", "(", "self", ")", ":", "d", "=", "dict", "(", "self", ".", "__dict__", ".", "items", "(", ")", ")", "d", "[", "'sl'", "]", "=", "self", ".", "sl", "d", "[", "self", ".", "level", "]", "=", "0", "cls", "=", "self", ".", "get_class", "(", "self", ".", "sl", ")", "return", "cls", "(", "*", "*", "d", ")" ]
Convert the last value to zero. This form represents the entire higher summary level at the granularity of the lower summary level. For example, for a county, it means 'All counties in the state'
[ "Convert", "the", "last", "value", "to", "zero", ".", "This", "form", "represents", "the", "entire", "higher", "summary", "level", "at", "the", "granularity", "of", "the", "lower", "summary", "level", ".", "For", "example", "for", "a", "county", "it", "means", "All", "counties", "in", "the", "state" ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L835-L846
train
Metatab/geoid
geoid/core.py
Geoid.nullval
def nullval(cls):
    """Create a new instance where all of the values are 0"""
    d = dict(cls.__dict__.items())

    for k in d:
        d[k] = 0

    d['sl'] = cls.sl
    d[cls.level] = 0

    return cls(**d)
python
def nullval(cls):
    """Create a new instance where all of the values are 0"""
    d = dict(cls.__dict__.items())

    for k in d:
        d[k] = 0

    d['sl'] = cls.sl
    d[cls.level] = 0

    return cls(**d)
[ "def", "nullval", "(", "cls", ")", ":", "d", "=", "dict", "(", "cls", ".", "__dict__", ".", "items", "(", ")", ")", "for", "k", "in", "d", ":", "d", "[", "k", "]", "=", "0", "d", "[", "'sl'", "]", "=", "cls", ".", "sl", "d", "[", "cls", ".", "level", "]", "=", "0", "return", "cls", "(", "*", "*", "d", ")" ]
Create a new instance where all of the values are 0
[ "Create", "a", "new", "instance", "where", "all", "of", "the", "values", "are", "0" ]
4b7769406b00e59376fb6046b42a2f8ed706b33b
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L849-L860
train
andy-z/ged4py
ged4py/detail/name.py
split_name
def split_name(name):
    """Extracts pieces of name from full name string.

    Full name can have one of these formats:
        <NAME_TEXT> |
        /<NAME_TEXT>/ |
        <NAME_TEXT> /<NAME_TEXT>/ |
        /<NAME_TEXT>/ <NAME_TEXT> |
        <NAME_TEXT> /<NAME_TEXT>/ <NAME_TEXT>

    <NAME_TEXT> can include almost anything excluding commas, numbers, special
    characters (though some test files use numbers for the names). Text between
    slashes is considered a surname, outside slashes - given name.

    This method splits full name into pieces at slashes, e.g.:

        "First /Last/" -> ("First", "Last", "")
        "/Last/ First" -> ("", "Last", "First")
        "First /Last/ Jr." -> ("First", "Last", "Jr.")
        "First Jr." -> ("First Jr.", "", "")

    :param str name: Full name string.
    :return: 3-tuple `(given1, surname, given2)`, `surname` or `given` will be
        empty strings if they are not present in full string.
    """
    given1, _, rem = name.partition("/")
    surname, _, given2 = rem.partition("/")
    return given1.strip(), surname.strip(), given2.strip()
python
def split_name(name):
    """Extracts pieces of name from full name string.

    Full name can have one of these formats:
        <NAME_TEXT> |
        /<NAME_TEXT>/ |
        <NAME_TEXT> /<NAME_TEXT>/ |
        /<NAME_TEXT>/ <NAME_TEXT> |
        <NAME_TEXT> /<NAME_TEXT>/ <NAME_TEXT>

    <NAME_TEXT> can include almost anything excluding commas, numbers, special
    characters (though some test files use numbers for the names). Text between
    slashes is considered a surname, outside slashes - given name.

    This method splits full name into pieces at slashes, e.g.:

        "First /Last/" -> ("First", "Last", "")
        "/Last/ First" -> ("", "Last", "First")
        "First /Last/ Jr." -> ("First", "Last", "Jr.")
        "First Jr." -> ("First Jr.", "", "")

    :param str name: Full name string.
    :return: 3-tuple `(given1, surname, given2)`, `surname` or `given` will be
        empty strings if they are not present in full string.
    """
    given1, _, rem = name.partition("/")
    surname, _, given2 = rem.partition("/")
    return given1.strip(), surname.strip(), given2.strip()
[ "def", "split_name", "(", "name", ")", ":", "given1", ",", "_", ",", "rem", "=", "name", ".", "partition", "(", "\"/\"", ")", "surname", ",", "_", ",", "given2", "=", "rem", ".", "partition", "(", "\"/\"", ")", "return", "given1", ".", "strip", "(", ")", ",", "surname", ".", "strip", "(", ")", ",", "given2", ".", "strip", "(", ")" ]
Extracts pieces of name from full name string.

Full name can have one of these formats:
    <NAME_TEXT> |
    /<NAME_TEXT>/ |
    <NAME_TEXT> /<NAME_TEXT>/ |
    /<NAME_TEXT>/ <NAME_TEXT> |
    <NAME_TEXT> /<NAME_TEXT>/ <NAME_TEXT>

<NAME_TEXT> can include almost anything excluding commas, numbers, special
characters (though some test files use numbers for the names). Text between
slashes is considered a surname, outside slashes - given name.

This method splits full name into pieces at slashes, e.g.:

    "First /Last/" -> ("First", "Last", "")
    "/Last/ First" -> ("", "Last", "First")
    "First /Last/ Jr." -> ("First", "Last", "Jr.")
    "First Jr." -> ("First Jr.", "", "")

:param str name: Full name string.
:return: 3-tuple `(given1, surname, given2)`, `surname` or `given` will be
    empty strings if they are not present in full string.
[ "Extracts", "pieces", "of", "name", "from", "full", "name", "string", "." ]
d0e0cceaadf0a84cbf052705e3c27303b12e1757
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/detail/name.py#L7-L35
train
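Assuming `split_name` from the record above is in scope, the docstring examples check out as written:

for full in ('First /Last/', '/Last/ First', 'First /Last/ Jr.', 'First Jr.'):
    print(split_name(full))
# ('First', 'Last', '')
# ('', 'Last', 'First')
# ('First', 'Last', 'Jr.')
# ('First Jr.', '', '')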
andy-z/ged4py
ged4py/detail/name.py
parse_name_altree
def parse_name_altree(record):
    """Parse NAME structure assuming ALTREE dialect.

    In ALTREE dialect maiden name (if present) is saved as SURN sub-record and
    is also appended to family name in parens. Given name is saved in GIVN
    sub-record. Few examples:

    No maiden name:

        1 NAME John /Smith/
        2 GIVN John

    With maiden name:

        1 NAME Jane /Smith (Ivanova)/
        2 GIVN Jane
        2 SURN Ivanova

    No maiden name:

        1 NAME Mers /Daimler (-Benz)/
        2 GIVN Mers

    Because family name can also contain parens it's not enough to parse family
    name and guess maiden name from it, we also have to check for SURN record.

    ALTREE also replaces empty names with question mark, we undo that too.

    :param record: NAME record
    :return: tuple with 3 or 4 elements, first three elements of tuple are the
        same as returned from :py:meth:`split_name` method, fourth element (if
        present) denotes maiden name.
    """
    name_tuple = split_name(record.value)
    if name_tuple[1] == '?':
        name_tuple = (name_tuple[0], '', name_tuple[2])
    maiden = record.sub_tag_value("SURN")
    if maiden:
        # strip "(maiden)" from family name
        ending = '(' + maiden + ')'
        surname = name_tuple[1]
        if surname.endswith(ending):
            surname = surname[:-len(ending)].rstrip()
        if surname == '?':
            surname = ''
        name_tuple = (name_tuple[0], surname, name_tuple[2], maiden)
    return name_tuple
python
def parse_name_altree(record):
    """Parse NAME structure assuming ALTREE dialect.

    In ALTREE dialect maiden name (if present) is saved as SURN sub-record and
    is also appended to family name in parens. Given name is saved in GIVN
    sub-record. Few examples:

    No maiden name:

        1 NAME John /Smith/
        2 GIVN John

    With maiden name:

        1 NAME Jane /Smith (Ivanova)/
        2 GIVN Jane
        2 SURN Ivanova

    No maiden name:

        1 NAME Mers /Daimler (-Benz)/
        2 GIVN Mers

    Because family name can also contain parens it's not enough to parse family
    name and guess maiden name from it, we also have to check for SURN record.

    ALTREE also replaces empty names with question mark, we undo that too.

    :param record: NAME record
    :return: tuple with 3 or 4 elements, first three elements of tuple are the
        same as returned from :py:meth:`split_name` method, fourth element (if
        present) denotes maiden name.
    """
    name_tuple = split_name(record.value)
    if name_tuple[1] == '?':
        name_tuple = (name_tuple[0], '', name_tuple[2])
    maiden = record.sub_tag_value("SURN")
    if maiden:
        # strip "(maiden)" from family name
        ending = '(' + maiden + ')'
        surname = name_tuple[1]
        if surname.endswith(ending):
            surname = surname[:-len(ending)].rstrip()
        if surname == '?':
            surname = ''
        name_tuple = (name_tuple[0], surname, name_tuple[2], maiden)
    return name_tuple
[ "def", "parse_name_altree", "(", "record", ")", ":", "name_tuple", "=", "split_name", "(", "record", ".", "value", ")", "if", "name_tuple", "[", "1", "]", "==", "'?'", ":", "name_tuple", "=", "(", "name_tuple", "[", "0", "]", ",", "''", ",", "name_tuple", "[", "2", "]", ")", "maiden", "=", "record", ".", "sub_tag_value", "(", "\"SURN\"", ")", "if", "maiden", ":", "# strip \"(maiden)\" from family name", "ending", "=", "'('", "+", "maiden", "+", "')'", "surname", "=", "name_tuple", "[", "1", "]", "if", "surname", ".", "endswith", "(", "ending", ")", ":", "surname", "=", "surname", "[", ":", "-", "len", "(", "ending", ")", "]", ".", "rstrip", "(", ")", "if", "surname", "==", "'?'", ":", "surname", "=", "''", "name_tuple", "=", "(", "name_tuple", "[", "0", "]", ",", "surname", ",", "name_tuple", "[", "2", "]", ",", "maiden", ")", "return", "name_tuple" ]
Parse NAME structure assuming ALTREE dialect.

In ALTREE dialect maiden name (if present) is saved as SURN sub-record and is
also appended to family name in parens. Given name is saved in GIVN
sub-record. Few examples:

No maiden name:

    1 NAME John /Smith/
    2 GIVN John

With maiden name:

    1 NAME Jane /Smith (Ivanova)/
    2 GIVN Jane
    2 SURN Ivanova

No maiden name:

    1 NAME Mers /Daimler (-Benz)/
    2 GIVN Mers

Because family name can also contain parens it's not enough to parse family
name and guess maiden name from it, we also have to check for SURN record.

ALTREE also replaces empty names with question mark, we undo that too.

:param record: NAME record
:return: tuple with 3 or 4 elements, first three elements of tuple are the
    same as returned from :py:meth:`split_name` method, fourth element (if
    present) denotes maiden name.
[ "Parse", "NAME", "structure", "assuming", "ALTREE", "dialect", "." ]
d0e0cceaadf0a84cbf052705e3c27303b12e1757
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/detail/name.py#L38-L85
train
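A minimal stub makes the maiden-name stripping visible without a full GEDCOM parse; the `Record` class below is a hypothetical stand-in for ged4py's record objects, and `split_name`/`parse_name_altree` from the records above are assumed in scope:

class Record(object):
    """Tiny stand-in exposing just the two members the parser touches."""
    def __init__(self, value, surn=None):
        self.value = value
        self._surn = surn

    def sub_tag_value(self, tag):
        return self._surn if tag == 'SURN' else None

rec = Record('Jane /Smith (Ivanova)/', surn='Ivanova')
print(parse_name_altree(rec))  # ('Jane', 'Smith', '', 'Ivanova')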
andy-z/ged4py
ged4py/detail/name.py
parse_name_myher
def parse_name_myher(record):
    """Parse NAME structure assuming MYHERITAGE dialect.

    In MYHERITAGE dialect married name (if present) is saved as _MARNM
    sub-record. Maiden name is stored in SURN record. Few examples:

    No maiden name:

        1 NAME John /Smith/
        2 GIVN John
        2 SURN Smith

    With maiden name:

        1 NAME Jane /Ivanova/
        2 GIVN Jane
        2 SURN Ivanova
        2 _MARNM Smith

    No maiden name:

        1 NAME Mers /Daimler (-Benz)/
        2 GIVN Mers
        2 SURN Daimler (-Benz)

    :param record: NAME record
    :return: tuple with 3 or 4 elements, first three elements of tuple are the
        same as returned from :py:meth:`split_name` method, fourth element (if
        present) denotes maiden name.
    """
    name_tuple = split_name(record.value)
    married = record.sub_tag_value("_MARNM")
    if married:
        maiden = name_tuple[1]
        name_tuple = (name_tuple[0], married, name_tuple[2], maiden)
    return name_tuple
python
def parse_name_myher(record):
    """Parse NAME structure assuming MYHERITAGE dialect.

    In MYHERITAGE dialect married name (if present) is saved as _MARNM
    sub-record. Maiden name is stored in SURN record. Few examples:

    No maiden name:

        1 NAME John /Smith/
        2 GIVN John
        2 SURN Smith

    With maiden name:

        1 NAME Jane /Ivanova/
        2 GIVN Jane
        2 SURN Ivanova
        2 _MARNM Smith

    No maiden name:

        1 NAME Mers /Daimler (-Benz)/
        2 GIVN Mers
        2 SURN Daimler (-Benz)

    :param record: NAME record
    :return: tuple with 3 or 4 elements, first three elements of tuple are the
        same as returned from :py:meth:`split_name` method, fourth element (if
        present) denotes maiden name.
    """
    name_tuple = split_name(record.value)
    married = record.sub_tag_value("_MARNM")
    if married:
        maiden = name_tuple[1]
        name_tuple = (name_tuple[0], married, name_tuple[2], maiden)
    return name_tuple
[ "def", "parse_name_myher", "(", "record", ")", ":", "name_tuple", "=", "split_name", "(", "record", ".", "value", ")", "married", "=", "record", ".", "sub_tag_value", "(", "\"_MARNM\"", ")", "if", "married", ":", "maiden", "=", "name_tuple", "[", "1", "]", "name_tuple", "=", "(", "name_tuple", "[", "0", "]", ",", "married", ",", "name_tuple", "[", "2", "]", ",", "maiden", ")", "return", "name_tuple" ]
Parse NAME structure assuming MYHERITAGE dialect.

In MYHERITAGE dialect married name (if present) is saved as _MARNM
sub-record. Maiden name is stored in SURN record. Few examples:

No maiden name:

    1 NAME John /Smith/
    2 GIVN John
    2 SURN Smith

With maiden name:

    1 NAME Jane /Ivanova/
    2 GIVN Jane
    2 SURN Ivanova
    2 _MARNM Smith

No maiden name:

    1 NAME Mers /Daimler (-Benz)/
    2 GIVN Mers
    2 SURN Daimler (-Benz)

:param record: NAME record
:return: tuple with 3 or 4 elements, first three elements of tuple are the
    same as returned from :py:meth:`split_name` method, fourth element (if
    present) denotes maiden name.
[ "Parse", "NAME", "structure", "assuming", "MYHERITAGE", "dialect", "." ]
d0e0cceaadf0a84cbf052705e3c27303b12e1757
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/detail/name.py#L88-L127
train
frasertweedale/ledgertools
ltlib/ui.py
number
def number(items):
    """Maps numbering onto given values"""
    n = len(items)
    if n == 0:
        return items
    places = str(int(math.log10(n) // 1 + 1))
    format = '[{0[0]:' + str(int(places)) + 'd}] {0[1]}'
    return map(
        lambda x: format.format(x),
        enumerate(items)
    )
python
def number(items):
    """Maps numbering onto given values"""
    n = len(items)
    if n == 0:
        return items
    places = str(int(math.log10(n) // 1 + 1))
    format = '[{0[0]:' + str(int(places)) + 'd}] {0[1]}'
    return map(
        lambda x: format.format(x),
        enumerate(items)
    )
[ "def", "number", "(", "items", ")", ":", "n", "=", "len", "(", "items", ")", "if", "n", "==", "0", ":", "return", "items", "places", "=", "str", "(", "int", "(", "math", ".", "log10", "(", "n", ")", "//", "1", "+", "1", ")", ")", "format", "=", "'[{0[0]:'", "+", "str", "(", "int", "(", "places", ")", ")", "+", "'d}] {0[1]}'", "return", "map", "(", "lambda", "x", ":", "format", ".", "format", "(", "x", ")", ",", "enumerate", "(", "items", ")", ")" ]
Maps numbering onto given values
[ "Maps", "numbering", "onto", "given", "values" ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/ui.py#L35-L45
train
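Usage, assuming `number` and the module's `math` import are in scope (note this codebase targets Python 2, where `map` returns a list; under Python 3 you would wrap the result in `list(...)`):

for line in number(['alpha', 'beta', 'gamma']):
    print(line)
# [0] alpha
# [1] beta
# [2] gamma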
frasertweedale/ledgertools
ltlib/ui.py
filter_yn
def filter_yn(string, default=None):
    """Return True if yes, False if no, or the default."""
    if string.startswith(('Y', 'y')):
        return True
    elif string.startswith(('N', 'n')):
        return False
    elif not string and default is not None:
        return True if default else False
    raise InvalidInputError
python
def filter_yn(string, default=None):
    """Return True if yes, False if no, or the default."""
    if string.startswith(('Y', 'y')):
        return True
    elif string.startswith(('N', 'n')):
        return False
    elif not string and default is not None:
        return True if default else False
    raise InvalidInputError
[ "def", "filter_yn", "(", "string", ",", "default", "=", "None", ")", ":", "if", "string", ".", "startswith", "(", "(", "'Y'", ",", "'y'", ")", ")", ":", "return", "True", "elif", "string", ".", "startswith", "(", "(", "'N'", ",", "'n'", ")", ")", ":", "return", "False", "elif", "not", "string", "and", "default", "is", "not", "None", ":", "return", "True", "if", "default", "else", "False", "raise", "InvalidInputError" ]
Return True if yes, False if no, or the default.
[ "Return", "True", "if", "yes", "False", "if", "no", "or", "the", "default", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/ui.py#L48-L56
train
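Behavior at a glance, assuming `filter_yn` and the module's `InvalidInputError` are in scope:

print(filter_yn('yes'))             # True
print(filter_yn('No'))              # False
print(filter_yn('', default=True))  # True (empty input falls back to the default)
# filter_yn('maybe') raises InvalidInputError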
frasertweedale/ledgertools
ltlib/ui.py
filter_int
def filter_int(string, default=None, start=None, stop=None):
    """Return the input integer, or the default."""
    try:
        i = int(string)
        if start is not None and i < start:
            raise InvalidInputError("value too small")
        if stop is not None and i >= stop:
            raise InvalidInputError("value too large")
        return i
    except ValueError:
        if not string and default is not None:
            # empty string, default was given
            return default
        else:
            raise InvalidInputError
python
def filter_int(string, default=None, start=None, stop=None):
    """Return the input integer, or the default."""
    try:
        i = int(string)
        if start is not None and i < start:
            raise InvalidInputError("value too small")
        if stop is not None and i >= stop:
            raise InvalidInputError("value too large")
        return i
    except ValueError:
        if not string and default is not None:
            # empty string, default was given
            return default
        else:
            raise InvalidInputError
[ "def", "filter_int", "(", "string", ",", "default", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "try", ":", "i", "=", "int", "(", "string", ")", "if", "start", "is", "not", "None", "and", "i", "<", "start", ":", "raise", "InvalidInputError", "(", "\"value too small\"", ")", "if", "stop", "is", "not", "None", "and", "i", ">=", "stop", ":", "raise", "InvalidInputError", "(", "\"value too large\"", ")", "return", "i", "except", "ValueError", ":", "if", "not", "string", "and", "default", "is", "not", "None", ":", "# empty string, default was given", "return", "default", "else", ":", "raise", "InvalidInputError" ]
Return the input integer, or the default.
[ "Return", "the", "input", "integer", "or", "the", "default", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/ui.py#L59-L73
train
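Note the half-open range: `start` is inclusive and `stop` is exclusive, matching Python's `range` convention. Assuming the function above is in scope:

print(filter_int('5', start=0, stop=10))  # 5
print(filter_int('', default=3))          # 3
# filter_int('10', start=0, stop=10) raises InvalidInputError("value too large")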
frasertweedale/ledgertools
ltlib/ui.py
filter_decimal
def filter_decimal(string, default=None, lower=None, upper=None):
    """Return the input decimal number, or the default."""
    try:
        d = decimal.Decimal(string)
        if lower is not None and d < lower:
            raise InvalidInputError("value too small")
        if upper is not None and d >= upper:
            raise InvalidInputError("value too large")
        return d
    except decimal.InvalidOperation:
        if not string and default is not None:
            # empty string, default was given
            return default
        else:
            raise InvalidInputError("invalid decimal number")
python
def filter_decimal(string, default=None, lower=None, upper=None):
    """Return the input decimal number, or the default."""
    try:
        d = decimal.Decimal(string)
        if lower is not None and d < lower:
            raise InvalidInputError("value too small")
        if upper is not None and d >= upper:
            raise InvalidInputError("value too large")
        return d
    except decimal.InvalidOperation:
        if not string and default is not None:
            # empty string, default was given
            return default
        else:
            raise InvalidInputError("invalid decimal number")
[ "def", "filter_decimal", "(", "string", ",", "default", "=", "None", ",", "lower", "=", "None", ",", "upper", "=", "None", ")", ":", "try", ":", "d", "=", "decimal", ".", "Decimal", "(", "string", ")", "if", "lower", "is", "not", "None", "and", "d", "<", "lower", ":", "raise", "InvalidInputError", "(", "\"value too small\"", ")", "if", "upper", "is", "not", "None", "and", "d", ">=", "upper", ":", "raise", "InvalidInputError", "(", "\"value too large\"", ")", "return", "d", "except", "decimal", ".", "InvalidOperation", ":", "if", "not", "string", "and", "default", "is", "not", "None", ":", "# empty string, default was given", "return", "default", "else", ":", "raise", "InvalidInputError", "(", "\"invalid decimal number\"", ")" ]
Return the input decimal number, or the default.
[ "Return", "the", "input", "decimal", "number", "or", "the", "default", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/ui.py#L76-L90
train
frasertweedale/ledgertools
ltlib/ui.py
filter_pastdate
def filter_pastdate(string, default=None):
    """Coerce to a date not beyond the current date

    If only a day is given, assumes the current month if that day has
    passed or is the current day, otherwise assumes the previous month.

    If a day and month are given, but no year, assumes the current year
    if the given date has passed (or is today), otherwise the previous
    year.
    """
    if not string and default is not None:
        return default

    today = datetime.date.today()

    # split the string
    try:
        parts = map(int, re.split('\D+', string))  # split the string
    except ValueError:
        raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
    if len(parts) < 1 or len(parts) > 3:
        raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
    if len(parts) == 1:
        # no month or year given; append month
        parts.append(today.month - 1 if parts[0] > today.day else today.month)
        if parts[1] < 1:
            parts[1] = 12
    if len(parts) == 2:
        # no year given; append year
        if parts[1] > today.month \
                or parts[1] == today.month and parts[0] > today.day:
            parts.append(today.year - 1)
        else:
            parts.append(today.year)
    parts.reverse()
    try:
        date = datetime.date(*parts)
        if date > today:
            raise InvalidInputError("cannot choose a date in the future")
        return date
    except ValueError:
        print parts
        raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
python
def filter_pastdate(string, default=None):
    """Coerce to a date not beyond the current date

    If only a day is given, assumes the current month if that day has
    passed or is the current day, otherwise assumes the previous month.

    If a day and month are given, but no year, assumes the current year
    if the given date has passed (or is today), otherwise the previous
    year.
    """
    if not string and default is not None:
        return default

    today = datetime.date.today()

    # split the string
    try:
        parts = map(int, re.split('\D+', string))  # split the string
    except ValueError:
        raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
    if len(parts) < 1 or len(parts) > 3:
        raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
    if len(parts) == 1:
        # no month or year given; append month
        parts.append(today.month - 1 if parts[0] > today.day else today.month)
        if parts[1] < 1:
            parts[1] = 12
    if len(parts) == 2:
        # no year given; append year
        if parts[1] > today.month \
                or parts[1] == today.month and parts[0] > today.day:
            parts.append(today.year - 1)
        else:
            parts.append(today.year)
    parts.reverse()
    try:
        date = datetime.date(*parts)
        if date > today:
            raise InvalidInputError("cannot choose a date in the future")
        return date
    except ValueError:
        print parts
        raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
[ "def", "filter_pastdate", "(", "string", ",", "default", "=", "None", ")", ":", "if", "not", "string", "and", "default", "is", "not", "None", ":", "return", "default", "today", "=", "datetime", ".", "date", ".", "today", "(", ")", "# split the string", "try", ":", "parts", "=", "map", "(", "int", ",", "re", ".", "split", "(", "'\\D+'", ",", "string", ")", ")", "# split the string", "except", "ValueError", ":", "raise", "InvalidInputError", "(", "\"invalid date; use format: DD [MM [YYYY]]\"", ")", "if", "len", "(", "parts", ")", "<", "1", "or", "len", "(", "parts", ")", ">", "3", ":", "raise", "InvalidInputError", "(", "\"invalid date; use format: DD [MM [YYYY]]\"", ")", "if", "len", "(", "parts", ")", "==", "1", ":", "# no month or year given; append month", "parts", ".", "append", "(", "today", ".", "month", "-", "1", "if", "parts", "[", "0", "]", ">", "today", ".", "day", "else", "today", ".", "month", ")", "if", "parts", "[", "1", "]", "<", "1", ":", "parts", "[", "1", "]", "=", "12", "if", "len", "(", "parts", ")", "==", "2", ":", "# no year given; append year", "if", "parts", "[", "1", "]", ">", "today", ".", "month", "or", "parts", "[", "1", "]", "==", "today", ".", "month", "and", "parts", "[", "0", "]", ">", "today", ".", "day", ":", "parts", ".", "append", "(", "today", ".", "year", "-", "1", ")", "else", ":", "parts", ".", "append", "(", "today", ".", "year", ")", "parts", ".", "reverse", "(", ")", "try", ":", "date", "=", "datetime", ".", "date", "(", "*", "parts", ")", "if", "date", ">", "today", ":", "raise", "InvalidInputError", "(", "\"cannot choose a date in the future\"", ")", "return", "date", "except", "ValueError", ":", "print", "parts", "raise", "InvalidInputError", "(", "\"invalid date; use format: DD [MM [YYYY]]\"", ")" ]
Coerce to a date not beyond the current date If only a day is given, assumes the current month if that day has passed or is the current day, otherwise assumes the previous month. If a day and month are given, but no year, assumes the current year if the given date has passed (or is today), otherwise the previous year.
[ "Coerce", "to", "a", "date", "not", "beyond", "the", "current", "date" ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/ui.py#L102-L148
train
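Aside: the filter_pastdate record above is Python 2 code (map returns a list, raw_input elsewhere, and a leftover debug print statement, removed in the record above). Note how the month rollback leans on the year branch: a day in January that maps to month 0 is reset to 12, and December being greater than the current month then subtracts a year. A hedged Python 3 port of the same back-fill rule follows; the names mirror the record, but the port itself is an assumption:

import datetime
import re

class InvalidInputError(Exception):
    pass

def filter_pastdate(string, default=None, today=None):
    if not string and default is not None:
        return default
    today = today or datetime.date.today()  # injectable for testing
    try:
        parts = [int(p) for p in re.split(r'\D+', string)]  # a list, not a map object
    except ValueError:
        raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
    if not 1 <= len(parts) <= 3:
        raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
    if len(parts) == 1:  # day only: this month if passed/today, else last month
        parts.append(today.month - 1 if parts[0] > today.day else today.month)
        if parts[1] < 1:
            parts[1] = 12  # January rollback; the year branch below fixes the year
    if len(parts) == 2:  # no year: this year if passed/today, else last year
        later = parts[1] > today.month or (parts[1] == today.month and parts[0] > today.day)
        parts.append(today.year - 1 if later else today.year)
    parts.reverse()  # [DD, MM, YYYY] -> [YYYY, MM, DD]
    try:
        date = datetime.date(*parts)
    except ValueError:
        raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
    if date > today:
        raise InvalidInputError("cannot choose a date in the future")
    return date

# With today = 2014-06-10: "25" -> 2014-05-25, "5" -> 2014-06-05, "25 12" -> 2013-12-25.
assert filter_pastdate("25", today=datetime.date(2014, 6, 10)) == datetime.date(2014, 5, 25)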
frasertweedale/ledgertools
ltlib/ui.py
UI.input
def input(self, filter_fn, prompt): """Prompt user until valid input is received. RejectWarning is raised if a KeyboardInterrupt is caught. """ while True: try: return filter_fn(raw_input(prompt)) except InvalidInputError as e: if e.message: self.show('ERROR: ' + e.message) except KeyboardInterrupt: raise RejectWarning
python
def input(self, filter_fn, prompt): """Prompt user until valid input is received. RejectWarning is raised if a KeyboardInterrupt is caught. """ while True: try: return filter_fn(raw_input(prompt)) except InvalidInputError as e: if e.message: self.show('ERROR: ' + e.message) except KeyboardInterrupt: raise RejectWarning
[ "def", "input", "(", "self", ",", "filter_fn", ",", "prompt", ")", ":", "while", "True", ":", "try", ":", "return", "filter_fn", "(", "raw_input", "(", "prompt", ")", ")", "except", "InvalidInputError", "as", "e", ":", "if", "e", ".", "message", ":", "self", ".", "show", "(", "'ERROR: '", "+", "e", ".", "message", ")", "except", "KeyboardInterrupt", ":", "raise", "RejectWarning" ]
Prompt user until valid input is received. RejectWarning is raised if a KeyboardInterrupt is caught.
[ "Prompt", "user", "until", "valid", "input", "is", "received", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/ui.py#L161-L173
train
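Aside: UI.input above is the validation loop the other prompt methods build on: re-prompt until the filter accepts, surface InvalidInputError messages, and turn Ctrl-C into RejectWarning. A minimal Python 3 sketch of the same loop; e.message and raw_input are Python 2 idioms, so the message attribute is made explicit here:

class InvalidInputError(Exception):
    def __init__(self, message=""):
        super().__init__(message)
        self.message = message  # Python 3 exceptions have no implicit .message

class RejectWarning(Exception):
    pass

def prompt_until_valid(filter_fn, prompt):
    while True:
        try:
            return filter_fn(input(prompt))  # raw_input in the Python 2 original
        except InvalidInputError as e:
            if e.message:
                print('ERROR: ' + e.message)
        except KeyboardInterrupt:
            raise RejectWarning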
frasertweedale/ledgertools
ltlib/ui.py
UI.text
def text(self, prompt, default=None): """Prompts the user for some text, with optional default""" prompt = prompt if prompt is not None else 'Enter some text' prompt += " [{0}]: ".format(default) if default is not None else ': ' return self.input(curry(filter_text, default=default), prompt)
python
def text(self, prompt, default=None): """Prompts the user for some text, with optional default""" prompt = prompt if prompt is not None else 'Enter some text' prompt += " [{0}]: ".format(default) if default is not None else ': ' return self.input(curry(filter_text, default=default), prompt)
[ "def", "text", "(", "self", ",", "prompt", ",", "default", "=", "None", ")", ":", "prompt", "=", "prompt", "if", "prompt", "is", "not", "None", "else", "'Enter some text'", "prompt", "+=", "\" [{0}]: \"", ".", "format", "(", "default", ")", "if", "default", "is", "not", "None", "else", "': '", "return", "self", ".", "input", "(", "curry", "(", "filter_text", ",", "default", "=", "default", ")", ",", "prompt", ")" ]
Prompts the user for some text, with optional default
[ "Prompts", "the", "user", "for", "some", "text", "with", "optional", "default" ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/ui.py#L175-L179
train
frasertweedale/ledgertools
ltlib/ui.py
UI.decimal
def decimal(self, prompt, default=None, lower=None, upper=None): """Prompts user to input decimal, with optional default and bounds.""" prompt = prompt if prompt is not None else "Enter a decimal number" prompt += " [{0}]: ".format(default) if default is not None else ': ' return self.input( curry(filter_decimal, default=default, lower=lower, upper=upper), prompt )
python
def decimal(self, prompt, default=None, lower=None, upper=None): """Prompts user to input decimal, with optional default and bounds.""" prompt = prompt if prompt is not None else "Enter a decimal number" prompt += " [{0}]: ".format(default) if default is not None else ': ' return self.input( curry(filter_decimal, default=default, lower=lower, upper=upper), prompt )
[ "def", "decimal", "(", "self", ",", "prompt", ",", "default", "=", "None", ",", "lower", "=", "None", ",", "upper", "=", "None", ")", ":", "prompt", "=", "prompt", "if", "prompt", "is", "not", "None", "else", "\"Enter a decimal number\"", "prompt", "+=", "\" [{0}]: \"", ".", "format", "(", "default", ")", "if", "default", "is", "not", "None", "else", "': '", "return", "self", ".", "input", "(", "curry", "(", "filter_decimal", ",", "default", "=", "default", ",", "lower", "=", "lower", ",", "upper", "=", "upper", ")", ",", "prompt", ")" ]
Prompts user to input decimal, with optional default and bounds.
[ "Prompts", "user", "to", "input", "decimal", "with", "optional", "default", "and", "bounds", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/ui.py#L189-L196
train
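Aside: the text/decimal/pastdate wrappers all follow one pattern: build the prompt string, bind the filter's keyword arguments, and delegate to the input loop. curry is presumably a partial-application helper from elsewhere in ltlib; functools.partial stands in for it in this hypothetical wiring:

import decimal
import functools

bounded = functools.partial(
    filter_decimal,                # the validator from this module
    default=decimal.Decimal("0"),  # returned when the user just presses Enter
    lower=decimal.Decimal("0"),    # inclusive lower bound
    upper=decimal.Decimal("100"),  # exclusive upper bound
)
# ui.input(bounded, "Enter a percentage [0]: ") re-prompts until bounded() accepts.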
frasertweedale/ledgertools
ltlib/ui.py
UI.pastdate
def pastdate(self, prompt, default=None): """Prompts user to input a date in the past.""" prompt = prompt if prompt is not None else "Enter a past date" if default is not None: prompt += " [" + default.strftime('%d %m %Y') + "]" prompt += ': ' return self.input(curry(filter_pastdate, default=default), prompt)
python
def pastdate(self, prompt, default=None): """Prompts user to input a date in the past.""" prompt = prompt if prompt is not None else "Enter a past date" if default is not None: prompt += " [" + default.strftime('%d %m %Y') + "]" prompt += ': ' return self.input(curry(filter_pastdate, default=default), prompt)
[ "def", "pastdate", "(", "self", ",", "prompt", ",", "default", "=", "None", ")", ":", "prompt", "=", "prompt", "if", "prompt", "is", "not", "None", "else", "\"Enter a past date\"", "if", "default", "is", "not", "None", ":", "prompt", "+=", "\" [\"", "+", "default", ".", "strftime", "(", "'%d %m %Y'", ")", "+", "\"]\"", "prompt", "+=", "': '", "return", "self", ".", "input", "(", "curry", "(", "filter_pastdate", ",", "default", "=", "default", ")", ",", "prompt", ")" ]
Prompts user to input a date in the past.
[ "Prompts", "user", "to", "input", "a", "date", "in", "the", "past", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/ui.py#L198-L204
train
frasertweedale/ledgertools
ltlib/ui.py
UI.choose
def choose(self, prompt, items, default=None): """Prompts the user to choose one item from a list. The default, if provided, is an index; the item of that index will be returned. """ if default is not None and (default >= len(items) or default < 0): raise IndexError prompt = prompt if prompt is not None else "Choose from following:" self.show(prompt + '\n') self.show("\n".join(number(items))) # show the items prompt = "Enter number of chosen item" prompt += " [{0}]: ".format(default) if default is not None else ': ' return items[self.input( curry(filter_int, default=default, start=0, stop=len(items)), prompt )]
python
def choose(self, prompt, items, default=None): """Prompts the user to choose one item from a list. The default, if provided, is an index; the item of that index will be returned. """ if default is not None and (default >= len(items) or default < 0): raise IndexError prompt = prompt if prompt is not None else "Choose from following:" self.show(prompt + '\n') self.show("\n".join(number(items))) # show the items prompt = "Enter number of chosen item" prompt += " [{0}]: ".format(default) if default is not None else ': ' return items[self.input( curry(filter_int, default=default, start=0, stop=len(items)), prompt )]
[ "def", "choose", "(", "self", ",", "prompt", ",", "items", ",", "default", "=", "None", ")", ":", "if", "default", "is", "not", "None", "and", "(", "default", ">=", "len", "(", "items", ")", "or", "default", "<", "0", ")", ":", "raise", "IndexError", "prompt", "=", "prompt", "if", "prompt", "is", "not", "None", "else", "\"Choose from following:\"", "self", ".", "show", "(", "prompt", "+", "'\\n'", ")", "self", ".", "show", "(", "\"\\n\"", ".", "join", "(", "number", "(", "items", ")", ")", ")", "# show the items", "prompt", "=", "\"Enter number of chosen item\"", "prompt", "+=", "\" [{0}]: \"", ".", "format", "(", "default", ")", "if", "default", "is", "not", "None", "else", "': '", "return", "items", "[", "self", ".", "input", "(", "curry", "(", "filter_int", ",", "default", "=", "default", ",", "start", "=", "0", ",", "stop", "=", "len", "(", "items", ")", ")", ",", "prompt", ")", "]" ]
Prompts the user to choose one item from a list. The default, if provided, is an index; the item of that index will be returned.
[ "Prompts", "the", "user", "to", "choose", "one", "item", "from", "a", "list", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/ui.py#L217-L233
train
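Aside: a hedged usage sketch of UI.choose. The bound filter_int(start=0, stop=len(items)) implies the stop bound is exclusive, so only valid indices pass; `number` is presumably a helper that prefixes each item with its index, so the exact menu rendering below is an assumption. Note that default is an index into the list, not an item:

ui = UI()
account = ui.choose(
    "Which account?",
    ["Assets:Cash", "Assets:Checking", "Expenses:Groceries"],
    default=1,  # index of the item returned on empty input
)
# Choose from following:
#
# 0. Assets:Cash          <- assumed rendering of number(items)
# 1. Assets:Checking
# 2. Expenses:Groceries
# Enter number of chosen item [1]:   (pressing Enter returns "Assets:Checking")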
lekhakpadmanabh/Summarizer
smrzr/core.py
goose_extractor
def goose_extractor(url): '''webpage extraction using Goose Library''' article = Goose().extract(url=url) return article.title, article.meta_description,\ article.cleaned_text
python
def goose_extractor(url): '''webpage extraction using Goose Library''' article = Goose().extract(url=url) return article.title, article.meta_description,\ article.cleaned_text
[ "def", "goose_extractor", "(", "url", ")", ":", "article", "=", "Goose", "(", ")", ".", "extract", "(", "url", "=", "url", ")", "return", "article", ".", "title", ",", "article", ".", "meta_description", ",", "article", ".", "cleaned_text" ]
webpage extraction using Goose Library
[ "webpage", "extraction", "using", "Goose", "Library" ]
143456a48217905c720d87331f410e5c8b4e24aa
https://github.com/lekhakpadmanabh/Summarizer/blob/143456a48217905c720d87331f410e5c8b4e24aa/smrzr/core.py#L24-L30
train
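Aside: a usage sketch for goose_extractor. The Goose import path varies by era: the original Python 2 library ships as goose, while the maintained Python 3 fork is goose3 with the same Goose().extract(url=...) API, so adjust to your install; the URL below is a placeholder:

title, description, text = goose_extractor("https://example.com/some-article")
print(title)        # headline as extracted by Goose
print(description)  # the page's meta description, possibly empty
print(text[:200])   # first 200 characters of the cleaned article body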
lekhakpadmanabh/Summarizer
smrzr/core.py
_tokenize
def _tokenize(sentence): '''Tokenizer and Stemmer''' _tokens = nltk.word_tokenize(sentence) tokens = [stemmer.stem(tk) for tk in _tokens] return tokens
python
def _tokenize(sentence): '''Tokenizer and Stemmer''' _tokens = nltk.word_tokenize(sentence) tokens = [stemmer.stem(tk) for tk in _tokens] return tokens
[ "def", "_tokenize", "(", "sentence", ")", ":", "_tokens", "=", "nltk", ".", "word_tokenize", "(", "sentence", ")", "tokens", "=", "[", "stemmer", ".", "stem", "(", "tk", ")", "for", "tk", "in", "_tokens", "]", "return", "tokens" ]
Tokenizer and Stemmer
[ "Tokenizer", "and", "Stemmer" ]
143456a48217905c720d87331f410e5c8b4e24aa
https://github.com/lekhakpadmanabh/Summarizer/blob/143456a48217905c720d87331f410e5c8b4e24aa/smrzr/core.py#L32-L37
train
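Aside: _tokenize depends on a module-level stemmer that this record does not show. A self-contained sketch assuming NLTK's Porter stemmer (the actual binding in smrzr/core.py may differ), with the tokenizer's data dependency noted:

import nltk
from nltk.stem.porter import PorterStemmer

stemmer = PorterStemmer()  # assumption: the module may bind a different stemmer

def _tokenize(sentence):
    '''Tokenizer and Stemmer'''
    tokens = nltk.word_tokenize(sentence)  # requires the "punkt" tokenizer data
    return [stemmer.stem(tk) for tk in tokens]

# nltk.download('punkt') once, then:
# _tokenize("running quickly") -> ['run', 'quickli']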