Dataset schema (per-record fields, with value statistics):

    repo              string    length 7 to 55
    path              string    length 4 to 127
    func_name         string    length 1 to 88
    original_string   string    length 75 to 19.8k
    language          string    1 distinct value (python)
    code              string    length 75 to 19.8k
    code_tokens       sequence
    docstring         string    length 3 to 17.3k
    docstring_tokens  sequence
    sha               string    length 40 (fixed)
    url               string    length 87 to 242
    partition         string    1 distinct value (train)

repo: evolbioinfo/pastml
path: pastml/tree.py
func_name: name_tree
language: python
code:

def name_tree(tree):
    """
    Names all the tree nodes that are not named or have non-unique names,
    with unique names.

    :param tree: tree to be named
    :type tree: ete3.Tree
    :return: void, modifies the original tree
    """
    existing_names = Counter((_.name for _ in tree.traverse() if _.name))
    if sum(1 for _ in tree.traverse()) == len(existing_names):
        return
    i = 0
    existing_names = Counter()
    for node in tree.traverse('preorder'):
        name = node.name if node.is_leaf() else ('root' if node.is_root() else None)
        while name is None or name in existing_names:
            name = '{}{}'.format('t' if node.is_leaf() else 'n', i)
            i += 1
        node.name = name
        existing_names[name] += 1
sha: df8a375841525738383e59548eed3441b07dbd3e
url: https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/tree.py#L46-L66
partition: train
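
A usage sketch (my assumptions: ete3 is installed, name_tree is importable from pastml.tree, and collections.Counter is available at module level in tree.py):

from ete3 import Tree
from pastml.tree import name_tree

# two leaves share the name 'A'; the internal nodes are unnamed
tree = Tree('((A,B),(A,C));')
name_tree(tree)

# every node now carries a unique name: duplicated leaf names become
# 't<i>', unnamed internal nodes become 'n<i>', the root becomes 'root'
print(sorted(node.name for node in tree.traverse()))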

repo: geophysics-ubonn/reda
path: lib/reda/importers/utils/decorators.py
func_name: enable_result_transforms
language: python
code:

def enable_result_transforms(func):
    """Decorator that applies an optional transformation, provided via a
    kwarg called 'electrode_transformator', to the return values of an
    import function. It is intended to be used to transform electrode
    numbers and locations, e.g., for use in roll-along measurement
    schemes. The transformator object must have a function .transform,
    which takes three parameters (data, electrodes, topography) and
    returns three correspondingly transformed objects.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        func_transformator = kwargs.pop('electrode_transformator', None)
        data, electrodes, topography = func(*args, **kwargs)
        if func_transformator is not None:
            data_transformed, electrodes_transformed, \
                topography_transformed = func_transformator.transform(
                    data, electrodes, topography
                )
            return data_transformed, electrodes_transformed, \
                topography_transformed
        else:
            return data, electrodes, topography
    return wrapper
sha: 46a939729e40c7c4723315c03679c40761152e9e
url: https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/utils/decorators.py#L4-L28
partition: train
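
A minimal sketch of how the decorator is meant to be used; the importer and transformator below are hypothetical stand-ins, not part of reda:

from reda.importers.utils.decorators import enable_result_transforms

class ShiftElectrodes:
    """Hypothetical transformator: shift electrode numbers by an offset."""
    def __init__(self, offset):
        self.offset = offset

    def transform(self, data, electrodes, topography):
        shifted = [(nr + self.offset, pos) for nr, pos in electrodes]
        return data, shifted, topography

@enable_result_transforms
def import_dummy(filename):
    # hypothetical importer returning (data, electrodes, topography)
    return [1.0, 2.0], [(1, 0.0), (2, 1.0)], None

data, electrodes, topo = import_dummy(
    'measurement.dat', electrode_transformator=ShiftElectrodes(24))
print(electrodes)  # [(25, 0.0), (26, 1.0)]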

repo: cfobel/webcam-recorder
path: webcam_recorder/video_view.py
func_name: RecordControl.record_path
language: python
code:

def record_path(self):
    '''
    If recording is not enabled, return `None` as record path.
    '''
    if self.record_button.get_property('active') and (
            self.record_path_selector.selected_path):
        return self.record_path_selector.selected_path
    else:
        return None
sha: ffeb57c9044033fbea6372b3e642b83fd42dea87
url: https://github.com/cfobel/webcam-recorder/blob/ffeb57c9044033fbea6372b3e642b83fd42dea87/webcam_recorder/video_view.py#L196-L204
partition: train

repo: geophysics-ubonn/reda
path: lib/reda/utils/geom_fac_crtomo.py
func_name: _write_crmod_file
language: python
code:

def _write_crmod_file(filename):
    """Write a valid crmod configuration file to filename.

    TODO: Modify configuration according to, e.g., 2D
    """
    crmod_lines = [
        '***FILES***',
        '../grid/elem.dat',
        '../grid/elec.dat',
        '../rho/rho.dat',
        '../config/config.dat',
        'F ! potentials ?',
        '../mod/pot/pot.dat',
        'T ! measurements ?',
        '../mod/volt.dat',
        'F ! sensitivities ?',
        '../mod/sens/sens.dat',
        'F ! another dataset ?',
        '1 ! 2D (=0) or 2.5D (=1)',
        'F ! fictitious sink ?',
        '1660 ! fictitious sink node number',
        'F ! boundary values ?',
        'boundary.dat',
    ]
    with open(filename, 'w') as fid:
        for line in crmod_lines:
            fid.write(line + '\n')
sha: 46a939729e40c7c4723315c03679c40761152e9e
url: https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/geom_fac_crtomo.py#L39-L65
partition: train
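
A quick usage sketch; the function is module-private but importable, and the example writes to a temporary directory:

import os
import tempfile

from reda.utils.geom_fac_crtomo import _write_crmod_file

with tempfile.TemporaryDirectory() as tmpdir:
    config_path = os.path.join(tmpdir, 'crmod.cfg')
    _write_crmod_file(config_path)
    with open(config_path) as fid:
        print(fid.readline().strip())  # ***FILES***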

repo: kaustavdm/pyAvroPhonetic
path: pyavrophonetic/utils/__init__.py
func_name: utf
language: python
code:

def utf(text):
    """Shortcut function for encoding given text with utf-8"""
    try:
        # Python 2 'unicode' builtin; this code predates Python 3
        output = unicode(text, encoding='utf-8')
    except UnicodeDecodeError:
        output = text
    except TypeError:
        output = text
    return output
sha: 26b7d567d8db025f2cac4de817e716390d7ac337
url: https://github.com/kaustavdm/pyAvroPhonetic/blob/26b7d567d8db025f2cac4de817e716390d7ac337/pyavrophonetic/utils/__init__.py#L26-L34
partition: train
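
Under Python 3 the `unicode` builtin no longer exists, so the call would raise a NameError that neither except clause catches. A version-agnostic sketch of the same idea (my rewrite, not code from the repository):

def utf(text):
    """Return text as a str, decoding bytes as UTF-8 where possible."""
    if isinstance(text, bytes):
        try:
            return text.decode('utf-8')
        except UnicodeDecodeError:
            return text
    return text

print(utf(b'\xe0\xa6\x95'))  # UTF-8 bytes are decoded
print(utf('already text'))   # strings pass through unchanged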

repo: andy-z/ged4py
path: ged4py/detail/io.py
func_name: check_bom
language: python
code:

def check_bom(file):
    """Determines file codec from its BOM record.

    If file starts with BOM record encoded with UTF-8 or UTF-16(BE/LE)
    then corresponding encoding name is returned, otherwise None is
    returned. In both cases file current position is set to after-BOM
    bytes. The file must be open in binary mode and positioned at
    offset 0.
    """
    # try to read first three bytes
    lead = file.read(3)
    if len(lead) == 3 and lead == codecs.BOM_UTF8:
        # UTF-8, position is already OK, use canonical name
        return codecs.lookup('utf-8').name
    elif len(lead) >= 2 and lead[:2] == codecs.BOM_UTF16_BE:
        # need to backup one character
        if len(lead) == 3:
            file.seek(-1, os.SEEK_CUR)
        return codecs.lookup('utf-16-be').name
    elif len(lead) >= 2 and lead[:2] == codecs.BOM_UTF16_LE:
        # need to backup one character
        if len(lead) == 3:
            file.seek(-1, os.SEEK_CUR)
        return codecs.lookup('utf-16-le').name
    else:
        # no BOM, rewind
        file.seek(-len(lead), os.SEEK_CUR)
        return None
sha: d0e0cceaadf0a84cbf052705e3c27303b12e1757
url: https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/detail/io.py#L10-L37
partition: train
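
A runnable check using an in-memory binary file (assuming check_bom is importable from ged4py.detail.io, per the record's path):

import codecs
import io

from ged4py.detail.io import check_bom

buf = io.BytesIO(codecs.BOM_UTF8 + 'hello'.encode('utf-8'))
print(check_bom(buf))  # 'utf-8'
print(buf.read())      # b'hello' -- position is just after the BOM

buf = io.BytesIO(b'no bom here')
print(check_bom(buf))  # None
print(buf.read())      # b'no bom here' -- rewound to offset 0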

repo: andy-z/ged4py
path: ged4py/detail/io.py
func_name: guess_lineno
language: python
code:

def guess_lineno(file):
    """Guess current line number in a file.

    Guessing is done in a very crude way - scanning file from beginning
    until current offset and counting newlines. Only meant to be used in
    exceptional cases - generating line number for error message.
    """
    offset = file.tell()
    file.seek(0)
    startpos = 0
    lineno = 1
    # looks like file.read() returns bytes in python3,
    # so I need a more complicated algorithm here
    while True:
        line = file.readline()
        if not line:
            break
        endpos = file.tell()
        if startpos <= offset < endpos:
            break
        lineno += 1
    file.seek(offset)
    return lineno
sha: d0e0cceaadf0a84cbf052705e3c27303b12e1757
url: https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/detail/io.py#L40-L62
partition: train
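
For instance, with an in-memory binary file (again assuming the import from ged4py.detail.io):

import io

from ged4py.detail.io import guess_lineno

buf = io.BytesIO(b'first\nsecond\nthird\n')
buf.seek(8)               # somewhere inside 'second'
print(guess_lineno(buf))  # 2
print(buf.tell())         # 8 -- the file position is restored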

repo: pennlabs/penn-sdk-python
path: penn/libraries.py
func_name: search
language: python
code:

def search(query):
    """Search Penn Libraries Franklin for documents

    The maximum pagesize currently is 50.
    """
    params = {
        's.cmd': 'setTextQuery(%s)setPageSize(50)setHoldingsOnly(true)' % query
    }
    return requests.get(BASE_URL, params=params, timeout=10).json()
sha: 31ff12c20d69438d63bc7a796f83ce4f4c828396
url: https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/libraries.py#L7-L14
partition: train
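
Usage is a single call; BASE_URL is defined at module level in penn/libraries.py, and the request needs network access:

from penn import libraries

# the raw JSON response from the Franklin API, parsed into a dict
results = libraries.search('darwin')
print(type(results))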

repo: andy-z/ged4py
path: ged4py/model.py
func_name: make_record
language: python
code:

def make_record(level, xref_id, tag, value, sub_records, offset, dialect,
                parser=None):
    """Create Record instance based on parameters.

    :param int level: Record level number.
    :param str xref_id: Record reference ID, possibly empty.
    :param str tag: Tag name.
    :param value: Record value, possibly empty. Value can be None, bytes,
        or string object; if it is bytes then it should be decoded into
        strings before calling freeze(), which is normally done by the
        parser, which knows about encodings.
    :param list sub_records: Initial list of subordinate records,
        possibly empty. List can be updated later.
    :param int offset: Record location in a file.
    :param dialect: One of DIALECT_* constants.
    :param parser: Instance of `GedcomReader` class, only needed for
        records whose value is a pointer.
    :return: Instance of :py:class:`Record` (or one of its subclasses).
    """
    # value can be bytes or string so we check for both, 64 is code for '@'
    if value and len(value) > 2 and \
            ((value[0] == '@' and value[-1] == '@') or
             (value[0] == 64 and value[-1] == 64)):
        # this looks like a <pointer>, make a Pointer record
        klass = Pointer
        rec = klass(parser)
    else:
        klass = _tag_class.get(tag, Record)
        rec = klass()
    rec.level = level
    rec.xref_id = xref_id
    rec.tag = tag
    rec.value = value
    rec.sub_records = sub_records
    rec.offset = offset
    rec.dialect = dialect
    return rec
sha: d0e0cceaadf0a84cbf052705e3c27303b12e1757
url: https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L413-L450
partition: train
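
A construction sketch (assuming make_record and DIALECT_DEFAULT are importable from ged4py.model; for tag 'NAME' the factory returns a NameRec, whose freeze() parses the raw value, see the NameRec.freeze record below):

from ged4py.model import make_record, DIALECT_DEFAULT

rec = make_record(level=1, xref_id=None, tag='NAME',
                  value='John /Smith/', sub_records=[],
                  offset=0, dialect=DIALECT_DEFAULT)
rec = rec.freeze()  # the parser calls this after all sub-records are added
print(rec.tag, rec.value)  # NAME and the parsed name tuple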

repo: andy-z/ged4py
path: ged4py/model.py
func_name: Record.sub_tag
language: python
code:

def sub_tag(self, path, follow=True):
    """Returns direct sub-record with given tag name or None.

    Path can be a simple tag name, in which case the first direct
    sub-record of this record with the matching tag is returned. Path
    can also consist of several tags separated by slashes, in which case
    sub-records are searched recursively.

    If `follow` is True then pointer records are resolved and the
    pointed record is used instead of the pointer record; this also
    works for all intermediate records in a path.

    :param str path: tag names separated by slashes.
    :param boolean follow: If True then resolve pointers.
    :return: `Record` instance or `None` if sub-record with a given
        tag does not exist.
    """
    tags = path.split('/')
    rec = self
    for tag in tags:
        recs = [x for x in (rec.sub_records or []) if x.tag == tag]
        if not recs:
            return None
        rec = recs[0]
        if follow and isinstance(rec, Pointer):
            rec = rec.ref
    return rec
sha: d0e0cceaadf0a84cbf052705e3c27303b12e1757
url: https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L69-L95
partition: train

repo: andy-z/ged4py
path: ged4py/model.py
func_name: Record.sub_tag_value
language: python
code:

def sub_tag_value(self, path, follow=True):
    """Returns value of a direct sub-record or None.

    Works as :py:meth:`sub_tag` but returns value of a sub-record
    instead of sub-record itself.

    :param str path: tag names separated by slashes.
    :param boolean follow: If True then resolve pointers.
    :return: String or `None` if sub-record with a given tag does
        not exist.
    """
    rec = self.sub_tag(path, follow)
    if rec:
        return rec.value
    return None
sha: d0e0cceaadf0a84cbf052705e3c27303b12e1757
url: https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L97-L111
partition: train

repo: andy-z/ged4py
path: ged4py/model.py
func_name: Record.sub_tags
language: python
code:

def sub_tags(self, *tags, **kw):
    """Returns list of direct sub-records matching any tag name.

    Unlike :py:meth:`sub_tag` this method does not support hierarchical
    paths. Pointer records are resolved by default; pass `follow=False`
    to disable this.

    :param str tags: Names of the sub-record tag
    :param kw: Keyword arguments, only recognized keyword is `follow`
        with the same meaning as in :py:meth:`sub_tag`.
    :return: List of `Records`, possibly empty.
    """
    records = [x for x in self.sub_records if x.tag in tags]
    if kw.get('follow', True):
        records = [rec.ref if isinstance(rec, Pointer) else rec
                   for rec in records]
    return records
sha: d0e0cceaadf0a84cbf052705e3c27303b12e1757
url: https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L113-L128
partition: train
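
The three accessors above are typically used together when walking a parsed GEDCOM file. A sketch, assuming the GedcomReader API from ged4py.parser and a local file 'family.ged':

from ged4py.parser import GedcomReader

with GedcomReader('family.ged') as parser:
    for indi in parser.records0('INDI'):
        # hierarchical path lookup, pointers resolved along the way
        birth_date = indi.sub_tag_value('BIRT/DATE')
        # first matching sub-record (or None)
        name_rec = indi.sub_tag('NAME')
        # all direct sub-records matching any of the given tags
        events = indi.sub_tags('BIRT', 'DEAT')
        print(indi.xref_id, birth_date, len(events))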

repo: andy-z/ged4py
path: ged4py/model.py
func_name: NameRec.freeze
language: python
code:

def freeze(self):
    """Method called by parser when updates to this record finish.

    :return: self
    """
    # None is the same as empty string
    if self.value is None:
        self.value = ""
    if self.dialect in [DIALECT_ALTREE]:
        name_tuple = parse_name_altree(self)
    elif self.dialect in [DIALECT_MYHERITAGE]:
        name_tuple = parse_name_myher(self)
    elif self.dialect in [DIALECT_ANCESTRIS]:
        name_tuple = parse_name_ancestris(self)
    else:
        name_tuple = split_name(self.value)
    self.value = name_tuple
    return self
sha: d0e0cceaadf0a84cbf052705e3c27303b12e1757
url: https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L200-L217
partition: train

repo: andy-z/ged4py
path: ged4py/model.py
func_name: Name.given
language: python
code:

def given(self):
    """Given name could include both first and middle name"""
    if self._primary.value[0] and self._primary.value[2]:
        return self._primary.value[0] + ' ' + self._primary.value[2]
    return self._primary.value[0] or self._primary.value[2]
sha: d0e0cceaadf0a84cbf052705e3c27303b12e1757
url: https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L268-L272
partition: train

repo: andy-z/ged4py
path: ged4py/model.py
func_name: Name.maiden
language: python
code:

def maiden(self):
    """Maiden last name, can be None"""
    if self._dialect == DIALECT_DEFAULT:
        # for default/unknown dialect try "maiden" name record first
        for name in self._names:
            if name.type == "maiden":
                return name.value[1]
    # rely on NameRec extracting it from other source
    if self._primary and len(self._primary.value) > 3:
        return self._primary.value[3]
    return None
sha: d0e0cceaadf0a84cbf052705e3c27303b12e1757
url: https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L283-L293
partition: train

repo: andy-z/ged4py
path: ged4py/model.py
func_name: Name.order
language: python
code:

def order(self, order):
    """Returns name order key.

    Returns a tuple of two strings that can be compared to another such
    tuple obtained from a different name. Note that if you want
    locale-dependent ordering then you need to compare strings using a
    locale-aware method (e.g. ``locale.strxfrm``).

    :param order: One of the ORDER_* constants.
    :returns: tuple of two strings
    """
    given = self.given
    surname = self.surname
    if order in (ORDER_MAIDEN_GIVEN, ORDER_GIVEN_MAIDEN):
        surname = self.maiden or self.surname

    # We are collating empty names to come after non-empty,
    # so instead of empty we return "2" and add "1" as prefix to others
    given = ("1" + given) if given else "2"
    surname = ("1" + surname) if surname else "2"

    if order in (ORDER_SURNAME_GIVEN, ORDER_MAIDEN_GIVEN):
        return (surname, given)
    elif order in (ORDER_GIVEN_SURNAME, ORDER_GIVEN_MAIDEN):
        return (given, surname)
    else:
        raise ValueError("unexpected order: {}".format(order))
sha: d0e0cceaadf0a84cbf052705e3c27303b12e1757
url: https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L295-L321
partition: train
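
A sorting sketch built on this key; it assumes ORDER_SURNAME_GIVEN is importable from ged4py.model, `individuals` is a list of INDI records from a GedcomReader, and each record's .name returns a Name instance:

import locale

from ged4py.model import ORDER_SURNAME_GIVEN

# locale-aware comparison via strxfrm, as the docstring suggests
by_surname = sorted(
    individuals,
    key=lambda indi: tuple(locale.strxfrm(part)
                           for part in indi.name.order(ORDER_SURNAME_GIVEN)))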

repo: andy-z/ged4py
path: ged4py/model.py
func_name: Name.format
language: python
code:

def format(self):
    """Format name for output.

    :return: Formatted name representation.
    """
    name = self._primary.value[0]
    if self.surname:
        if name:
            name += ' '
        name += self.surname
    if self._primary.value[2]:
        if name:
            name += ' '
        name += self._primary.value[2]
    return name
sha: d0e0cceaadf0a84cbf052705e3c27303b12e1757
url: https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L323-L337
partition: train

repo: frasertweedale/ledgertools
path: ltlib/rule.py
func_name: Rule.match
language: python
code:

def match(self, xn):
    """Processes a transaction against this rule.

    If all conditions are satisfied, a list of outcomes is returned.
    If any condition is unsatisfied, None is returned.
    """
    if all(map(lambda x: x.match(xn), self.conditions)):
        return self.outcomes
    return None
sha: a695f8667d72253e5448693c12f0282d09902aaa
url: https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/rule.py#L152-L160
partition: train
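
A hedged sketch of the all-conditions-must-match pattern; every class below is a hypothetical stand-in, since the real condition, outcome, and transaction classes live elsewhere in ltlib:

class DescriptionContains:
    """Hypothetical condition: matches if a substring occurs in xn.desc."""
    def __init__(self, substring):
        self.substring = substring

    def match(self, xn):
        return self.substring in xn.desc

class SimpleRule:
    """Hypothetical minimal rule mirroring Rule.match's logic."""
    def __init__(self, conditions, outcomes):
        self.conditions = conditions
        self.outcomes = outcomes

    def match(self, xn):
        if all(c.match(xn) for c in self.conditions):
            return self.outcomes
        return None

class Xn:
    def __init__(self, desc):
        self.desc = desc

rule = SimpleRule([DescriptionContains('GROCERY')], ['Expenses:Food'])
print(rule.match(Xn('GROCERY STORE 123')))  # ['Expenses:Food']
print(rule.match(Xn('FUEL')))               # None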

repo: geophysics-ubonn/reda
path: lib/reda/importers/sip04.py
func_name: import_sip04_data_all
language: python
code:

def import_sip04_data_all(data_filename):
    """Import ALL data from the result files

    Parameters
    ----------
    data_filename : string
        Path to .mat or .csv file containing SIP-04 measurement results.
        Note that the .csv file does not contain all data contained in
        the .mat file!

    Returns
    -------
    df_all : :py:class:`pandas.DataFrame`
        The data, contained in a DataFrame
    """
    filename, fformat = os.path.splitext(data_filename)
    if fformat == '.csv':
        print('Import SIP04 data from .csv file')
        df_all = _import_csv_file(data_filename)
    elif fformat == '.mat':
        print('Import SIP04 data from .mat file')
        df_all = _import_mat_file(data_filename)
    else:
        print('Please use .csv or .mat format.')
        df_all = None
    return df_all
sha: 46a939729e40c7c4723315c03679c40761152e9e
url: https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/sip04.py#L94-L121
partition: train

repo: YosaiProject/yosai_alchemystore
path: yosai_alchemystore/meta/meta.py
func_name: init_session
language: python
code:

def init_session(db_url=None, echo=False, engine=None, settings=None):
    """
    A SQLAlchemy Session requires that an engine be initialized if one
    isn't provided.
    """
    if engine is None:
        engine = init_engine(db_url=db_url, echo=echo, settings=settings)
    return sessionmaker(bind=engine)
sha: 6479c159ab2ac357e6b70cdd71a2d673279e86bb
url: https://github.com/YosaiProject/yosai_alchemystore/blob/6479c159ab2ac357e6b70cdd71a2d673279e86bb/yosai_alchemystore/meta/meta.py#L58-L65
partition: train
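
Typical usage, as a sketch with an in-memory SQLite URL; note that init_session returns a session factory (a sessionmaker), not a session:

from yosai_alchemystore.meta.meta import init_session

Session = init_session(db_url='sqlite:///:memory:')
session = Session()
# ... query / persist objects ...
session.close()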

repo: geophysics-ubonn/reda
path: lib/reda/containers/sEIT.py
func_name: importers.import_sip256c
language: python
code:

def import_sip256c(self, filename, settings=None, reciprocal=None,
                   **kwargs):
    """Radic SIP256c data import"""
    if settings is None:
        settings = {}
    # we get no electrode positions (dummy1) and no topography data
    # (dummy2)
    df, dummy1, dummy2 = reda_sip256c.parse_radic_file(
        filename, settings, reciprocal=reciprocal, **kwargs)
    self._add_to_container(df)
    print('Summary:')
    self._describe_data(df)
sha: 46a939729e40c7c4723315c03679c40761152e9e
url: https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L72-L84
partition: train
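
Within reda this importer is reached through the sEIT container. A sketch; the file name is a placeholder and reda.sEIT is assumed to be the exported container class:

import reda

seit = reda.sEIT()
seit.import_sip256c('profile_1.res')
# the imported measurements end up in the container's DataFrame
print(seit.data[['a', 'b', 'm', 'n', 'frequency', 'r']].head())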

repo: geophysics-ubonn/reda
path: lib/reda/containers/sEIT.py
func_name: importers.import_eit_fzj
language: python
code:

def import_eit_fzj(self, filename, configfile, correction_file=None,
                   timestep=None, **kwargs):
    """EIT data import for FZJ Medusa systems"""
    # we get no electrode positions (dummy1) and no topography data
    # (dummy2)
    df_emd, dummy1, dummy2 = eit_fzj.read_3p_data(
        filename,
        configfile,
        **kwargs
    )
    if correction_file is not None:
        eit_fzj_utils.apply_correction_factors(df_emd, correction_file)

    if timestep is not None:
        df_emd['timestep'] = timestep

    self._add_to_container(df_emd)

    print('Summary:')
    self._describe_data(df_emd)
sha: 46a939729e40c7c4723315c03679c40761152e9e
url: https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L86-L105
partition: train

repo: geophysics-ubonn/reda
path: lib/reda/containers/sEIT.py
func_name: sEIT.check_dataframe
language: python
code:

def check_dataframe(self, dataframe):
    """Check the given dataframe for the required columns
    """
    required_columns = (
        'a',
        'b',
        'm',
        'n',
        'r',
    )
    for column in required_columns:
        if column not in dataframe:
            raise Exception('Required column not in dataframe: {0}'.format(
                column
            ))
sha: 46a939729e40c7c4723315c03679c40761152e9e
url: https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L118-L132
partition: train

repo: geophysics-ubonn/reda
path: lib/reda/containers/sEIT.py
func_name: sEIT.query
language: python
code:

def query(self, query, inplace=True):
    """Keep only the rows matching the given query expression (passed
    through to :py:meth:`pandas.DataFrame.query`).
    """
    # TODO: add to queue
    result = self.data.query(query, inplace=inplace)
    return result
sha: 46a939729e40c7c4723315c03679c40761152e9e
url: https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L163-L169
partition: train

repo: geophysics-ubonn/reda
path: lib/reda/containers/sEIT.py
func_name: sEIT.remove_frequencies
language: python
code:

def remove_frequencies(self, fmin, fmax):
    """Remove frequencies outside the interval (fmin, fmax) from the
    dataset
    """
    self.data.query(
        'frequency > {0} and frequency < {1}'.format(fmin, fmax),
        inplace=True
    )
    g = self.data.groupby('frequency')
    print('Remaining frequencies:')
    print(sorted(g.groups.keys()))
sha: 46a939729e40c7c4723315c03679c40761152e9e
url: https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L196-L205
partition: train

repo: geophysics-ubonn/reda
path: lib/reda/containers/sEIT.py
func_name: sEIT.compute_K_analytical
language: python
code:

def compute_K_analytical(self, spacing):
    """Assuming an equal electrode spacing, compute the K-factor over a
    homogeneous half-space.

    For more complex grids, please refer to the module:
    reda.utils.geometric_factors

    Parameters
    ----------
    spacing: float
        Electrode spacing
    """
    assert isinstance(spacing, Number)
    K = geometric_factors.compute_K_analytical(self.data, spacing)
    self.data = geometric_factors.apply_K(self.data, K)
    fix_sign_with_K(self.data)
sha: 46a939729e40c7c4723315c03679c40761152e9e
url: https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L207-L223
partition: train
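
The container methods above chain into a short processing sketch; file name, band limits, and spacing are placeholders:

import reda

seit = reda.sEIT()
seit.import_sip256c('profile_1.res')
# keep only frequencies strictly between 0.1 and 1000 Hz
seit.remove_frequencies(fmin=0.1, fmax=1000)
# keep only rows with positive resistance
seit.query('r > 0')
# geometric factors for a 1 m electrode spacing over a half-space
seit.compute_K_analytical(spacing=1.0)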

repo: geophysics-ubonn/reda
path: lib/reda/containers/sEIT.py
func_name: sEIT.scatter_norrec
language: python
code:

def scatter_norrec(self, filename=None, individual=False):
    """Create a scatter plot for all diff pairs

    Parameters
    ----------
    filename : string, optional
        if given, save plot to file
    individual : bool, optional
        if set to True, return one figure for each row

    Returns
    -------
    fig : :py:class:`matplotlib.Figure.Figure`, or a dict of them (one
        per frequency) if individual is True
        the figure object(s)
    axes : list of matplotlib.axes
        the individual axes
    """
    # NOTE: the filename parameter is accepted but not used below
    # if not otherwise specified, use these column pairs:
    std_diff_labels = {
        'r': 'rdiff',
        'rpha': 'rphadiff',
    }

    diff_labels = std_diff_labels

    # check which columns are present in the data
    labels_to_use = {}
    for key, item in diff_labels.items():
        # only use if BOTH columns are present
        if key in self.data.columns and item in self.data.columns:
            labels_to_use[key] = item

    g_freq = self.data.groupby('frequency')
    frequencies = list(sorted(g_freq.groups.keys()))

    if individual:
        figures = {}
        axes_all = {}
    else:
        Nx = len(labels_to_use.keys())
        Ny = len(frequencies)
        fig, axes = plt.subplots(
            Ny, Nx,
            figsize=(Nx * 2.5, Ny * 2.5)
        )

    for row, (name, item) in enumerate(g_freq):
        if individual:
            fig, axes_row = plt.subplots(
                1, 2, figsize=(16 / 2.54, 6 / 2.54))
        else:
            axes_row = axes[row, :]
        # loop over the various columns
        for col_nr, (key, diff_column) in enumerate(
                sorted(labels_to_use.items())):
            indices = np.where(~np.isnan(item[diff_column]))[0]
            ax = axes_row[col_nr]
            ax.scatter(
                item[key],
                item[diff_column],
            )
            ax.set_xlabel(key)
            ax.set_ylabel(diff_column)
            ax.set_title('N: {}'.format(len(indices)))

        if individual:
            fig.tight_layout()
            figures[name] = fig
            axes_all[name] = axes_row

    if individual:
        return figures, axes_all
    else:
        fig.tight_layout()
        return fig, axes
def scatter_norrec(self, filename=None, individual=False): """Create a scatter plot for all diff pairs Parameters ---------- filename : string, optional if given, save plot to file individual : bool, optional if set to True, return one figure for each row Returns ------- fig : matplotlib.Figure or list of :py:class:`matplotlib.Figure.Figure` objects the figure object axes : list of matplotlib.axes the individual axes """ # if not otherwise specified, use these column pairs: std_diff_labels = { 'r': 'rdiff', 'rpha': 'rphadiff', } diff_labels = std_diff_labels # check which columns are present in the data labels_to_use = {} for key, item in diff_labels.items(): # only use if BOTH columns are present if key in self.data.columns and item in self.data.columns: labels_to_use[key] = item g_freq = self.data.groupby('frequency') frequencies = list(sorted(g_freq.groups.keys())) if individual: figures = {} axes_all = {} else: Nx = len(labels_to_use.keys()) Ny = len(frequencies) fig, axes = plt.subplots( Ny, Nx, figsize=(Nx * 2.5, Ny * 2.5) ) for row, (name, item) in enumerate(g_freq): if individual: fig, axes_row = plt.subplots( 1, 2, figsize=(16 / 2.54, 6 / 2.54)) else: axes_row = axes[row, :] # loop over the various columns for col_nr, (key, diff_column) in enumerate( sorted(labels_to_use.items())): indices = np.where(~np.isnan(item[diff_column]))[0] ax = axes_row[col_nr] ax.scatter( item[key], item[diff_column], ) ax.set_xlabel(key) ax.set_ylabel(diff_column) ax.set_title('N: {}'.format(len(indices))) if individual: fig.tight_layout() figures[name] = fig axes_all[name] = axes_row if individual: return figures, axes_all else: fig.tight_layout() return fig, axes
[ "def", "scatter_norrec", "(", "self", ",", "filename", "=", "None", ",", "individual", "=", "False", ")", ":", "# if not otherwise specified, use these column pairs:", "std_diff_labels", "=", "{", "'r'", ":", "'rdiff'", ",", "'rpha'", ":", "'rphadiff'", ",", "}", "diff_labels", "=", "std_diff_labels", "# check which columns are present in the data", "labels_to_use", "=", "{", "}", "for", "key", ",", "item", "in", "diff_labels", ".", "items", "(", ")", ":", "# only use if BOTH columns are present", "if", "key", "in", "self", ".", "data", ".", "columns", "and", "item", "in", "self", ".", "data", ".", "columns", ":", "labels_to_use", "[", "key", "]", "=", "item", "g_freq", "=", "self", ".", "data", ".", "groupby", "(", "'frequency'", ")", "frequencies", "=", "list", "(", "sorted", "(", "g_freq", ".", "groups", ".", "keys", "(", ")", ")", ")", "if", "individual", ":", "figures", "=", "{", "}", "axes_all", "=", "{", "}", "else", ":", "Nx", "=", "len", "(", "labels_to_use", ".", "keys", "(", ")", ")", "Ny", "=", "len", "(", "frequencies", ")", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "Ny", ",", "Nx", ",", "figsize", "=", "(", "Nx", "*", "2.5", ",", "Ny", "*", "2.5", ")", ")", "for", "row", ",", "(", "name", ",", "item", ")", "in", "enumerate", "(", "g_freq", ")", ":", "if", "individual", ":", "fig", ",", "axes_row", "=", "plt", ".", "subplots", "(", "1", ",", "2", ",", "figsize", "=", "(", "16", "/", "2.54", ",", "6", "/", "2.54", ")", ")", "else", ":", "axes_row", "=", "axes", "[", "row", ",", ":", "]", "# loop over the various columns", "for", "col_nr", ",", "(", "key", ",", "diff_column", ")", "in", "enumerate", "(", "sorted", "(", "labels_to_use", ".", "items", "(", ")", ")", ")", ":", "indices", "=", "np", ".", "where", "(", "~", "np", ".", "isnan", "(", "item", "[", "diff_column", "]", ")", ")", "[", "0", "]", "ax", "=", "axes_row", "[", "col_nr", "]", "ax", ".", "scatter", "(", "item", "[", "key", "]", ",", "item", "[", "diff_column", "]", ",", ")", "ax", ".", "set_xlabel", "(", "key", ")", "ax", ".", "set_ylabel", "(", "diff_column", ")", "ax", ".", "set_title", "(", "'N: {}'", ".", "format", "(", "len", "(", "indices", ")", ")", ")", "if", "individual", ":", "fig", ".", "tight_layout", "(", ")", "figures", "[", "name", "]", "=", "fig", "axes_all", "[", "name", "]", "=", "axes_row", "if", "individual", ":", "return", "figures", ",", "axes_all", "else", ":", "fig", ".", "tight_layout", "(", ")", "return", "fig", ",", "axes" ]
Create a scatter plot for all diff pairs. Parameters ---------- filename : string, optional if given, save plot to file (note: currently not used by the implementation) individual : bool, optional if set to True, return one figure for each frequency (i.e., each row) Returns ------- fig : matplotlib.Figure or dict of :py:class:`matplotlib.Figure.Figure` objects the figure object(s) axes : list or dict of matplotlib.axes the individual axes
[ "Create", "a", "scatter", "plot", "for", "all", "diff", "pairs" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L230-L305
train
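The scatter_norrec record above boils down to a groupby over frequency plus one scatter of each value column against its norrec-difference column. Below is a minimal self-contained sketch of that pattern on synthetic data; the column names r and rdiff mirror the std_diff_labels pairs checked above, while the data itself is made up for illustration.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# synthetic measurements at two frequencies with a nor/rec difference column
rng = np.random.default_rng(42)
df = pd.DataFrame({
    'frequency': np.repeat([1.0, 10.0], 50),
    'r': rng.normal(100, 10, 100),
    'rdiff': rng.normal(0, 1, 100),
})

g = df.groupby('frequency')
fig, axes = plt.subplots(len(g), 1, figsize=(4, 6))
for ax, (freq, item) in zip(np.atleast_1d(axes), g):
    # count the non-NaN differences, as the N in the subplot titles above
    n = np.count_nonzero(~np.isnan(item['rdiff']))
    ax.scatter(item['r'], item['rdiff'])
    ax.set_xlabel('r')
    ax.set_ylabel('rdiff')
    ax.set_title('f: {} Hz, N: {}'.format(freq, n))
fig.tight_layout()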
geophysics-ubonn/reda
lib/reda/containers/sEIT.py
sEIT.get_spectrum
def get_spectrum(self, nr_id=None, abmn=None, plot_filename=None): """Return a spectrum and its reciprocal counter part, if present in the dataset. Optimally, refer to the spectrum by its normal-reciprocal id. Returns ------- spectrum_nor : :py:class:`reda.eis.plots.sip_response` Normal spectrum. None if no normal spectrum is available spectrum_rec : :py:class:`reda.eis.plots.sip_response` or None Reciprocal spectrum. None if no reciprocal spectrum is available fig : :py:class:`matplotlib.Figure.Figure` , optional Figure object (only if plot_filename is set) """ assert nr_id is None or abmn is None # determine nr_id for given abmn tuple if abmn is not None: subdata = self.data.query( 'a == {} and b == {} and m == {} and n == {}'.format(*abmn) ).sort_values('frequency') if subdata.shape[0] == 0: return None, None # determine the norrec-id of this spectrum nr_id = subdata['id'].iloc[0] # get spectra subdata_nor = self.data.query( 'id == {} and norrec=="nor"'.format(nr_id) ).sort_values('frequency') subdata_rec = self.data.query( 'id == {} and norrec=="rec"'.format(nr_id) ).sort_values('frequency') # create spectrum objects spectrum_nor = None spectrum_rec = None if subdata_nor.shape[0] > 0: spectrum_nor = eis_plot.sip_response( frequencies=subdata_nor['frequency'].values, rmag=subdata_nor['r'], rpha=subdata_nor['rpha'], ) if subdata_rec.shape[0] > 0: spectrum_rec = eis_plot.sip_response( frequencies=subdata_rec['frequency'].values, rmag=subdata_rec['r'], rpha=subdata_rec['rpha'], ) if plot_filename is not None: if spectrum_nor is not None: fig = spectrum_nor.plot( plot_filename, reciprocal=spectrum_rec, return_fig=True, title='a: {} b: {} m: {}: n: {}'.format( *subdata_nor[['a', 'b', 'm', 'n']].values[0, :] ) ) return spectrum_nor, spectrum_rec, fig return spectrum_nor, spectrum_rec
python
def get_spectrum(self, nr_id=None, abmn=None, plot_filename=None): """Return a spectrum and its reciprocal counter part, if present in the dataset. Optimally, refer to the spectrum by its normal-reciprocal id. Returns ------- spectrum_nor : :py:class:`reda.eis.plots.sip_response` Normal spectrum. None if no normal spectrum is available spectrum_rec : :py:class:`reda.eis.plots.sip_response` or None Reciprocal spectrum. None if no reciprocal spectrum is available fig : :py:class:`matplotlib.Figure.Figure` , optional Figure object (only if plot_filename is set) """ assert nr_id is None or abmn is None # determine nr_id for given abmn tuple if abmn is not None: subdata = self.data.query( 'a == {} and b == {} and m == {} and n == {}'.format(*abmn) ).sort_values('frequency') if subdata.shape[0] == 0: return None, None # determine the norrec-id of this spectrum nr_id = subdata['id'].iloc[0] # get spectra subdata_nor = self.data.query( 'id == {} and norrec=="nor"'.format(nr_id) ).sort_values('frequency') subdata_rec = self.data.query( 'id == {} and norrec=="rec"'.format(nr_id) ).sort_values('frequency') # create spectrum objects spectrum_nor = None spectrum_rec = None if subdata_nor.shape[0] > 0: spectrum_nor = eis_plot.sip_response( frequencies=subdata_nor['frequency'].values, rmag=subdata_nor['r'], rpha=subdata_nor['rpha'], ) if subdata_rec.shape[0] > 0: spectrum_rec = eis_plot.sip_response( frequencies=subdata_rec['frequency'].values, rmag=subdata_rec['r'], rpha=subdata_rec['rpha'], ) if plot_filename is not None: if spectrum_nor is not None: fig = spectrum_nor.plot( plot_filename, reciprocal=spectrum_rec, return_fig=True, title='a: {} b: {} m: {}: n: {}'.format( *subdata_nor[['a', 'b', 'm', 'n']].values[0, :] ) ) return spectrum_nor, spectrum_rec, fig return spectrum_nor, spectrum_rec
[ "def", "get_spectrum", "(", "self", ",", "nr_id", "=", "None", ",", "abmn", "=", "None", ",", "plot_filename", "=", "None", ")", ":", "assert", "nr_id", "is", "None", "or", "abmn", "is", "None", "# determine nr_id for given abmn tuple", "if", "abmn", "is", "not", "None", ":", "subdata", "=", "self", ".", "data", ".", "query", "(", "'a == {} and b == {} and m == {} and n == {}'", ".", "format", "(", "*", "abmn", ")", ")", ".", "sort_values", "(", "'frequency'", ")", "if", "subdata", ".", "shape", "[", "0", "]", "==", "0", ":", "return", "None", ",", "None", "# determine the norrec-id of this spectrum", "nr_id", "=", "subdata", "[", "'id'", "]", ".", "iloc", "[", "0", "]", "# get spectra", "subdata_nor", "=", "self", ".", "data", ".", "query", "(", "'id == {} and norrec==\"nor\"'", ".", "format", "(", "nr_id", ")", ")", ".", "sort_values", "(", "'frequency'", ")", "subdata_rec", "=", "self", ".", "data", ".", "query", "(", "'id == {} and norrec==\"rec\"'", ".", "format", "(", "nr_id", ")", ")", ".", "sort_values", "(", "'frequency'", ")", "# create spectrum objects", "spectrum_nor", "=", "None", "spectrum_rec", "=", "None", "if", "subdata_nor", ".", "shape", "[", "0", "]", ">", "0", ":", "spectrum_nor", "=", "eis_plot", ".", "sip_response", "(", "frequencies", "=", "subdata_nor", "[", "'frequency'", "]", ".", "values", ",", "rmag", "=", "subdata_nor", "[", "'r'", "]", ",", "rpha", "=", "subdata_nor", "[", "'rpha'", "]", ",", ")", "if", "subdata_rec", ".", "shape", "[", "0", "]", ">", "0", ":", "spectrum_rec", "=", "eis_plot", ".", "sip_response", "(", "frequencies", "=", "subdata_rec", "[", "'frequency'", "]", ".", "values", ",", "rmag", "=", "subdata_rec", "[", "'r'", "]", ",", "rpha", "=", "subdata_rec", "[", "'rpha'", "]", ",", ")", "if", "plot_filename", "is", "not", "None", ":", "if", "spectrum_nor", "is", "not", "None", ":", "fig", "=", "spectrum_nor", ".", "plot", "(", "plot_filename", ",", "reciprocal", "=", "spectrum_rec", ",", "return_fig", "=", "True", ",", "title", "=", "'a: {} b: {} m: {}: n: {}'", ".", "format", "(", "*", "subdata_nor", "[", "[", "'a'", ",", "'b'", ",", "'m'", ",", "'n'", "]", "]", ".", "values", "[", "0", ",", ":", "]", ")", ")", "return", "spectrum_nor", ",", "spectrum_rec", ",", "fig", "return", "spectrum_nor", ",", "spectrum_rec" ]
Return a spectrum and its reciprocal counterpart, if present in the dataset. Preferably, refer to the spectrum by its normal-reciprocal id. Returns ------- spectrum_nor : :py:class:`reda.eis.plots.sip_response` Normal spectrum. None if no normal spectrum is available spectrum_rec : :py:class:`reda.eis.plots.sip_response` or None Reciprocal spectrum. None if no reciprocal spectrum is available fig : :py:class:`matplotlib.Figure.Figure`, optional Figure object (only returned if plot_filename is set)
[ "Return", "a", "spectrum", "and", "its", "reciprocal", "counterpart", "if", "present", "in", "the", "dataset", ".", "Preferably", "refer", "to", "the", "spectrum", "by", "its", "normal", "-", "reciprocal", "id", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L357-L419
train
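A short usage sketch for get_spectrum, assuming a sEIT container named seit that has already been filled with data; the quadrupole, id, and filename below are placeholders.

# look up by quadrupole; (None, None) is returned if the ABMN set is unknown
spec_nor, spec_rec = seit.get_spectrum(abmn=(1, 2, 4, 3))

# look up by normal-reciprocal id; because plot_filename is set (and
# assuming a normal spectrum exists for this id), a figure is returned, too
spec_nor, spec_rec, fig = seit.get_spectrum(
    nr_id=0, plot_filename='spectrum_id_0.png'
)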
geophysics-ubonn/reda
lib/reda/containers/sEIT.py
sEIT.plot_all_spectra
def plot_all_spectra(self, outdir): """This is a convenience function to plot ALL spectra currently stored in the container. It is useful to asses whether data filters do perform correctly. Note that the function just iterates over all ids and plots the corresponding spectra, thus it is slow. Spectra a named using the format: \%.2i_spectrum_id_\{\}.png. Parameters ---------- outdir : string Output directory to store spectra in. Created if it does not exist. """ os.makedirs(outdir, exist_ok=True) g = self.data.groupby('id') for nr, (name, item) in enumerate(g): print( 'Plotting spectrum with id {} ({} / {})'.format( name, nr, len(g.groups.keys())) ) plot_filename = ''.join(( outdir + os.sep, '{:04}_spectrum_id_{}.png'.format(nr, name) )) spec_nor, spec_rec, spec_fig = self.get_spectrum( nr_id=name, plot_filename=plot_filename ) plt.close(spec_fig)
python
def plot_all_spectra(self, outdir): """This is a convenience function to plot ALL spectra currently stored in the container. It is useful to asses whether data filters do perform correctly. Note that the function just iterates over all ids and plots the corresponding spectra, thus it is slow. Spectra a named using the format: \%.2i_spectrum_id_\{\}.png. Parameters ---------- outdir : string Output directory to store spectra in. Created if it does not exist. """ os.makedirs(outdir, exist_ok=True) g = self.data.groupby('id') for nr, (name, item) in enumerate(g): print( 'Plotting spectrum with id {} ({} / {})'.format( name, nr, len(g.groups.keys())) ) plot_filename = ''.join(( outdir + os.sep, '{:04}_spectrum_id_{}.png'.format(nr, name) )) spec_nor, spec_rec, spec_fig = self.get_spectrum( nr_id=name, plot_filename=plot_filename ) plt.close(spec_fig)
[ "def", "plot_all_spectra", "(", "self", ",", "outdir", ")", ":", "os", ".", "makedirs", "(", "outdir", ",", "exist_ok", "=", "True", ")", "g", "=", "self", ".", "data", ".", "groupby", "(", "'id'", ")", "for", "nr", ",", "(", "name", ",", "item", ")", "in", "enumerate", "(", "g", ")", ":", "print", "(", "'Plotting spectrum with id {} ({} / {})'", ".", "format", "(", "name", ",", "nr", ",", "len", "(", "g", ".", "groups", ".", "keys", "(", ")", ")", ")", ")", "plot_filename", "=", "''", ".", "join", "(", "(", "outdir", "+", "os", ".", "sep", ",", "'{:04}_spectrum_id_{}.png'", ".", "format", "(", "nr", ",", "name", ")", ")", ")", "spec_nor", ",", "spec_rec", ",", "spec_fig", "=", "self", ".", "get_spectrum", "(", "nr_id", "=", "name", ",", "plot_filename", "=", "plot_filename", ")", "plt", ".", "close", "(", "spec_fig", ")" ]
This is a convenience function to plot ALL spectra currently stored in the container. It is useful to assess whether data filters perform correctly. Note that the function just iterates over all ids and plots the corresponding spectra, thus it is slow. Spectra are named using the format: \{:04\}_spectrum_id_\{\}.png. Parameters ---------- outdir : string Output directory to store spectra in. Created if it does not exist.
[ "This", "is", "a", "convenience", "function", "to", "plot", "ALL", "spectra", "currently", "stored", "in", "the", "container", ".", "It", "is", "useful", "to", "assess", "whether", "data", "filters", "perform", "correctly", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L421-L453
train
geophysics-ubonn/reda
lib/reda/containers/sEIT.py
sEIT.plot_pseudosections
def plot_pseudosections(self, column, filename=None, return_fig=False): """Create a multi-plot with one pseudosection for each frequency. Parameters ---------- column : string which column to plot filename : None|string output filename. If set to None, do not write to file. Default: None return_fig : bool if True, return the generated figure object. Default: False Returns ------- fig : None|matplotlib.Figure if return_fig is set to True, return the generated Figure object """ assert column in self.data.columns g = self.data.groupby('frequency') fig, axes = plt.subplots( 4, 2, figsize=(15 / 2.54, 20 / 2.54), sharex=True, sharey=True ) for ax, (key, item) in zip(axes.flat, g): fig, ax, cb = PS.plot_pseudosection_type2( item, ax=ax, column=column ) ax.set_title('f: {} Hz'.format(key)) fig.tight_layout() if filename is not None: fig.savefig(filename, dpi=300) if return_fig: return fig else: plt.close(fig)
python
def plot_pseudosections(self, column, filename=None, return_fig=False): """Create a multi-plot with one pseudosection for each frequency. Parameters ---------- column : string which column to plot filename : None|string output filename. If set to None, do not write to file. Default: None return_fig : bool if True, return the generated figure object. Default: False Returns ------- fig : None|matplotlib.Figure if return_fig is set to True, return the generated Figure object """ assert column in self.data.columns g = self.data.groupby('frequency') fig, axes = plt.subplots( 4, 2, figsize=(15 / 2.54, 20 / 2.54), sharex=True, sharey=True ) for ax, (key, item) in zip(axes.flat, g): fig, ax, cb = PS.plot_pseudosection_type2( item, ax=ax, column=column ) ax.set_title('f: {} Hz'.format(key)) fig.tight_layout() if filename is not None: fig.savefig(filename, dpi=300) if return_fig: return fig else: plt.close(fig)
[ "def", "plot_pseudosections", "(", "self", ",", "column", ",", "filename", "=", "None", ",", "return_fig", "=", "False", ")", ":", "assert", "column", "in", "self", ".", "data", ".", "columns", "g", "=", "self", ".", "data", ".", "groupby", "(", "'frequency'", ")", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "4", ",", "2", ",", "figsize", "=", "(", "15", "/", "2.54", ",", "20", "/", "2.54", ")", ",", "sharex", "=", "True", ",", "sharey", "=", "True", ")", "for", "ax", ",", "(", "key", ",", "item", ")", "in", "zip", "(", "axes", ".", "flat", ",", "g", ")", ":", "fig", ",", "ax", ",", "cb", "=", "PS", ".", "plot_pseudosection_type2", "(", "item", ",", "ax", "=", "ax", ",", "column", "=", "column", ")", "ax", ".", "set_title", "(", "'f: {} Hz'", ".", "format", "(", "key", ")", ")", "fig", ".", "tight_layout", "(", ")", "if", "filename", "is", "not", "None", ":", "fig", ".", "savefig", "(", "filename", ",", "dpi", "=", "300", ")", "if", "return_fig", ":", "return", "fig", "else", ":", "plt", ".", "close", "(", "fig", ")" ]
Create a multi-plot with one pseudosection for each frequency. Parameters ---------- column : string which column to plot filename : None|string output filename. If set to None, do not write to file. Default: None return_fig : bool if True, return the generated figure object. Default: False Returns ------- fig : None|matplotlib.Figure if return_fig is set to True, return the generated Figure object
[ "Create", "a", "multi", "-", "plot", "with", "one", "pseudosection", "for", "each", "frequency", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L455-L493
train
geophysics-ubonn/reda
lib/reda/containers/sEIT.py
sEIT.export_to_directory_crtomo
def export_to_directory_crtomo(self, directory, norrec='norrec'): """Export the sEIT data into data files that can be read by CRTomo. Parameters ---------- directory : string output directory. will be created if required norrec : string (nor|rec|norrec) Which data to export. Default: norrec """ exporter_crtomo.write_files_to_directory( self.data, directory, norrec=norrec )
python
def export_to_directory_crtomo(self, directory, norrec='norrec'): """Export the sEIT data into data files that can be read by CRTomo. Parameters ---------- directory : string output directory. will be created if required norrec : string (nor|rec|norrec) Which data to export. Default: norrec """ exporter_crtomo.write_files_to_directory( self.data, directory, norrec=norrec )
[ "def", "export_to_directory_crtomo", "(", "self", ",", "directory", ",", "norrec", "=", "'norrec'", ")", ":", "exporter_crtomo", ".", "write_files_to_directory", "(", "self", ".", "data", ",", "directory", ",", "norrec", "=", "norrec", ")" ]
Export the sEIT data into data files that can be read by CRTomo. Parameters ---------- directory : string Output directory. Will be created if required. norrec : string (nor|rec|norrec) Which data to export. Default: norrec
[ "Export", "the", "sEIT", "data", "into", "data", "files", "that", "can", "be", "read", "by", "CRTomo", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L495-L508
train
geophysics-ubonn/reda
lib/reda/containers/sEIT.py
sEIT.export_to_crtomo_seit_manager
def export_to_crtomo_seit_manager(self, grid): """Return a ready-initialized seit-manager object from the CRTomo tools. This function only works if the crtomo_tools are installed. """ import crtomo g = self.data.groupby('frequency') seit_data = {} for name, item in g: print(name, item.shape, item.size) if item.shape[0] > 0: seit_data[name] = item[ ['a', 'b', 'm', 'n', 'r', 'rpha'] ].values seit = crtomo.eitMan(grid=grid, seit_data=seit_data) return seit
python
def export_to_crtomo_seit_manager(self, grid): """Return a ready-initialized seit-manager object from the CRTomo tools. This function only works if the crtomo_tools are installed. """ import crtomo g = self.data.groupby('frequency') seit_data = {} for name, item in g: print(name, item.shape, item.size) if item.shape[0] > 0: seit_data[name] = item[ ['a', 'b', 'm', 'n', 'r', 'rpha'] ].values seit = crtomo.eitMan(grid=grid, seit_data=seit_data) return seit
[ "def", "export_to_crtomo_seit_manager", "(", "self", ",", "grid", ")", ":", "import", "crtomo", "g", "=", "self", ".", "data", ".", "groupby", "(", "'frequency'", ")", "seit_data", "=", "{", "}", "for", "name", ",", "item", "in", "g", ":", "print", "(", "name", ",", "item", ".", "shape", ",", "item", ".", "size", ")", "if", "item", ".", "shape", "[", "0", "]", ">", "0", ":", "seit_data", "[", "name", "]", "=", "item", "[", "[", "'a'", ",", "'b'", ",", "'m'", ",", "'n'", ",", "'r'", ",", "'rpha'", "]", "]", ".", "values", "seit", "=", "crtomo", ".", "eitMan", "(", "grid", "=", "grid", ",", "seit_data", "=", "seit_data", ")", "return", "seit" ]
Return a ready-initialized seit-manager object from the CRTomo tools. This function only works if the crtomo_tools are installed.
[ "Return", "a", "ready", "-", "initialized", "seit", "-", "manager", "object", "from", "the", "CRTomo", "tools", ".", "This", "function", "only", "works", "if", "the", "crtomo_tools", "are", "installed", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L510-L524
train
joelbm24/brainy
lib/bfinter.py
Brainy.get_tape
def get_tape(self, start=0, end=10): '''Pretty prints the tape values''' self.tape_start = start self.tape_end = end self.tape_length = end - start tmp = '\n'+"|"+str(start)+"| " for i in range(start, min(end, len(self.tape))): if i == self.cur_cell: tmp += "[" + str(self.tape[i]) + "] " else: tmp += ":" + str(self.tape[i]) + ": " tmp += " |"+str(end)+"|" return tmp
python
def get_tape(self, start=0, end=10): '''Pretty prints the tape values''' self.tape_start = start self.tape_end = end self.tape_length = end - start tmp = '\n'+"|"+str(start)+"| " for i in range(start, min(end, len(self.tape))): if i == self.cur_cell: tmp += "[" + str(self.tape[i]) + "] " else: tmp += ":" + str(self.tape[i]) + ": " tmp += " |"+str(end)+"|" return tmp
[ "def", "get_tape", "(", "self", ",", "start", "=", "0", ",", "end", "=", "10", ")", ":", "self", ".", "tape_start", "=", "start", "self", ".", "tape_end", "=", "end", "self", ".", "tape_length", "=", "end", "-", "start", "tmp", "=", "'\\n'", "+", "\"|\"", "+", "str", "(", "start", ")", "+", "\"| \"", "for", "i", "in", "range", "(", "start", ",", "min", "(", "end", ",", "len", "(", "self", ".", "tape", ")", ")", ")", ":", "if", "i", "==", "self", ".", "cur_cell", ":", "tmp", "+=", "\"[\"", "+", "str", "(", "self", ".", "tape", "[", "i", "]", ")", "+", "\"] \"", "else", ":", "tmp", "+=", "\":\"", "+", "str", "(", "self", ".", "tape", "[", "i", "]", ")", "+", "\": \"", "tmp", "+=", "\" |\"", "+", "str", "(", "end", ")", "+", "\"|\"", "return", "tmp" ]
Pretty prints the tape values
[ "Pretty", "prints", "the", "tape", "values" ]
bc3e1d6e020f1bb884a9bbbda834dac3a7a7fdb4
https://github.com/joelbm24/brainy/blob/bc3e1d6e020f1bb884a9bbbda834dac3a7a7fdb4/lib/bfinter.py#L79-L90
train
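The bracket-the-current-cell formatting of get_tape is easy to exercise in isolation. A standalone sketch of the same logic follows; the name format_tape and the plain-list tape are illustrative, not part of the brainy API.

def format_tape(tape, cur_cell, start=0, end=10):
    '''Render tape[start:end], marking the current cell with brackets.'''
    out = '|' + str(start) + '| '
    for i in range(start, min(end, len(tape))):
        if i == cur_cell:
            out += '[' + str(tape[i]) + '] '
        else:
            out += ':' + str(tape[i]) + ': '
    return out + ' |' + str(end) + '|'

print(format_tape([0, 1, 2, 3, 4], cur_cell=2, start=0, end=5))
# |0| :0: :1: [2] :3: :4:  |5|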
geophysics-ubonn/reda
lib/reda/containers/SIP.py
importers.import_sip04
def import_sip04(self, filename, timestep=None): """SIP04 data import Parameters ---------- filename: string Path to .mat or .csv file containing SIP-04 measurement results Examples -------- :: import tempfile import reda with tempfile.TemporaryDirectory() as fid: reda.data.download_data('sip04_fs_01', fid) sip = reda.SIP() sip.import_sip04(fid + '/sip_dataA.mat') """ df = reda_sip04.import_sip04_data(filename) if timestep is not None: print('adding timestep') df['timestep'] = timestep self._add_to_container(df) print('Summary:') self._describe_data(df)
python
def import_sip04(self, filename, timestep=None): """SIP04 data import Parameters ---------- filename: string Path to .mat or .csv file containing SIP-04 measurement results Examples -------- :: import tempfile import reda with tempfile.TemporaryDirectory() as fid: reda.data.download_data('sip04_fs_01', fid) sip = reda.SIP() sip.import_sip04(fid + '/sip_dataA.mat') """ df = reda_sip04.import_sip04_data(filename) if timestep is not None: print('adding timestep') df['timestep'] = timestep self._add_to_container(df) print('Summary:') self._describe_data(df)
[ "def", "import_sip04", "(", "self", ",", "filename", ",", "timestep", "=", "None", ")", ":", "df", "=", "reda_sip04", ".", "import_sip04_data", "(", "filename", ")", "if", "timestep", "is", "not", "None", ":", "print", "(", "'adding timestep'", ")", "df", "[", "'timestep'", "]", "=", "timestep", "self", ".", "_add_to_container", "(", "df", ")", "print", "(", "'Summary:'", ")", "self", ".", "_describe_data", "(", "df", ")" ]
SIP04 data import Parameters ---------- filename: string Path to .mat or .csv file containing SIP-04 measurement results Examples -------- :: import tempfile import reda with tempfile.TemporaryDirectory() as fid: reda.data.download_data('sip04_fs_01', fid) sip = reda.SIP() sip.import_sip04(fid + '/sip_dataA.mat')
[ "SIP04", "data", "import" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/SIP.py#L26-L54
train
geophysics-ubonn/reda
lib/reda/containers/SIP.py
SIP.check_dataframe
def check_dataframe(self, dataframe): """Check the given dataframe for the required type and columns """ if dataframe is None: return None # is this a DataFrame if not isinstance(dataframe, pd.DataFrame): raise Exception( 'The provided dataframe object is not a pandas.DataFrame' ) for column in self.required_columns: if column not in dataframe: raise Exception('Required column not in dataframe: {0}'.format( column )) return dataframe
python
def check_dataframe(self, dataframe): """Check the given dataframe for the required type and columns """ if dataframe is None: return None # is this a DataFrame if not isinstance(dataframe, pd.DataFrame): raise Exception( 'The provided dataframe object is not a pandas.DataFrame' ) for column in self.required_columns: if column not in dataframe: raise Exception('Required column not in dataframe: {0}'.format( column )) return dataframe
[ "def", "check_dataframe", "(", "self", ",", "dataframe", ")", ":", "if", "dataframe", "is", "None", ":", "return", "None", "# is this a DataFrame", "if", "not", "isinstance", "(", "dataframe", ",", "pd", ".", "DataFrame", ")", ":", "raise", "Exception", "(", "'The provided dataframe object is not a pandas.DataFrame'", ")", "for", "column", "in", "self", ".", "required_columns", ":", "if", "column", "not", "in", "dataframe", ":", "raise", "Exception", "(", "'Required column not in dataframe: {0}'", ".", "format", "(", "column", ")", ")", "return", "dataframe" ]
Check the given dataframe for the required type and columns
[ "Check", "the", "given", "dataframe", "for", "the", "required", "type", "and", "columns" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/SIP.py#L73-L90
train
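check_dataframe is a plain required-column guard. Here is a self-contained sketch of the same validation pattern; the required column names chosen below are illustrative and not necessarily SIP's actual required_columns.

import pandas as pd

def check_required_columns(df, required=('a', 'b', 'm', 'n', 'r')):
    '''Raise if df is not a DataFrame or lacks any required column.'''
    if not isinstance(df, pd.DataFrame):
        raise TypeError('expected a pandas.DataFrame')
    missing = [col for col in required if col not in df.columns]
    if missing:
        raise ValueError('required columns missing: {}'.format(missing))
    return df

df = pd.DataFrame({'a': [1], 'b': [2], 'm': [3], 'n': [4], 'r': [10.5]})
check_required_columns(df)  # passes silently and returns df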
geophysics-ubonn/reda
lib/reda/containers/SIP.py
SIP.reduce_duplicate_frequencies
def reduce_duplicate_frequencies(self): """In case multiple frequencies were measured, average them and compute std, min, max values for zt. In case timesteps were added (i.e., multiple separate measurements), group over those and average for each timestep. Examples -------- :: import tempfile import reda with tempfile.TemporaryDirectory() as fid: reda.data.download_data('sip04_fs_06', fid) sip = reda.SIP() sip.import_sip04(fid + '/sip_dataA.mat', timestep=0) # well, add the spectrum again as another timestep sip.import_sip04(fid + '/sip_dataA.mat', timestep=1) df = sip.reduce_duplicate_frequencies() """ group_keys = ['frequency', ] if 'timestep' in self.data.columns: group_keys = group_keys + ['timestep', ] g = self.data.groupby(group_keys) def group_apply(item): y = item[['zt_1', 'zt_2', 'zt_3']].values.flatten() zt_imag_std = np.std(y.imag) zt_real_std = np.std(y.real) zt_imag_min = np.min(y.imag) zt_real_min = np.min(y.real) zt_imag_max = np.max(y.imag) zt_real_max = np.max(y.real) zt_imag_mean = np.mean(y.imag) zt_real_mean = np.mean(y.real) dfn = pd.DataFrame( { 'zt_real_mean': zt_real_mean, 'zt_real_std': zt_real_std, 'zt_real_min': zt_real_min, 'zt_real_max': zt_real_max, 'zt_imag_mean': zt_imag_mean, 'zt_imag_std': zt_imag_std, 'zt_imag_min': zt_imag_min, 'zt_imag_max': zt_imag_max, }, index=[0, ] ) dfn['count'] = len(y) dfn.index.name = 'index' return dfn p = g.apply(group_apply) p.index = p.index.droplevel('index') if len(group_keys) > 1: p = p.swaplevel(0, 1).sort_index() return p
python
def reduce_duplicate_frequencies(self): """In case multiple frequencies were measured, average them and compute std, min, max values for zt. In case timesteps were added (i.e., multiple separate measurements), group over those and average for each timestep. Examples -------- :: import tempfile import reda with tempfile.TemporaryDirectory() as fid: reda.data.download_data('sip04_fs_06', fid) sip = reda.SIP() sip.import_sip04(fid + '/sip_dataA.mat', timestep=0) # well, add the spectrum again as another timestep sip.import_sip04(fid + '/sip_dataA.mat', timestep=1) df = sip.reduce_duplicate_frequencies() """ group_keys = ['frequency', ] if 'timestep' in self.data.columns: group_keys = group_keys + ['timestep', ] g = self.data.groupby(group_keys) def group_apply(item): y = item[['zt_1', 'zt_2', 'zt_3']].values.flatten() zt_imag_std = np.std(y.imag) zt_real_std = np.std(y.real) zt_imag_min = np.min(y.imag) zt_real_min = np.min(y.real) zt_imag_max = np.max(y.imag) zt_real_max = np.max(y.real) zt_imag_mean = np.mean(y.imag) zt_real_mean = np.mean(y.real) dfn = pd.DataFrame( { 'zt_real_mean': zt_real_mean, 'zt_real_std': zt_real_std, 'zt_real_min': zt_real_min, 'zt_real_max': zt_real_max, 'zt_imag_mean': zt_imag_mean, 'zt_imag_std': zt_imag_std, 'zt_imag_min': zt_imag_min, 'zt_imag_max': zt_imag_max, }, index=[0, ] ) dfn['count'] = len(y) dfn.index.name = 'index' return dfn p = g.apply(group_apply) p.index = p.index.droplevel('index') if len(group_keys) > 1: p = p.swaplevel(0, 1).sort_index() return p
[ "def", "reduce_duplicate_frequencies", "(", "self", ")", ":", "group_keys", "=", "[", "'frequency'", ",", "]", "if", "'timestep'", "in", "self", ".", "data", ".", "columns", ":", "group_keys", "=", "group_keys", "+", "[", "'timestep'", ",", "]", "g", "=", "self", ".", "data", ".", "groupby", "(", "group_keys", ")", "def", "group_apply", "(", "item", ")", ":", "y", "=", "item", "[", "[", "'zt_1'", ",", "'zt_2'", ",", "'zt_3'", "]", "]", ".", "values", ".", "flatten", "(", ")", "zt_imag_std", "=", "np", ".", "std", "(", "y", ".", "imag", ")", "zt_real_std", "=", "np", ".", "std", "(", "y", ".", "real", ")", "zt_imag_min", "=", "np", ".", "min", "(", "y", ".", "imag", ")", "zt_real_min", "=", "np", ".", "min", "(", "y", ".", "real", ")", "zt_imag_max", "=", "np", ".", "max", "(", "y", ".", "imag", ")", "zt_real_max", "=", "np", ".", "max", "(", "y", ".", "real", ")", "zt_imag_mean", "=", "np", ".", "mean", "(", "y", ".", "imag", ")", "zt_real_mean", "=", "np", ".", "mean", "(", "y", ".", "real", ")", "dfn", "=", "pd", ".", "DataFrame", "(", "{", "'zt_real_mean'", ":", "zt_real_mean", ",", "'zt_real_std'", ":", "zt_real_std", ",", "'zt_real_min'", ":", "zt_real_min", ",", "'zt_real_max'", ":", "zt_real_max", ",", "'zt_imag_mean'", ":", "zt_imag_mean", ",", "'zt_imag_std'", ":", "zt_imag_std", ",", "'zt_imag_min'", ":", "zt_imag_min", ",", "'zt_imag_max'", ":", "zt_imag_max", ",", "}", ",", "index", "=", "[", "0", ",", "]", ")", "dfn", "[", "'count'", "]", "=", "len", "(", "y", ")", "dfn", ".", "index", ".", "name", "=", "'index'", "return", "dfn", "p", "=", "g", ".", "apply", "(", "group_apply", ")", "p", ".", "index", "=", "p", ".", "index", ".", "droplevel", "(", "'index'", ")", "if", "len", "(", "group_keys", ")", ">", "1", ":", "p", "=", "p", ".", "swaplevel", "(", "0", ",", "1", ")", ".", "sort_index", "(", ")", "return", "p" ]
In case multiple frequencies were measured, average them and compute std, min, max values for zt. In case timesteps were added (i.e., multiple separate measurements), group over those and average for each timestep. Examples -------- :: import tempfile import reda with tempfile.TemporaryDirectory() as fid: reda.data.download_data('sip04_fs_06', fid) sip = reda.SIP() sip.import_sip04(fid + '/sip_dataA.mat', timestep=0) # well, add the spectrum again as another timestep sip.import_sip04(fid + '/sip_dataA.mat', timestep=1) df = sip.reduce_duplicate_frequencies()
[ "In", "case", "multiple", "frequencies", "were", "measured", "average", "them", "and", "compute", "std", "min", "max", "values", "for", "zt", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/SIP.py#L92-L153
train
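The heart of reduce_duplicate_frequencies is a groupby-apply that flattens the three complex zt readings per row and summarizes their real and imaginary parts. A runnable sketch of that reduction on synthetic data, computing only a subset of the statistics used above.

import numpy as np
import pandas as pd

# two repeated rows per frequency, each holding three complex zt readings
df = pd.DataFrame({
    'frequency': [1.0, 1.0, 10.0, 10.0],
    'zt_1': np.array([10 + 1j, 11 + 1j, 5 + 2j, 6 + 2j]),
    'zt_2': np.array([10 + 2j, 11 + 2j, 5 + 1j, 6 + 1j]),
    'zt_3': np.array([12 + 1j, 10 + 1j, 7 + 2j, 4 + 2j]),
})

def summarize(item):
    y = item[['zt_1', 'zt_2', 'zt_3']].values.flatten()
    return pd.Series({
        'zt_real_mean': np.mean(y.real),
        'zt_real_std': np.std(y.real),
        'zt_imag_mean': np.mean(y.imag),
        'zt_imag_std': np.std(y.imag),
        'count': len(y),
    })

print(df.groupby('frequency').apply(summarize))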
cloudbase/python-hnvclient
hnv/config/factory.py
_load_class
def _load_class(class_path): """Load the module and return the required class.""" parts = class_path.rsplit('.', 1) module = __import__(parts[0], fromlist=parts[1]) return getattr(module, parts[1])
python
def _load_class(class_path): """Load the module and return the required class.""" parts = class_path.rsplit('.', 1) module = __import__(parts[0], fromlist=parts[1]) return getattr(module, parts[1])
[ "def", "_load_class", "(", "class_path", ")", ":", "parts", "=", "class_path", ".", "rsplit", "(", "'.'", ",", "1", ")", "module", "=", "__import__", "(", "parts", "[", "0", "]", ",", "fromlist", "=", "parts", "[", "1", "]", ")", "return", "getattr", "(", "module", ",", "parts", "[", "1", "]", ")" ]
Load the module and return the required class.
[ "Load", "the", "module", "and", "return", "the", "required", "class", "." ]
b019452af01db22629809b8930357a2ebf6494be
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/config/factory.py#L22-L26
train
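_load_class is the classic dotted-path dynamic import. A standalone sketch that can be verified against the standard library; using rpartition instead of rsplit, and a list for fromlist, are cosmetic variations.

def load_class(class_path):
    '''Import the module part of a dotted path and return its attribute.'''
    module_path, _, attr = class_path.rpartition('.')
    module = __import__(module_path, fromlist=[attr])
    return getattr(module, attr)

OrderedDict = load_class('collections.OrderedDict')
print(OrderedDict([('a', 1)]))  # OrderedDict([('a', 1)])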
RobersonLab/motif_scraper
motif_scraper/__init__.py
rev_comp
def rev_comp( seq, molecule='dna' ): """ DNA|RNA seq -> reverse complement """ if molecule == 'dna': nuc_dict = { "A":"T", "B":"V", "C":"G", "D":"H", "G":"C", "H":"D", "K":"M", "M":"K", "N":"N", "R":"Y", "S":"S", "T":"A", "V":"B", "W":"W", "Y":"R" } elif molecule == 'rna': nuc_dict = { "A":"U", "B":"V", "C":"G", "D":"H", "G":"C", "H":"D", "K":"M", "M":"K", "N":"N", "R":"Y", "S":"S", "U":"A", "V":"B", "W":"W", "Y":"R" } else: raise ValueError( "rev_comp requires molecule to be dna or rna" ) if not isinstance( seq, six.string_types ): raise TypeError( "seq must be a string!" ) return ''.join( [ nuc_dict[c] for c in seq.upper()[::-1] ] )
python
def rev_comp( seq, molecule='dna' ): """ DNA|RNA seq -> reverse complement """ if molecule == 'dna': nuc_dict = { "A":"T", "B":"V", "C":"G", "D":"H", "G":"C", "H":"D", "K":"M", "M":"K", "N":"N", "R":"Y", "S":"S", "T":"A", "V":"B", "W":"W", "Y":"R" } elif molecule == 'rna': nuc_dict = { "A":"U", "B":"V", "C":"G", "D":"H", "G":"C", "H":"D", "K":"M", "M":"K", "N":"N", "R":"Y", "S":"S", "U":"A", "V":"B", "W":"W", "Y":"R" } else: raise ValueError( "rev_comp requires molecule to be dna or rna" ) if not isinstance( seq, six.string_types ): raise TypeError( "seq must be a string!" ) return ''.join( [ nuc_dict[c] for c in seq.upper()[::-1] ] )
[ "def", "rev_comp", "(", "seq", ",", "molecule", "=", "'dna'", ")", ":", "if", "molecule", "==", "'dna'", ":", "nuc_dict", "=", "{", "\"A\"", ":", "\"T\"", ",", "\"B\"", ":", "\"V\"", ",", "\"C\"", ":", "\"G\"", ",", "\"D\"", ":", "\"H\"", ",", "\"G\"", ":", "\"C\"", ",", "\"H\"", ":", "\"D\"", ",", "\"K\"", ":", "\"M\"", ",", "\"M\"", ":", "\"K\"", ",", "\"N\"", ":", "\"N\"", ",", "\"R\"", ":", "\"Y\"", ",", "\"S\"", ":", "\"S\"", ",", "\"T\"", ":", "\"A\"", ",", "\"V\"", ":", "\"B\"", ",", "\"W\"", ":", "\"W\"", ",", "\"Y\"", ":", "\"R\"", "}", "elif", "molecule", "==", "'rna'", ":", "nuc_dict", "=", "{", "\"A\"", ":", "\"U\"", ",", "\"B\"", ":", "\"V\"", ",", "\"C\"", ":", "\"G\"", ",", "\"D\"", ":", "\"H\"", ",", "\"G\"", ":", "\"C\"", ",", "\"H\"", ":", "\"D\"", ",", "\"K\"", ":", "\"M\"", ",", "\"M\"", ":", "\"K\"", ",", "\"N\"", ":", "\"N\"", ",", "\"R\"", ":", "\"Y\"", ",", "\"S\"", ":", "\"S\"", ",", "\"U\"", ":", "\"A\"", ",", "\"V\"", ":", "\"B\"", ",", "\"W\"", ":", "\"W\"", ",", "\"Y\"", ":", "\"R\"", "}", "else", ":", "raise", "ValueError", "(", "\"rev_comp requires molecule to be dna or rna\"", ")", "if", "not", "isinstance", "(", "seq", ",", "six", ".", "string_types", ")", ":", "raise", "TypeError", "(", "\"seq must be a string!\"", ")", "return", "''", ".", "join", "(", "[", "nuc_dict", "[", "c", "]", "for", "c", "in", "seq", ".", "upper", "(", ")", "[", ":", ":", "-", "1", "]", "]", ")" ]
DNA|RNA seq -> reverse complement
[ "DNA|RNA", "seq", "-", ">", "reverse", "complement" ]
382dcb5932d9750282906c356ca35e802bd68bd0
https://github.com/RobersonLab/motif_scraper/blob/382dcb5932d9750282906c356ca35e802bd68bd0/motif_scraper/__init__.py#L28-L42
train
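A quick standalone check of the reverse-complement idea behind rev_comp, restricted to unambiguous DNA plus N for brevity; rev_comp_simple is illustrative, while the real function also covers the IUPAC ambiguity codes and RNA.

DNA_COMPLEMENT = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}

def rev_comp_simple(seq):
    '''Reverse complement for plain DNA plus N.'''
    return ''.join(DNA_COMPLEMENT[c] for c in seq.upper()[::-1])

assert rev_comp_simple('GATTACA') == 'TGTAATC'
assert rev_comp_simple('acgtn') == 'NACGT'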
miedzinski/google-oauth
google_oauth/service.py
ServiceAccount.from_json
def from_json(cls, key, scopes, subject=None): """Alternate constructor intended for using JSON format of private key. Args: key (dict) - Parsed JSON with service account credentials. scopes (Union[str, collections.Iterable[str]]) - List of permissions that the application requests. subject (str) - The email address of the user for which the application is requesting delegated access. Returns: ServiceAccount """ credentials_type = key['type'] if credentials_type != 'service_account': raise ValueError('key: expected type service_account ' '(got %s)' % credentials_type) email = key['client_email'] key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, key['private_key']) return cls(key=key, email=email, scopes=scopes, subject=subject)
python
def from_json(cls, key, scopes, subject=None): """Alternate constructor intended for using JSON format of private key. Args: key (dict) - Parsed JSON with service account credentials. scopes (Union[str, collections.Iterable[str]]) - List of permissions that the application requests. subject (str) - The email address of the user for which the application is requesting delegated access. Returns: ServiceAccount """ credentials_type = key['type'] if credentials_type != 'service_account': raise ValueError('key: expected type service_account ' '(got %s)' % credentials_type) email = key['client_email'] key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, key['private_key']) return cls(key=key, email=email, scopes=scopes, subject=subject)
[ "def", "from_json", "(", "cls", ",", "key", ",", "scopes", ",", "subject", "=", "None", ")", ":", "credentials_type", "=", "key", "[", "'type'", "]", "if", "credentials_type", "!=", "'service_account'", ":", "raise", "ValueError", "(", "'key: expected type service_account '", "'(got %s)'", "%", "credentials_type", ")", "email", "=", "key", "[", "'client_email'", "]", "key", "=", "OpenSSL", ".", "crypto", ".", "load_privatekey", "(", "OpenSSL", ".", "crypto", ".", "FILETYPE_PEM", ",", "key", "[", "'private_key'", "]", ")", "return", "cls", "(", "key", "=", "key", ",", "email", "=", "email", ",", "scopes", "=", "scopes", ",", "subject", "=", "subject", ")" ]
Alternate constructor intended for using JSON format of private key. Args: key (dict) - Parsed JSON with service account credentials. scopes (Union[str, collections.Iterable[str]]) - List of permissions that the application requests. subject (str) - The email address of the user for which the application is requesting delegated access. Returns: ServiceAccount
[ "Alternate", "constructor", "intended", "for", "using", "JSON", "format", "of", "private", "key", "." ]
aef2e19d87281b1d8e42d6b158111e14e80128db
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L75-L96
train
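A usage sketch for from_json. The dict layout follows what the constructor reads ('type' must be 'service_account'; 'client_email' and 'private_key' are extracted before OpenSSL parses the key). All values below are placeholders, and the truncated PEM would of course be rejected in a real run.

from google_oauth.service import ServiceAccount

key = {
    'type': 'service_account',
    'client_email': 'bot@project.iam.gserviceaccount.com',
    'private_key': '-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n',
}
auth = ServiceAccount.from_json(
    key=key,
    scopes=['https://www.googleapis.com/auth/plus.login'],
    subject='user@example.com',  # optional: delegated access
)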
miedzinski/google-oauth
google_oauth/service.py
ServiceAccount.from_pkcs12
def from_pkcs12(cls, key, email, scopes, subject=None, passphrase=PKCS12_PASSPHRASE): """Alternate constructor intended for using .p12 files. Args: key (dict) - Parsed JSON with service account credentials. email (str) - Service account email. scopes (Union[str, collections.Iterable[str]]) - List of permissions that the application requests. subject (str) - The email address of the user for which the application is requesting delegated access. passphrase (str) - Passphrase of private key file. Google generates .p12 files secured with fixed 'notasecret' passphrase, so if you didn't change it it's fine to omit this parameter. Returns: ServiceAccount """ key = OpenSSL.crypto.load_pkcs12(key, passphrase).get_privatekey() return cls(key=key, email=email, scopes=scopes, subject=subject)
python
def from_pkcs12(cls, key, email, scopes, subject=None, passphrase=PKCS12_PASSPHRASE): """Alternate constructor intended for using .p12 files. Args: key (dict) - Parsed JSON with service account credentials. email (str) - Service account email. scopes (Union[str, collections.Iterable[str]]) - List of permissions that the application requests. subject (str) - The email address of the user for which the application is requesting delegated access. passphrase (str) - Passphrase of private key file. Google generates .p12 files secured with fixed 'notasecret' passphrase, so if you didn't change it it's fine to omit this parameter. Returns: ServiceAccount """ key = OpenSSL.crypto.load_pkcs12(key, passphrase).get_privatekey() return cls(key=key, email=email, scopes=scopes, subject=subject)
[ "def", "from_pkcs12", "(", "cls", ",", "key", ",", "email", ",", "scopes", ",", "subject", "=", "None", ",", "passphrase", "=", "PKCS12_PASSPHRASE", ")", ":", "key", "=", "OpenSSL", ".", "crypto", ".", "load_pkcs12", "(", "key", ",", "passphrase", ")", ".", "get_privatekey", "(", ")", "return", "cls", "(", "key", "=", "key", ",", "email", "=", "email", ",", "scopes", "=", "scopes", ",", "subject", "=", "subject", ")" ]
Alternate constructor intended for using .p12 files. Args: key (bytes) - Contents of the .p12 key file. email (str) - Service account email. scopes (Union[str, collections.Iterable[str]]) - List of permissions that the application requests. subject (str) - The email address of the user for which the application is requesting delegated access. passphrase (str) - Passphrase of the private key file. Google generates .p12 files secured with the fixed 'notasecret' passphrase, so if you didn't change it, it's fine to omit this parameter. Returns: ServiceAccount
[ "Alternate", "constructor", "intended", "for", "using", ".", "p12", "files", "." ]
aef2e19d87281b1d8e42d6b158111e14e80128db
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L99-L119
train
miedzinski/google-oauth
google_oauth/service.py
ServiceAccount.issued_at
def issued_at(self): """Time when access token was requested, as seconds since epoch. Note: Accessing this property when there wasn't any request attempts will return current time. Returns: int """ issued_at = self._issued_at if issued_at is None: self._issued_at = int(time.time()) return self._issued_at
python
def issued_at(self): """Time when access token was requested, as seconds since epoch. Note: Accessing this property when there wasn't any request attempts will return current time. Returns: int """ issued_at = self._issued_at if issued_at is None: self._issued_at = int(time.time()) return self._issued_at
[ "def", "issued_at", "(", "self", ")", ":", "issued_at", "=", "self", ".", "_issued_at", "if", "issued_at", "is", "None", ":", "self", ".", "_issued_at", "=", "int", "(", "time", ".", "time", "(", ")", ")", "return", "self", ".", "_issued_at" ]
Time when access token was requested, as seconds since epoch. Note: Accessing this property before any request attempt will return the current time. Returns: int
[ "Time", "when", "access", "token", "was", "requested", "as", "seconds", "since", "epoch", "." ]
aef2e19d87281b1d8e42d6b158111e14e80128db
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L181-L194
train
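issued_at is one half of a lazy-cached timestamp: the getter caches on first access, and, given the del self.issued_at seen in make_access_request below, a matching deleter presumably resets the cache. A self-contained sketch of that pattern; the class name TokenClock is illustrative.

import time

class TokenClock(object):
    def __init__(self):
        self._issued_at = None

    @property
    def issued_at(self):
        # cache on first access; later accesses return the same value
        if self._issued_at is None:
            self._issued_at = int(time.time())
        return self._issued_at

    @issued_at.deleter
    def issued_at(self):
        # force the next access to take a fresh timestamp
        self._issued_at = None

clock = TokenClock()
first = clock.issued_at
del clock.issued_at  # reset, as make_access_request does
assert clock.issued_at >= first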
miedzinski/google-oauth
google_oauth/service.py
ServiceAccount.access_token
def access_token(self): """Stores always valid OAuth2 access token. Note: Accessing this property may result in HTTP request. Returns: str """ if (self._access_token is None or self.expiration_time <= int(time.time())): resp = self.make_access_request() self._access_token = resp.json()['access_token'] return self._access_token
python
def access_token(self): """Stores always valid OAuth2 access token. Note: Accessing this property may result in HTTP request. Returns: str """ if (self._access_token is None or self.expiration_time <= int(time.time())): resp = self.make_access_request() self._access_token = resp.json()['access_token'] return self._access_token
[ "def", "access_token", "(", "self", ")", ":", "if", "(", "self", ".", "_access_token", "is", "None", "or", "self", ".", "expiration_time", "<=", "int", "(", "time", ".", "time", "(", ")", ")", ")", ":", "resp", "=", "self", ".", "make_access_request", "(", ")", "self", ".", "_access_token", "=", "resp", ".", "json", "(", ")", "[", "'access_token'", "]", "return", "self", ".", "_access_token" ]
Always stores a valid OAuth2 access token. Note: Accessing this property may result in an HTTP request. Returns: str
[ "Always", "stores", "a", "valid", "OAuth2", "access", "token", "." ]
aef2e19d87281b1d8e42d6b158111e14e80128db
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L214-L228
train
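access_token implements refresh-on-expiry caching. A generic, self-contained sketch of the same idea, with a stand-in fetch callable instead of a real OAuth2 request; the names and the fixed lifetime are illustrative.

import time

class CachedToken(object):
    '''Re-fetches a token only when missing or past its lifetime.'''
    def __init__(self, fetch, lifetime=3600):
        self._fetch = fetch          # callable returning a fresh token
        self._lifetime = lifetime
        self._token = None
        self._expires_at = 0

    @property
    def token(self):
        now = int(time.time())
        if self._token is None or self._expires_at <= now:
            self._token = self._fetch()
            self._expires_at = now + self._lifetime
        return self._token

cached = CachedToken(fetch=lambda: 'token-%d' % time.time())
assert cached.token == cached.token  # second access hits the cache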
miedzinski/google-oauth
google_oauth/service.py
ServiceAccount.make_access_request
def make_access_request(self): """Makes an OAuth2 access token request with crafted JWT and signature. The core of this module. Based on arguments it creates proper JWT for you and signs it with supplied private key. Regardless of present valid token, it always clears ``issued_at`` property, which in turn results in requesting fresh OAuth2 access token. Returns: requests.Response Raises: google_oauth.exceptions.AuthenticationError: If there was any non-200 HTTP-code from Google. requests.RequestException: Something went wrong when doing HTTP request. """ del self.issued_at assertion = b'.'.join((self.header(), self.claims(), self.signature())) post_data = { 'grant_type': GRANT_TYPE, 'assertion': assertion, } resp = requests.post(AUDIENCE, post_data) if resp.status_code != 200: raise AuthenticationError(resp) return resp
python
def make_access_request(self): """Makes an OAuth2 access token request with crafted JWT and signature. The core of this module. Based on arguments it creates proper JWT for you and signs it with supplied private key. Regardless of present valid token, it always clears ``issued_at`` property, which in turn results in requesting fresh OAuth2 access token. Returns: requests.Response Raises: google_oauth.exceptions.AuthenticationError: If there was any non-200 HTTP-code from Google. requests.RequestException: Something went wrong when doing HTTP request. """ del self.issued_at assertion = b'.'.join((self.header(), self.claims(), self.signature())) post_data = { 'grant_type': GRANT_TYPE, 'assertion': assertion, } resp = requests.post(AUDIENCE, post_data) if resp.status_code != 200: raise AuthenticationError(resp) return resp
[ "def", "make_access_request", "(", "self", ")", ":", "del", "self", ".", "issued_at", "assertion", "=", "b'.'", ".", "join", "(", "(", "self", ".", "header", "(", ")", ",", "self", ".", "claims", "(", ")", ",", "self", ".", "signature", "(", ")", ")", ")", "post_data", "=", "{", "'grant_type'", ":", "GRANT_TYPE", ",", "'assertion'", ":", "assertion", ",", "}", "resp", "=", "requests", ".", "post", "(", "AUDIENCE", ",", "post_data", ")", "if", "resp", ".", "status_code", "!=", "200", ":", "raise", "AuthenticationError", "(", "resp", ")", "return", "resp" ]
Makes an OAuth2 access token request with crafted JWT and signature. The core of this module. Based on the arguments it creates a proper JWT for you and signs it with the supplied private key. Regardless of any presently valid token, it always clears the ``issued_at`` property, which in turn results in requesting a fresh OAuth2 access token. Returns: requests.Response Raises: google_oauth.exceptions.AuthenticationError: If there was any non-200 HTTP code from Google. requests.RequestException: Something went wrong when doing the HTTP request.
[ "Makes", "an", "OAuth2", "access", "token", "request", "with", "crafted", "JWT", "and", "signature", "." ]
aef2e19d87281b1d8e42d6b158111e14e80128db
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L259-L290
train
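make_access_request joins the three base64url JWT segments with dots and POSTs them as a JWT bearer grant. A sketch of that request shape: the grant-type URN is the standard RFC 7523 value that the module's GRANT_TYPE constant presumably holds, the token endpoint URL is an assumption for what AUDIENCE points at, and the JWT segments are placeholders a live server would reject.

import requests

# hypothetical pre-computed JWT segments (base64url-encoded bytes)
header, claims, signature = b'eyJh...', b'eyJp...', b'c2ln...'
assertion = b'.'.join((header, claims, signature))

post_data = {
    'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
    'assertion': assertion,
}
resp = requests.post('https://accounts.google.com/o/oauth2/token',
                     data=post_data)
if resp.status_code != 200:
    raise RuntimeError('token request failed: %s' % resp.text)
access_token = resp.json()['access_token']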
miedzinski/google-oauth
google_oauth/service.py
ServiceAccount.authorized_request
def authorized_request(self, method, url, **kwargs): """Shortcut for requests.request with proper Authorization header. Note: If you put auth keyword argument or Authorization in headers keyword argument, this will raise an exception. Decide what you want to do! Args: method (str) - HTTP method of this request, like GET or POST. url (str) - URL of this request (one of Google APIs). Examples: >>> scope = 'https://www.googleapis.com/auth/plus.login' >>> url = 'https://www.googleapis.com/plus/v1/people' \ >>> '?query=Guuido+van+Rossum' >>> key = json.load(open('/path/to/credentials.json')) >>> auth = ServiceAccount.from_json(key=key, scopes=scope) >>> auth.authorized_request(method='get', url=url) Returns: requests.Response """ headers = kwargs.pop('headers', {}) if headers.get('Authorization') or kwargs.get('auth'): raise ValueError("Found custom Authorization header, " "method call would override it.") headers['Authorization'] = 'Bearer ' + self.access_token return requests.request(method, url, headers=headers, **kwargs)
python
def authorized_request(self, method, url, **kwargs): """Shortcut for requests.request with proper Authorization header. Note: If you put auth keyword argument or Authorization in headers keyword argument, this will raise an exception. Decide what you want to do! Args: method (str) - HTTP method of this request, like GET or POST. url (str) - URL of this request (one of Google APIs). Examples: >>> scope = 'https://www.googleapis.com/auth/plus.login' >>> url = 'https://www.googleapis.com/plus/v1/people' \ >>> '?query=Guuido+van+Rossum' >>> key = json.load(open('/path/to/credentials.json')) >>> auth = ServiceAccount.from_json(key=key, scopes=scope) >>> auth.authorized_request(method='get', url=url) Returns: requests.Response """ headers = kwargs.pop('headers', {}) if headers.get('Authorization') or kwargs.get('auth'): raise ValueError("Found custom Authorization header, " "method call would override it.") headers['Authorization'] = 'Bearer ' + self.access_token return requests.request(method, url, headers=headers, **kwargs)
[ "def", "authorized_request", "(", "self", ",", "method", ",", "url", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "kwargs", ".", "pop", "(", "'headers'", ",", "{", "}", ")", "if", "headers", ".", "get", "(", "'Authorization'", ")", "or", "kwargs", ".", "get", "(", "'auth'", ")", ":", "raise", "ValueError", "(", "\"Found custom Authorization header, \"", "\"method call would override it.\"", ")", "headers", "[", "'Authorization'", "]", "=", "'Bearer '", "+", "self", ".", "access_token", "return", "requests", ".", "request", "(", "method", ",", "url", ",", "headers", "=", "headers", ",", "*", "*", "kwargs", ")" ]
Shortcut for requests.request with proper Authorization header. Note: If you pass the auth keyword argument or put Authorization into the headers keyword argument, this will raise an exception. Decide what you want to do! Args: method (str) - HTTP method of this request, like GET or POST. url (str) - URL of this request (one of Google APIs). Examples: >>> scope = 'https://www.googleapis.com/auth/plus.login' >>> url = 'https://www.googleapis.com/plus/v1/people' \ >>> '?query=Guido+van+Rossum' >>> key = json.load(open('/path/to/credentials.json')) >>> auth = ServiceAccount.from_json(key=key, scopes=scope) >>> auth.authorized_request(method='get', url=url) Returns: requests.Response
[ "Shortcut", "for", "requests", ".", "request", "with", "proper", "Authorization", "header", "." ]
aef2e19d87281b1d8e42d6b158111e14e80128db
https://github.com/miedzinski/google-oauth/blob/aef2e19d87281b1d8e42d6b158111e14e80128db/google_oauth/service.py#L292-L321
train
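authorized_request is Bearer-header injection with a collision guard. A standalone sketch of the same behavior outside the class; the token string in the usage comment is a placeholder.

import requests

def bearer_request(method, url, token, **kwargs):
    '''requests.request with an Authorization: Bearer header attached.'''
    headers = kwargs.pop('headers', {})
    if headers.get('Authorization') or kwargs.get('auth'):
        raise ValueError('refusing to override an existing Authorization')
    headers['Authorization'] = 'Bearer ' + token
    return requests.request(method, url, headers=headers, **kwargs)

# e.g. bearer_request('get', 'https://www.googleapis.com/plus/v1/people'
#                     '?query=Guido+van+Rossum', token='ya29....')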
geophysics-ubonn/reda
lib/reda/importers/iris_syscal_pro.py
import_txt
def import_txt(filename, **kwargs): """Import Syscal measurements from a text file, exported as 'Spreadsheet'. Parameters ---------- filename: string input filename x0: float, optional position of first electrode. If not given, then use the smallest x-position in the data as the first electrode. spacing: float electrode spacing. This is important if not all electrodes are used in a given measurement setup. If not given, then the smallest distance between electrodes is assumed to be the electrode spacing. Naturally, this requires measurements (or injections) with subsequent electrodes. reciprocals: int, optional if provided, then assume that this is a reciprocal measurements where only the electrode cables were switched. The provided number N is treated as the maximum electrode number, and denotations are renamed according to the equation :math:`X_n = N - (X_a - 1)` Returns ------- data: :py:class:`pandas.DataFrame` Contains the measurement data electrodes: :py:class:`pandas.DataFrame` Contains electrode positions (None at the moment) topography: None No topography information is contained in the text files, so we always return None Notes ----- * TODO: we could try to infer electrode spacing from the file itself """ # read in text file into a buffer with open(filename, 'r') as fid: text = fid.read() strings_to_replace = { 'Mixed / non conventional': 'Mixed/non-conventional', 'Date': 'Date Time AM-PM', } for key in strings_to_replace.keys(): text = text.replace(key, strings_to_replace[key]) buffer = StringIO(text) # read data file data_raw = pd.read_csv( buffer, # sep='\t', delim_whitespace=True, ) # clean up column names data_raw.columns = [x.strip() for x in data_raw.columns.tolist()] # generate electrode positions data = _convert_coords_to_abmn_X( data_raw[['Spa.1', 'Spa.2', 'Spa.3', 'Spa.4']], **kwargs ) # [mV] / [mA] data['r'] = data_raw['Vp'] / data_raw['In'] data['Vmn'] = data_raw['Vp'] data['Iab'] = data_raw['In'] # rename electrode denotations rec_max = kwargs.get('reciprocals', None) if rec_max is not None: print('renumbering electrode numbers') data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']] return data, None, None
python
def import_txt(filename, **kwargs): """Import Syscal measurements from a text file, exported as 'Spreadsheet'. Parameters ---------- filename: string input filename x0: float, optional position of first electrode. If not given, then use the smallest x-position in the data as the first electrode. spacing: float electrode spacing. This is important if not all electrodes are used in a given measurement setup. If not given, then the smallest distance between electrodes is assumed to be the electrode spacing. Naturally, this requires measurements (or injections) with subsequent electrodes. reciprocals: int, optional if provided, then assume that this is a reciprocal measurements where only the electrode cables were switched. The provided number N is treated as the maximum electrode number, and denotations are renamed according to the equation :math:`X_n = N - (X_a - 1)` Returns ------- data: :py:class:`pandas.DataFrame` Contains the measurement data electrodes: :py:class:`pandas.DataFrame` Contains electrode positions (None at the moment) topography: None No topography information is contained in the text files, so we always return None Notes ----- * TODO: we could try to infer electrode spacing from the file itself """ # read in text file into a buffer with open(filename, 'r') as fid: text = fid.read() strings_to_replace = { 'Mixed / non conventional': 'Mixed/non-conventional', 'Date': 'Date Time AM-PM', } for key in strings_to_replace.keys(): text = text.replace(key, strings_to_replace[key]) buffer = StringIO(text) # read data file data_raw = pd.read_csv( buffer, # sep='\t', delim_whitespace=True, ) # clean up column names data_raw.columns = [x.strip() for x in data_raw.columns.tolist()] # generate electrode positions data = _convert_coords_to_abmn_X( data_raw[['Spa.1', 'Spa.2', 'Spa.3', 'Spa.4']], **kwargs ) # [mV] / [mA] data['r'] = data_raw['Vp'] / data_raw['In'] data['Vmn'] = data_raw['Vp'] data['Iab'] = data_raw['In'] # rename electrode denotations rec_max = kwargs.get('reciprocals', None) if rec_max is not None: print('renumbering electrode numbers') data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']] return data, None, None
[ "def", "import_txt", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "# read in text file into a buffer", "with", "open", "(", "filename", ",", "'r'", ")", "as", "fid", ":", "text", "=", "fid", ".", "read", "(", ")", "strings_to_replace", "=", "{", "'Mixed / non conventional'", ":", "'Mixed/non-conventional'", ",", "'Date'", ":", "'Date Time AM-PM'", ",", "}", "for", "key", "in", "strings_to_replace", ".", "keys", "(", ")", ":", "text", "=", "text", ".", "replace", "(", "key", ",", "strings_to_replace", "[", "key", "]", ")", "buffer", "=", "StringIO", "(", "text", ")", "# read data file", "data_raw", "=", "pd", ".", "read_csv", "(", "buffer", ",", "# sep='\\t',", "delim_whitespace", "=", "True", ",", ")", "# clean up column names", "data_raw", ".", "columns", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "data_raw", ".", "columns", ".", "tolist", "(", ")", "]", "# generate electrode positions", "data", "=", "_convert_coords_to_abmn_X", "(", "data_raw", "[", "[", "'Spa.1'", ",", "'Spa.2'", ",", "'Spa.3'", ",", "'Spa.4'", "]", "]", ",", "*", "*", "kwargs", ")", "# [mV] / [mA]", "data", "[", "'r'", "]", "=", "data_raw", "[", "'Vp'", "]", "/", "data_raw", "[", "'In'", "]", "data", "[", "'Vmn'", "]", "=", "data_raw", "[", "'Vp'", "]", "data", "[", "'Iab'", "]", "=", "data_raw", "[", "'In'", "]", "# rename electrode denotations", "rec_max", "=", "kwargs", ".", "get", "(", "'reciprocals'", ",", "None", ")", "if", "rec_max", "is", "not", "None", ":", "print", "(", "'renumbering electrode numbers'", ")", "data", "[", "[", "'a'", ",", "'b'", ",", "'m'", ",", "'n'", "]", "]", "=", "rec_max", "+", "1", "-", "data", "[", "[", "'a'", ",", "'b'", ",", "'m'", ",", "'n'", "]", "]", "return", "data", ",", "None", ",", "None" ]
Import Syscal measurements from a text file, exported as 'Spreadsheet'.

Parameters
----------
filename: string
    input filename
x0: float, optional
    position of first electrode. If not given, then use the smallest
    x-position in the data as the first electrode.
spacing: float
    electrode spacing. This is important if not all electrodes are used in
    a given measurement setup. If not given, then the smallest distance
    between electrodes is assumed to be the electrode spacing. Naturally,
    this requires measurements (or injections) with subsequent electrodes.
reciprocals: int, optional
    if provided, then assume that this is a reciprocal measurement where
    only the electrode cables were switched. The provided number N is
    treated as the maximum electrode number, and denotations are renamed
    according to the equation :math:`X_n = N - (X_a - 1)`

Returns
-------
data: :py:class:`pandas.DataFrame`
    Contains the measurement data
electrodes: :py:class:`pandas.DataFrame`
    Contains electrode positions (None at the moment)
topography: None
    No topography information is contained in the text files, so we always
    return None

Notes
-----
* TODO: we could try to infer electrode spacing from the file itself
[ "Import", "Syscal", "measurements", "from", "a", "text", "file", "exported", "as", "Spreadsheet", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/iris_syscal_pro.py#L80-L156
train
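A minimal usage sketch for the import_txt record above; the file name and keyword values are hypothetical, not taken from the record:

    from reda.importers.iris_syscal_pro import import_txt

    # 'profile.txt' is a hypothetical Syscal 'Spreadsheet' export
    data, electrodes, topography = import_txt(
        'profile.txt',
        spacing=1.0,      # assumed 1 m electrode spacing
        reciprocals=48,   # assumed 48-electrode reciprocal layout
    )
    # electrodes and topography are always None for this importer
    print(data[['a', 'b', 'm', 'n', 'r']].head())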
geophysics-ubonn/reda
lib/reda/importers/iris_syscal_pro.py
import_bin
def import_bin(filename, **kwargs): """Read a .bin file generated by the IRIS Instruments Syscal Pro System and return a curated dataframe for further processing. This dataframe contains only information currently deemed important. Use the function reda.importers.iris_syscal_pro_binary._import_bin to extract ALL information from a given .bin file. Parameters ---------- filename : string path to input filename x0 : float, optional position of first electrode. If not given, then use the smallest x-position in the data as the first electrode. spacing : float electrode spacing. This is important if not all electrodes are used in a given measurement setup. If not given, then the smallest distance between electrodes is assumed to be the electrode spacing. Naturally, this requires measurements (or injections) with subsequent electrodes. reciprocals : int, optional if provided, then assume that this is a reciprocal measurements where only the electrode cables were switched. The provided number N is treated as the maximum electrode number, and denotations are renamed according to the equation :math:`X_n = N - (X_a - 1)` check_meas_nums : bool if True, then check that the measurement numbers are consecutive. Don't return data after a jump to smaller measurement numbers (this usually indicates that more data points were downloaded than are part of a specific measurement. Default: True skip_rows : int Ignore this number of rows at the beginning, e.g., because they were inadvertently imported from an earlier measurement. Default: 0 Returns ------- data : :py:class:`pandas.DataFrame` Contains the measurement data electrodes : :py:class:`pandas.DataFrame` Contains electrode positions (None at the moment) topography : None No topography information is contained in the text files, so we always return None """ metadata, data_raw = _import_bin(filename) skip_rows = kwargs.get('skip_rows', 0) if skip_rows > 0: data_raw.drop(data_raw.index[range(0, skip_rows)], inplace=True) data_raw = data_raw.reset_index() if kwargs.get('check_meas_nums', True): # check that first number is 0 if data_raw['measurement_num'].iloc[0] != 0: print('WARNING: Measurement numbers do not start with 0 ' + '(did you download ALL data?)') # check that all measurement numbers increase by one if not np.all(np.diff(data_raw['measurement_num'])) == 1: print( 'WARNING ' 'Measurement numbers are not consecutive. ' 'Perhaps the first measurement belongs to another measurement?' 
' Use the skip_rows parameter to skip those measurements' ) # now check if there is a jump in measurement numbers somewhere # ignore first entry as this will always be nan diff = data_raw['measurement_num'].diff()[1:] jump = np.where(diff != 1)[0] if len(jump) > 0: print('WARNING: One or more jumps in measurement numbers detected') print('The jump indices are:') for jump_nr in jump: print(jump_nr) print('Removing data points subsequent to the first jump') data_raw = data_raw.iloc[0:jump[0] + 1, :] if data_raw.shape[0] == 0: # no data present, return a bare DataFrame return pd.DataFrame(columns=['a', 'b', 'm', 'n', 'r']), None, None data = _convert_coords_to_abmn_X( data_raw[['x_a', 'x_b', 'x_m', 'x_n']], **kwargs ) # [mV] / [mA] data['r'] = data_raw['vp'] / data_raw['Iab'] data['Vmn'] = data_raw['vp'] data['vab'] = data_raw['vab'] data['Iab'] = data_raw['Iab'] data['mdelay'] = data_raw['mdelay'] data['Tm'] = data_raw['Tm'] data['Mx'] = data_raw['Mx'] data['chargeability'] = data_raw['m'] data['q'] = data_raw['q'] # rename electrode denotations rec_max = kwargs.get('reciprocals', None) if rec_max is not None: print('renumbering electrode numbers') data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']] # print(data) return data, None, None
python
def import_bin(filename, **kwargs): """Read a .bin file generated by the IRIS Instruments Syscal Pro System and return a curated dataframe for further processing. This dataframe contains only information currently deemed important. Use the function reda.importers.iris_syscal_pro_binary._import_bin to extract ALL information from a given .bin file. Parameters ---------- filename : string path to input filename x0 : float, optional position of first electrode. If not given, then use the smallest x-position in the data as the first electrode. spacing : float electrode spacing. This is important if not all electrodes are used in a given measurement setup. If not given, then the smallest distance between electrodes is assumed to be the electrode spacing. Naturally, this requires measurements (or injections) with subsequent electrodes. reciprocals : int, optional if provided, then assume that this is a reciprocal measurements where only the electrode cables were switched. The provided number N is treated as the maximum electrode number, and denotations are renamed according to the equation :math:`X_n = N - (X_a - 1)` check_meas_nums : bool if True, then check that the measurement numbers are consecutive. Don't return data after a jump to smaller measurement numbers (this usually indicates that more data points were downloaded than are part of a specific measurement. Default: True skip_rows : int Ignore this number of rows at the beginning, e.g., because they were inadvertently imported from an earlier measurement. Default: 0 Returns ------- data : :py:class:`pandas.DataFrame` Contains the measurement data electrodes : :py:class:`pandas.DataFrame` Contains electrode positions (None at the moment) topography : None No topography information is contained in the text files, so we always return None """ metadata, data_raw = _import_bin(filename) skip_rows = kwargs.get('skip_rows', 0) if skip_rows > 0: data_raw.drop(data_raw.index[range(0, skip_rows)], inplace=True) data_raw = data_raw.reset_index() if kwargs.get('check_meas_nums', True): # check that first number is 0 if data_raw['measurement_num'].iloc[0] != 0: print('WARNING: Measurement numbers do not start with 0 ' + '(did you download ALL data?)') # check that all measurement numbers increase by one if not np.all(np.diff(data_raw['measurement_num'])) == 1: print( 'WARNING ' 'Measurement numbers are not consecutive. ' 'Perhaps the first measurement belongs to another measurement?' 
' Use the skip_rows parameter to skip those measurements' ) # now check if there is a jump in measurement numbers somewhere # ignore first entry as this will always be nan diff = data_raw['measurement_num'].diff()[1:] jump = np.where(diff != 1)[0] if len(jump) > 0: print('WARNING: One or more jumps in measurement numbers detected') print('The jump indices are:') for jump_nr in jump: print(jump_nr) print('Removing data points subsequent to the first jump') data_raw = data_raw.iloc[0:jump[0] + 1, :] if data_raw.shape[0] == 0: # no data present, return a bare DataFrame return pd.DataFrame(columns=['a', 'b', 'm', 'n', 'r']), None, None data = _convert_coords_to_abmn_X( data_raw[['x_a', 'x_b', 'x_m', 'x_n']], **kwargs ) # [mV] / [mA] data['r'] = data_raw['vp'] / data_raw['Iab'] data['Vmn'] = data_raw['vp'] data['vab'] = data_raw['vab'] data['Iab'] = data_raw['Iab'] data['mdelay'] = data_raw['mdelay'] data['Tm'] = data_raw['Tm'] data['Mx'] = data_raw['Mx'] data['chargeability'] = data_raw['m'] data['q'] = data_raw['q'] # rename electrode denotations rec_max = kwargs.get('reciprocals', None) if rec_max is not None: print('renumbering electrode numbers') data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']] # print(data) return data, None, None
[ "def", "import_bin", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "metadata", ",", "data_raw", "=", "_import_bin", "(", "filename", ")", "skip_rows", "=", "kwargs", ".", "get", "(", "'skip_rows'", ",", "0", ")", "if", "skip_rows", ">", "0", ":", "data_raw", ".", "drop", "(", "data_raw", ".", "index", "[", "range", "(", "0", ",", "skip_rows", ")", "]", ",", "inplace", "=", "True", ")", "data_raw", "=", "data_raw", ".", "reset_index", "(", ")", "if", "kwargs", ".", "get", "(", "'check_meas_nums'", ",", "True", ")", ":", "# check that first number is 0", "if", "data_raw", "[", "'measurement_num'", "]", ".", "iloc", "[", "0", "]", "!=", "0", ":", "print", "(", "'WARNING: Measurement numbers do not start with 0 '", "+", "'(did you download ALL data?)'", ")", "# check that all measurement numbers increase by one", "if", "not", "np", ".", "all", "(", "np", ".", "diff", "(", "data_raw", "[", "'measurement_num'", "]", ")", ")", "==", "1", ":", "print", "(", "'WARNING '", "'Measurement numbers are not consecutive. '", "'Perhaps the first measurement belongs to another measurement?'", "' Use the skip_rows parameter to skip those measurements'", ")", "# now check if there is a jump in measurement numbers somewhere", "# ignore first entry as this will always be nan", "diff", "=", "data_raw", "[", "'measurement_num'", "]", ".", "diff", "(", ")", "[", "1", ":", "]", "jump", "=", "np", ".", "where", "(", "diff", "!=", "1", ")", "[", "0", "]", "if", "len", "(", "jump", ")", ">", "0", ":", "print", "(", "'WARNING: One or more jumps in measurement numbers detected'", ")", "print", "(", "'The jump indices are:'", ")", "for", "jump_nr", "in", "jump", ":", "print", "(", "jump_nr", ")", "print", "(", "'Removing data points subsequent to the first jump'", ")", "data_raw", "=", "data_raw", ".", "iloc", "[", "0", ":", "jump", "[", "0", "]", "+", "1", ",", ":", "]", "if", "data_raw", ".", "shape", "[", "0", "]", "==", "0", ":", "# no data present, return a bare DataFrame", "return", "pd", ".", "DataFrame", "(", "columns", "=", "[", "'a'", ",", "'b'", ",", "'m'", ",", "'n'", ",", "'r'", "]", ")", ",", "None", ",", "None", "data", "=", "_convert_coords_to_abmn_X", "(", "data_raw", "[", "[", "'x_a'", ",", "'x_b'", ",", "'x_m'", ",", "'x_n'", "]", "]", ",", "*", "*", "kwargs", ")", "# [mV] / [mA]", "data", "[", "'r'", "]", "=", "data_raw", "[", "'vp'", "]", "/", "data_raw", "[", "'Iab'", "]", "data", "[", "'Vmn'", "]", "=", "data_raw", "[", "'vp'", "]", "data", "[", "'vab'", "]", "=", "data_raw", "[", "'vab'", "]", "data", "[", "'Iab'", "]", "=", "data_raw", "[", "'Iab'", "]", "data", "[", "'mdelay'", "]", "=", "data_raw", "[", "'mdelay'", "]", "data", "[", "'Tm'", "]", "=", "data_raw", "[", "'Tm'", "]", "data", "[", "'Mx'", "]", "=", "data_raw", "[", "'Mx'", "]", "data", "[", "'chargeability'", "]", "=", "data_raw", "[", "'m'", "]", "data", "[", "'q'", "]", "=", "data_raw", "[", "'q'", "]", "# rename electrode denotations", "rec_max", "=", "kwargs", ".", "get", "(", "'reciprocals'", ",", "None", ")", "if", "rec_max", "is", "not", "None", ":", "print", "(", "'renumbering electrode numbers'", ")", "data", "[", "[", "'a'", ",", "'b'", ",", "'m'", ",", "'n'", "]", "]", "=", "rec_max", "+", "1", "-", "data", "[", "[", "'a'", ",", "'b'", ",", "'m'", ",", "'n'", "]", "]", "# print(data)", "return", "data", ",", "None", ",", "None" ]
Read a .bin file generated by the IRIS Instruments Syscal Pro System and
return a curated dataframe for further processing. This dataframe contains
only information currently deemed important. Use the function
reda.importers.iris_syscal_pro_binary._import_bin to extract ALL information
from a given .bin file.

Parameters
----------
filename : string
    path to input filename
x0 : float, optional
    position of first electrode. If not given, then use the smallest
    x-position in the data as the first electrode.
spacing : float
    electrode spacing. This is important if not all electrodes are used in
    a given measurement setup. If not given, then the smallest distance
    between electrodes is assumed to be the electrode spacing. Naturally,
    this requires measurements (or injections) with subsequent electrodes.
reciprocals : int, optional
    if provided, then assume that this is a reciprocal measurement where
    only the electrode cables were switched. The provided number N is
    treated as the maximum electrode number, and denotations are renamed
    according to the equation :math:`X_n = N - (X_a - 1)`
check_meas_nums : bool
    if True, then check that the measurement numbers are consecutive. Don't
    return data after a jump to smaller measurement numbers (this usually
    indicates that more data points were downloaded than are part of a
    specific measurement). Default: True
skip_rows : int
    Ignore this number of rows at the beginning, e.g., because they were
    inadvertently imported from an earlier measurement. Default: 0

Returns
-------
data : :py:class:`pandas.DataFrame`
    Contains the measurement data
electrodes : :py:class:`pandas.DataFrame`
    Contains electrode positions (None at the moment)
topography : None
    No topography information is contained in the text files, so we always
    return None
[ "Read", "a", ".", "bin", "file", "generated", "by", "the", "IRIS", "Instruments", "Syscal", "Pro", "System", "and", "return", "a", "curated", "dataframe", "for", "further", "processing", ".", "This", "dataframe", "contains", "only", "information", "currently", "deemed", "important", ".", "Use", "the", "function", "reda", ".", "importers", ".", "iris_syscal_pro_binary", ".", "_import_bin", "to", "extract", "ALL", "information", "from", "a", "given", ".", "bin", "file", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/iris_syscal_pro.py#L160-L266
train
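A matching sketch for import_bin; the file name is hypothetical:

    from reda.importers.iris_syscal_pro import import_bin

    data, electrodes, topography = import_bin(
        'measurement.bin',
        skip_rows=0,            # drop rows left over from an earlier survey
        check_meas_nums=True,   # warn and truncate on measurement-number jumps
    )
    if data.shape[0] == 0:
        print('no data points survived the consistency checks')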
lambdalisue/notify
src/notify/notifier.py
call_and_notificate
def call_and_notificate(args, opts): """ Execute specified arguments and send notification email Parameters ---------- args : list A execution command/arguments list opts : object A option instance """ # store starttime stctime = time.clock() stttime = time.time() stdtime = datetime.datetime.now() # call subprocess exit_code, output = call(args) # calculate delta cdelta = time.clock() - stctime tdelta = time.time() - stttime endtime = datetime.datetime.now() if exit_code == 0: status = u"Success" else: status = u"Fail (%d)" % exit_code # create email body body = EMAIL_BODY % { 'prog': get_command_str(args), 'status': status, 'stdtime': stdtime, 'endtime': endtime, 'tdelta': tdelta, 'cdelta': cdelta, 'output': output, 'cwd': os.getcwd(), } # create email subject subject = opts.subject % { 'prog': get_command_str(args), 'status': status.lower(), } # create email message msg = create_message(opts.from_addr, opts.to_addr, subject, body, opts.encoding) # obtain password from keyring password = keyring.get_password('notify', opts.username) # send email send_email(msg, opts.host, opts.port, opts.username, password)
python
def call_and_notificate(args, opts): """ Execute specified arguments and send notification email Parameters ---------- args : list A execution command/arguments list opts : object A option instance """ # store starttime stctime = time.clock() stttime = time.time() stdtime = datetime.datetime.now() # call subprocess exit_code, output = call(args) # calculate delta cdelta = time.clock() - stctime tdelta = time.time() - stttime endtime = datetime.datetime.now() if exit_code == 0: status = u"Success" else: status = u"Fail (%d)" % exit_code # create email body body = EMAIL_BODY % { 'prog': get_command_str(args), 'status': status, 'stdtime': stdtime, 'endtime': endtime, 'tdelta': tdelta, 'cdelta': cdelta, 'output': output, 'cwd': os.getcwd(), } # create email subject subject = opts.subject % { 'prog': get_command_str(args), 'status': status.lower(), } # create email message msg = create_message(opts.from_addr, opts.to_addr, subject, body, opts.encoding) # obtain password from keyring password = keyring.get_password('notify', opts.username) # send email send_email(msg, opts.host, opts.port, opts.username, password)
[ "def", "call_and_notificate", "(", "args", ",", "opts", ")", ":", "# store starttime", "stctime", "=", "time", ".", "clock", "(", ")", "stttime", "=", "time", ".", "time", "(", ")", "stdtime", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "# call subprocess", "exit_code", ",", "output", "=", "call", "(", "args", ")", "# calculate delta", "cdelta", "=", "time", ".", "clock", "(", ")", "-", "stctime", "tdelta", "=", "time", ".", "time", "(", ")", "-", "stttime", "endtime", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "if", "exit_code", "==", "0", ":", "status", "=", "u\"Success\"", "else", ":", "status", "=", "u\"Fail (%d)\"", "%", "exit_code", "# create email body", "body", "=", "EMAIL_BODY", "%", "{", "'prog'", ":", "get_command_str", "(", "args", ")", ",", "'status'", ":", "status", ",", "'stdtime'", ":", "stdtime", ",", "'endtime'", ":", "endtime", ",", "'tdelta'", ":", "tdelta", ",", "'cdelta'", ":", "cdelta", ",", "'output'", ":", "output", ",", "'cwd'", ":", "os", ".", "getcwd", "(", ")", ",", "}", "# create email subject", "subject", "=", "opts", ".", "subject", "%", "{", "'prog'", ":", "get_command_str", "(", "args", ")", ",", "'status'", ":", "status", ".", "lower", "(", ")", ",", "}", "# create email message", "msg", "=", "create_message", "(", "opts", ".", "from_addr", ",", "opts", ".", "to_addr", ",", "subject", ",", "body", ",", "opts", ".", "encoding", ")", "# obtain password from keyring", "password", "=", "keyring", ".", "get_password", "(", "'notify'", ",", "opts", ".", "username", ")", "# send email", "send_email", "(", "msg", ",", "opts", ".", "host", ",", "opts", ".", "port", ",", "opts", ".", "username", ",", "password", ")" ]
Execute specified arguments and send notification email

Parameters
----------
args : list
    An execution command/arguments list
opts : object
    An option instance
[ "Execute", "specified", "arguments", "and", "send", "notification", "email" ]
1b6d7d1faa2cea13bfaa1f35130f279a0115e686
https://github.com/lambdalisue/notify/blob/1b6d7d1faa2cea13bfaa1f35130f279a0115e686/src/notify/notifier.py#L28-L78
train
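The notifier above measures both CPU time (time.clock()) and wall time (time.time()) around the subprocess call. A sketch of the same timing pattern with the modern replacement, since time.clock() was removed in Python 3.8:

    import time
    import subprocess

    stctime = time.process_time()   # CPU time, successor to time.clock()
    stttime = time.time()           # wall time
    exit_code = subprocess.call(['echo', 'hello'])
    cdelta = time.process_time() - stctime
    tdelta = time.time() - stttime
    status = "Success" if exit_code == 0 else "Fail (%d)" % exit_code
    print(status, cdelta, tdelta)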
gtaylor/django-athumb
athumb/fields.py
ImageWithThumbsFieldFile.get_thumbnail_format
def get_thumbnail_format(self): """ Determines the target thumbnail type either by looking for a format override specified at the model level, or by using the format the user uploaded. """ if self.field.thumbnail_format: # Over-ride was given, use that instead. return self.field.thumbnail_format.lower() else: # Use the existing extension from the file. filename_split = self.name.rsplit('.', 1) return filename_split[-1]
python
def get_thumbnail_format(self): """ Determines the target thumbnail type either by looking for a format override specified at the model level, or by using the format the user uploaded. """ if self.field.thumbnail_format: # Over-ride was given, use that instead. return self.field.thumbnail_format.lower() else: # Use the existing extension from the file. filename_split = self.name.rsplit('.', 1) return filename_split[-1]
[ "def", "get_thumbnail_format", "(", "self", ")", ":", "if", "self", ".", "field", ".", "thumbnail_format", ":", "# Over-ride was given, use that instead.", "return", "self", ".", "field", ".", "thumbnail_format", ".", "lower", "(", ")", "else", ":", "# Use the existing extension from the file.", "filename_split", "=", "self", ".", "name", ".", "rsplit", "(", "'.'", ",", "1", ")", "return", "filename_split", "[", "-", "1", "]" ]
Determines the target thumbnail type either by looking for a format override specified at the model level, or by using the format the user uploaded.
[ "Determines", "the", "target", "thumbnail", "type", "either", "by", "looking", "for", "a", "format", "override", "specified", "at", "the", "model", "level", "or", "by", "using", "the", "format", "the", "user", "uploaded", "." ]
69261ace0dff81e33156a54440874456a7b38dfb
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/fields.py#L94-L106
train
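The fallback branch above derives the format from the file name with rsplit; a quick sketch on a hypothetical upload name:

    name = 'photos/2011/portrait.final.JPEG'   # hypothetical upload name
    ext = name.rsplit('.', 1)[-1]              # -> 'JPEG'
    # note: the override branch lower()s the format but this fallback does
    # not, so callers wanting uniform extensions should apply ext.lower()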
gtaylor/django-athumb
athumb/fields.py
ImageWithThumbsFieldFile.save
def save(self, name, content, save=True): """ Handles some extra logic to generate the thumbnails when the original file is uploaded. """ super(ImageWithThumbsFieldFile, self).save(name, content, save) try: self.generate_thumbs(name, content) except IOError, exc: if 'cannot identify' in exc.message or \ 'bad EPS header' in exc.message: raise UploadedImageIsUnreadableError( "We were unable to read the uploaded image. " "Please make sure you are uploading a valid image file." ) else: raise
python
def save(self, name, content, save=True): """ Handles some extra logic to generate the thumbnails when the original file is uploaded. """ super(ImageWithThumbsFieldFile, self).save(name, content, save) try: self.generate_thumbs(name, content) except IOError, exc: if 'cannot identify' in exc.message or \ 'bad EPS header' in exc.message: raise UploadedImageIsUnreadableError( "We were unable to read the uploaded image. " "Please make sure you are uploading a valid image file." ) else: raise
[ "def", "save", "(", "self", ",", "name", ",", "content", ",", "save", "=", "True", ")", ":", "super", "(", "ImageWithThumbsFieldFile", ",", "self", ")", ".", "save", "(", "name", ",", "content", ",", "save", ")", "try", ":", "self", ".", "generate_thumbs", "(", "name", ",", "content", ")", "except", "IOError", ",", "exc", ":", "if", "'cannot identify'", "in", "exc", ".", "message", "or", "'bad EPS header'", "in", "exc", ".", "message", ":", "raise", "UploadedImageIsUnreadableError", "(", "\"We were unable to read the uploaded image. \"", "\"Please make sure you are uploading a valid image file.\"", ")", "else", ":", "raise" ]
Handles some extra logic to generate the thumbnails when the original file is uploaded.
[ "Handles", "some", "extra", "logic", "to", "generate", "the", "thumbnails", "when", "the", "original", "file", "is", "uploaded", "." ]
69261ace0dff81e33156a54440874456a7b38dfb
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/fields.py#L108-L124
train
gtaylor/django-athumb
athumb/fields.py
ImageWithThumbsFieldFile.delete
def delete(self, save=True): """ Deletes the original, plus any thumbnails. Fails silently if there are errors deleting the thumbnails. """ for thumb in self.field.thumbs: thumb_name, thumb_options = thumb thumb_filename = self._calc_thumb_filename(thumb_name) self.storage.delete(thumb_filename) super(ImageWithThumbsFieldFile, self).delete(save)
python
def delete(self, save=True): """ Deletes the original, plus any thumbnails. Fails silently if there are errors deleting the thumbnails. """ for thumb in self.field.thumbs: thumb_name, thumb_options = thumb thumb_filename = self._calc_thumb_filename(thumb_name) self.storage.delete(thumb_filename) super(ImageWithThumbsFieldFile, self).delete(save)
[ "def", "delete", "(", "self", ",", "save", "=", "True", ")", ":", "for", "thumb", "in", "self", ".", "field", ".", "thumbs", ":", "thumb_name", ",", "thumb_options", "=", "thumb", "thumb_filename", "=", "self", ".", "_calc_thumb_filename", "(", "thumb_name", ")", "self", ".", "storage", ".", "delete", "(", "thumb_filename", ")", "super", "(", "ImageWithThumbsFieldFile", ",", "self", ")", ".", "delete", "(", "save", ")" ]
Deletes the original, plus any thumbnails. Fails silently if there are errors deleting the thumbnails.
[ "Deletes", "the", "original", "plus", "any", "thumbnails", ".", "Fails", "silently", "if", "there", "are", "errors", "deleting", "the", "thumbnails", "." ]
69261ace0dff81e33156a54440874456a7b38dfb
https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/fields.py#L197-L207
train
tony-landis/datomic-py
datomic/datomic.py
dump_edn_val
def dump_edn_val(v): " edn simple value dump" if isinstance(v, (str, unicode)): return json.dumps(v) elif isinstance(v, E): return unicode(v) else: return dumps(v)
python
def dump_edn_val(v): " edn simple value dump" if isinstance(v, (str, unicode)): return json.dumps(v) elif isinstance(v, E): return unicode(v) else: return dumps(v)
[ "def", "dump_edn_val", "(", "v", ")", ":", "if", "isinstance", "(", "v", ",", "(", "str", ",", "unicode", ")", ")", ":", "return", "json", ".", "dumps", "(", "v", ")", "elif", "isinstance", "(", "v", ",", "E", ")", ":", "return", "unicode", "(", "v", ")", "else", ":", "return", "dumps", "(", "v", ")" ]
edn simple value dump
[ "edn", "simple", "value", "dump" ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L644-L651
train
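Illustrative inputs and outputs for dump_edn_val (Python 2, matching the unicode check in the record):

    dump_edn_val("Bob")       # -> '"Bob"' via json.dumps
    dump_edn_val(u"caf\xe9")  # -> '"caf\u00e9"'
    dump_edn_val(42)          # -> '42' via the edn dumps()
    # an E instance is passed through unicode(), preserving its edn form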
tony-landis/datomic-py
datomic/datomic.py
DB.tx_schema
def tx_schema(self, **kwargs):
    """ Builds the edn data structures and puts them in the db """
    for s in self.schema.schema:
        tx = self.tx(s, **kwargs)
python
def tx_schema(self, **kwargs):
    """ Builds the edn data structures and puts them in the db """
    for s in self.schema.schema:
        tx = self.tx(s, **kwargs)
[ "def", "tx_schema", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "s", "in", "self", ".", "schema", ".", "schema", ":", "tx", "=", "self", ".", "tx", "(", "s", ",", "*", "*", "kwargs", ")" ]
Builds the edn data structures and puts them in the db
[ "Builds", "the", "data", "structure", "edn", "and", "puts", "it", "in", "the", "db" ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L67-L71
train
tony-landis/datomic-py
datomic/datomic.py
DB.tx
def tx(self, *args, **kwargs): """ Executes a raw tx string, or get a new TX object to work with. Passing a raw string or list of strings will immedately transact and return the API response as a dict. >>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}') {db-before: db-after: tempids: } This gets a fresh `TX()` to prepare a transaction with. >>> tx = db.tx() New `E()` object with person/fname and person/lname attributes >>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'}) New state and city objects referencing the state >>> state = tx.add('loc/state', 'WA') >>> city = tx.add('loc/city', 'Seattle', 'isin', state) Add person/city, person/state, and person/likes refs to the person entity >>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]}) Excute the transaction >>> resp = tx.tx() The resolved entity ids for our person >>> person.eid, state.eid, city.eid Fetch all attributes, behave like a dict >>> person.items() >>> person.iteritems() Access attribute as an attribute >>> person['person/name'] See `TX()` for options. """ if 0 == len(args): return TX(self) ops = [] for op in args: if isinstance(op, list): ops += op elif isinstance(op, (str,unicode)): ops.append(op) if 'debug' in kwargs: pp(ops) tx_proc ="[ %s ]" % "".join(ops) x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc}) return x
python
def tx(self, *args, **kwargs): """ Executes a raw tx string, or get a new TX object to work with. Passing a raw string or list of strings will immedately transact and return the API response as a dict. >>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}') {db-before: db-after: tempids: } This gets a fresh `TX()` to prepare a transaction with. >>> tx = db.tx() New `E()` object with person/fname and person/lname attributes >>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'}) New state and city objects referencing the state >>> state = tx.add('loc/state', 'WA') >>> city = tx.add('loc/city', 'Seattle', 'isin', state) Add person/city, person/state, and person/likes refs to the person entity >>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]}) Excute the transaction >>> resp = tx.tx() The resolved entity ids for our person >>> person.eid, state.eid, city.eid Fetch all attributes, behave like a dict >>> person.items() >>> person.iteritems() Access attribute as an attribute >>> person['person/name'] See `TX()` for options. """ if 0 == len(args): return TX(self) ops = [] for op in args: if isinstance(op, list): ops += op elif isinstance(op, (str,unicode)): ops.append(op) if 'debug' in kwargs: pp(ops) tx_proc ="[ %s ]" % "".join(ops) x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc}) return x
[ "def", "tx", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "0", "==", "len", "(", "args", ")", ":", "return", "TX", "(", "self", ")", "ops", "=", "[", "]", "for", "op", "in", "args", ":", "if", "isinstance", "(", "op", ",", "list", ")", ":", "ops", "+=", "op", "elif", "isinstance", "(", "op", ",", "(", "str", ",", "unicode", ")", ")", ":", "ops", ".", "append", "(", "op", ")", "if", "'debug'", "in", "kwargs", ":", "pp", "(", "ops", ")", "tx_proc", "=", "\"[ %s ]\"", "%", "\"\"", ".", "join", "(", "ops", ")", "x", "=", "self", ".", "rest", "(", "'POST'", ",", "self", ".", "uri_db", ",", "data", "=", "{", "\"tx-data\"", ":", "tx_proc", "}", ")", "return", "x" ]
Executes a raw tx string, or get a new TX object to work with.

Passing a raw string or list of strings will immediately transact and
return the API response as a dict.
>>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}')
{db-before: db-after: tempids: }

This gets a fresh `TX()` to prepare a transaction with.
>>> tx = db.tx()

New `E()` object with person/fname and person/lname attributes
>>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})

New state and city objects referencing the state
>>> state = tx.add('loc/state', 'WA')
>>> city = tx.add('loc/city', 'Seattle', 'isin', state)

Add person/city, person/state, and person/likes refs to the person entity
>>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})

Execute the transaction
>>> resp = tx.tx()

The resolved entity ids for our person
>>> person.eid, state.eid, city.eid

Fetch all attributes, behave like a dict
>>> person.items()
>>> person.iteritems()

Access attribute as an attribute
>>> person['person/name']

See `TX()` for options.
[ "Executes", "a", "raw", "tx", "string", "or", "get", "a", "new", "TX", "object", "to", "work", "with", "." ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L73-L118
train
tony-landis/datomic-py
datomic/datomic.py
DB.e
def e(self, eid): """Get an Entity """ ta = datetime.datetime.now() rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True) tb = datetime.datetime.now() - ta print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan') return rs
python
def e(self, eid): """Get an Entity """ ta = datetime.datetime.now() rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True) tb = datetime.datetime.now() - ta print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan') return rs
[ "def", "e", "(", "self", ",", "eid", ")", ":", "ta", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "rs", "=", "self", ".", "rest", "(", "'GET'", ",", "self", ".", "uri_db", "+", "'-/entity'", ",", "data", "=", "{", "'e'", ":", "int", "(", "eid", ")", "}", ",", "parse", "=", "True", ")", "tb", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "ta", "print", "cl", "(", "'<<< fetched entity %s in %sms'", "%", "(", "eid", ",", "tb", ".", "microseconds", "/", "1000.0", ")", ",", "'cyan'", ")", "return", "rs" ]
Get an Entity
[ "Get", "an", "Entity" ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L120-L127
train
tony-landis/datomic-py
datomic/datomic.py
DB.retract
def retract(self, e, a, v):
    """ retract the value of an attribute """
    ta = datetime.datetime.now()
    ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
    rs = self.tx(ret)
    tb = datetime.datetime.now() - ta
    print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
    return rs
python
def retract(self, e, a, v):
    """ retract the value of an attribute """
    ta = datetime.datetime.now()
    ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
    rs = self.tx(ret)
    tb = datetime.datetime.now() - ta
    print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
    return rs
[ "def", "retract", "(", "self", ",", "e", ",", "a", ",", "v", ")", ":", "ta", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "ret", "=", "u\"[:db/retract %i :%s %s]\"", "%", "(", "e", ",", "a", ",", "dump_edn_val", "(", "v", ")", ")", "rs", "=", "self", ".", "tx", "(", "ret", ")", "tb", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "ta", "print", "cl", "(", "'<<< retracted %s,%s,%s in %sms'", "%", "(", "e", ",", "a", ",", "v", ",", "tb", ".", "microseconds", "/", "1000.0", ")", ",", "'cyan'", ")", "return", "rs" ]
retract the value of an attribute
[ "redact", "the", "value", "of", "an", "attribute" ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L129-L137
train
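A usage sketch for retract; the entity id and values are hypothetical:

    # emits and transacts: [:db/retract 17592186045418 :person/name "Bob"]
    rs = db.retract(17592186045418, 'person/name', 'Bob')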
tony-landis/datomic-py
datomic/datomic.py
DB.datoms
def datoms(self, index='aevt', e='', a='', v='', limit=0, offset=0, chunk=100, start='', end='', since='', as_of='', history='', **kwargs): """ Returns a lazy generator that will only fetch groups of datoms at the chunk size specified. http://docs.datomic.com/clojure/index.html#datomic.api/datoms """ assert index in ['aevt','eavt','avet','vaet'], "non-existant index" data = {'index': index, 'a': ':{0}'.format(a) if a else '', 'v': dump_edn_val(v) if v else '', 'e': int(e) if e else '', 'offset': offset or 0, 'start': start, 'end': end, 'limit': limit, 'history': 'true' if history else '', 'as-of': int(as_of) if as_of else '', 'since': int(since) if since else '', } data['limit'] = offset + chunk rs = True while rs and (data['offset'] < (limit or 1000000000)): ta = datetime.datetime.now() rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True) if not len(rs): rs = False tb = datetime.datetime.now() - ta print cl('<<< fetched %i datoms at offset %i in %sms' % ( len(rs), data['offset'], tb.microseconds/1000.0), 'cyan') for r in rs: yield r data['offset'] += chunk
python
def datoms(self, index='aevt', e='', a='', v='', limit=0, offset=0, chunk=100, start='', end='', since='', as_of='', history='', **kwargs): """ Returns a lazy generator that will only fetch groups of datoms at the chunk size specified. http://docs.datomic.com/clojure/index.html#datomic.api/datoms """ assert index in ['aevt','eavt','avet','vaet'], "non-existant index" data = {'index': index, 'a': ':{0}'.format(a) if a else '', 'v': dump_edn_val(v) if v else '', 'e': int(e) if e else '', 'offset': offset or 0, 'start': start, 'end': end, 'limit': limit, 'history': 'true' if history else '', 'as-of': int(as_of) if as_of else '', 'since': int(since) if since else '', } data['limit'] = offset + chunk rs = True while rs and (data['offset'] < (limit or 1000000000)): ta = datetime.datetime.now() rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True) if not len(rs): rs = False tb = datetime.datetime.now() - ta print cl('<<< fetched %i datoms at offset %i in %sms' % ( len(rs), data['offset'], tb.microseconds/1000.0), 'cyan') for r in rs: yield r data['offset'] += chunk
[ "def", "datoms", "(", "self", ",", "index", "=", "'aevt'", ",", "e", "=", "''", ",", "a", "=", "''", ",", "v", "=", "''", ",", "limit", "=", "0", ",", "offset", "=", "0", ",", "chunk", "=", "100", ",", "start", "=", "''", ",", "end", "=", "''", ",", "since", "=", "''", ",", "as_of", "=", "''", ",", "history", "=", "''", ",", "*", "*", "kwargs", ")", ":", "assert", "index", "in", "[", "'aevt'", ",", "'eavt'", ",", "'avet'", ",", "'vaet'", "]", ",", "\"non-existant index\"", "data", "=", "{", "'index'", ":", "index", ",", "'a'", ":", "':{0}'", ".", "format", "(", "a", ")", "if", "a", "else", "''", ",", "'v'", ":", "dump_edn_val", "(", "v", ")", "if", "v", "else", "''", ",", "'e'", ":", "int", "(", "e", ")", "if", "e", "else", "''", ",", "'offset'", ":", "offset", "or", "0", ",", "'start'", ":", "start", ",", "'end'", ":", "end", ",", "'limit'", ":", "limit", ",", "'history'", ":", "'true'", "if", "history", "else", "''", ",", "'as-of'", ":", "int", "(", "as_of", ")", "if", "as_of", "else", "''", ",", "'since'", ":", "int", "(", "since", ")", "if", "since", "else", "''", ",", "}", "data", "[", "'limit'", "]", "=", "offset", "+", "chunk", "rs", "=", "True", "while", "rs", "and", "(", "data", "[", "'offset'", "]", "<", "(", "limit", "or", "1000000000", ")", ")", ":", "ta", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "rs", "=", "self", ".", "rest", "(", "'GET'", ",", "self", ".", "uri_db", "+", "'-/datoms'", ",", "data", "=", "data", ",", "parse", "=", "True", ")", "if", "not", "len", "(", "rs", ")", ":", "rs", "=", "False", "tb", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "ta", "print", "cl", "(", "'<<< fetched %i datoms at offset %i in %sms'", "%", "(", "len", "(", "rs", ")", ",", "data", "[", "'offset'", "]", ",", "tb", ".", "microseconds", "/", "1000.0", ")", ",", "'cyan'", ")", "for", "r", "in", "rs", ":", "yield", "r", "data", "[", "'offset'", "]", "+=", "chunk" ]
Returns a lazy generator that will only fetch groups of datoms at the chunk size specified. http://docs.datomic.com/clojure/index.html#datomic.api/datoms
[ "Returns", "a", "lazy", "generator", "that", "will", "only", "fetch", "groups", "of", "datoms", "at", "the", "chunk", "size", "specified", "." ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L140-L172
train
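Because datoms() yields lazily in chunks, only the pages actually consumed are fetched over REST; a sketch with a hypothetical attribute:

    from itertools import islice

    gen = db.datoms(index='aevt', a='person/name', chunk=100)
    first_ten = list(islice(gen, 10))   # triggers at most one chunk fetch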
tony-landis/datomic-py
datomic/datomic.py
DB.debug
def debug(self, defn, args, kwargs, fmt=None, color='green'): """ debug timing, colored terminal output """ ta = datetime.datetime.now() rs = defn(*args, **kwargs) tb = datetime.datetime.now() - ta fmt = fmt or "processed {defn} in {ms}ms" logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn) "terminal output" print cl(logmsg, color) "logging output" logging.debug(logmsg) return rs
python
def debug(self, defn, args, kwargs, fmt=None, color='green'): """ debug timing, colored terminal output """ ta = datetime.datetime.now() rs = defn(*args, **kwargs) tb = datetime.datetime.now() - ta fmt = fmt or "processed {defn} in {ms}ms" logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn) "terminal output" print cl(logmsg, color) "logging output" logging.debug(logmsg) return rs
[ "def", "debug", "(", "self", ",", "defn", ",", "args", ",", "kwargs", ",", "fmt", "=", "None", ",", "color", "=", "'green'", ")", ":", "ta", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "rs", "=", "defn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "tb", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "ta", "fmt", "=", "fmt", "or", "\"processed {defn} in {ms}ms\"", "logmsg", "=", "fmt", ".", "format", "(", "ms", "=", "tb", ".", "microseconds", "/", "1000.0", ",", "defn", "=", "defn", ")", "\"terminal output\"", "print", "cl", "(", "logmsg", ",", "color", ")", "\"logging output\"", "logging", ".", "debug", "(", "logmsg", ")", "return", "rs" ]
debug timing, colored terminal output
[ "debug", "timing", "colored", "terminal", "output" ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L193-L205
train
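A usage sketch for the debug timing helper; the wrapped function is hypothetical:

    def slow_add(a, b):
        return a + b

    result = db.debug(slow_add, (1, 2), {},
                      fmt="processed {defn} in {ms}ms", color='green')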
tony-landis/datomic-py
datomic/datomic.py
DB.find
def find(self, *args, **kwargs): " new query builder on current db" return Query(*args, db=self, schema=self.schema)
python
def find(self, *args, **kwargs): " new query builder on current db" return Query(*args, db=self, schema=self.schema)
[ "def", "find", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "Query", "(", "*", "args", ",", "db", "=", "self", ",", "schema", "=", "self", ".", "schema", ")" ]
new query builder on current db
[ "new", "query", "builder", "on", "current", "db" ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L223-L225
train
tony-landis/datomic-py
datomic/datomic.py
Query.hashone
def hashone(self):
    "execute query, get back a single row as a dict"
    rs = self.one()
    if not rs:
        return {}
    else:
        finds = " ".join(self._find).split(' ')
        return dict(zip((x.replace('?','') for x in finds), rs))
python
def hashone(self):
    "execute query, get back a single row as a dict"
    rs = self.one()
    if not rs:
        return {}
    else:
        finds = " ".join(self._find).split(' ')
        return dict(zip((x.replace('?','') for x in finds), rs))
[ "def", "hashone", "(", "self", ")", ":", "rs", "=", "self", ".", "one", "(", ")", "if", "not", "rs", ":", "return", "{", "}", "else", ":", "finds", "=", "\" \"", ".", "join", "(", "self", ".", "_find", ")", ".", "split", "(", "' '", ")", "return", "dict", "(", "zip", "(", "(", "x", ".", "replace", "(", "'?'", ",", "''", ")", "for", "x", "in", "finds", ")", ",", "rs", ")", ")" ]
execute query, get back a single row as a dict
[ "execute", "query", "get", "back" ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L302-L309
train
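Sketch of how hashone() maps the :find symbols onto the returned row; the builder calls are hypothetical, since Query's full API is not shown in these records:

    q = db.find('?e ?name')   # hypothetical: seed the _find list
    # ... add :where clauses through the Query API ...
    row = q.hashone()
    # e.g. {'e': 17592186045418, 'name': 'Bob'} - the '?' prefixes are stripped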
tony-landis/datomic-py
datomic/datomic.py
Query.all
def all(self):
    "execute query, get all results as a list of lists"
    query, inputs = self._toedn()
    return self.db.q(query,
                     inputs=inputs,
                     limit=self._limit,
                     offset=self._offset,
                     history=self._history)
python
def all(self):
    "execute query, get all results as a list of lists"
    query, inputs = self._toedn()
    return self.db.q(query,
                     inputs=inputs,
                     limit=self._limit,
                     offset=self._offset,
                     history=self._history)
[ "def", "all", "(", "self", ")", ":", "query", ",", "inputs", "=", "self", ".", "_toedn", "(", ")", "return", "self", ".", "db", ".", "q", "(", "query", ",", "inputs", "=", "inputs", ",", "limit", "=", "self", ".", "_limit", ",", "offset", "=", "self", ".", "_offset", ",", "history", "=", "self", ".", "_history", ")" ]
execute query, get all results as a list of lists
[ "execute", "query", "get", "all", "list", "of", "lists" ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L320-L327
train
tony-landis/datomic-py
datomic/datomic.py
Query._toedn
def _toedn(self): """ prepare the query for the rest api """ finds = u"" inputs = u"" wheres = u"" args = [] ": in and args" for a,b in self._input: inputs += " {0}".format(a) args.append(dump_edn_val(b)) if inputs: inputs = u":in ${0}".format(inputs) " :where " for where in self._where: if isinstance(where, (str,unicode)): wheres += u"[{0}]".format(where) elif isinstance(where, (list)): wheres += u" ".join([u"[{0}]".format(w) for w in where]) " find: " if self._find == []: #find all fs = set() for p in wheres.replace('[',' ').replace(']',' ').split(' '): if p.startswith('?'): fs.add(p) self._find = list(fs) finds = " ".join(self._find) " all togethr now..." q = u"""[ :find {0} {1} :where {2} ]""".\ format( finds, inputs, wheres) return q,args
python
def _toedn(self): """ prepare the query for the rest api """ finds = u"" inputs = u"" wheres = u"" args = [] ": in and args" for a,b in self._input: inputs += " {0}".format(a) args.append(dump_edn_val(b)) if inputs: inputs = u":in ${0}".format(inputs) " :where " for where in self._where: if isinstance(where, (str,unicode)): wheres += u"[{0}]".format(where) elif isinstance(where, (list)): wheres += u" ".join([u"[{0}]".format(w) for w in where]) " find: " if self._find == []: #find all fs = set() for p in wheres.replace('[',' ').replace(']',' ').split(' '): if p.startswith('?'): fs.add(p) self._find = list(fs) finds = " ".join(self._find) " all togethr now..." q = u"""[ :find {0} {1} :where {2} ]""".\ format( finds, inputs, wheres) return q,args
[ "def", "_toedn", "(", "self", ")", ":", "finds", "=", "u\"\"", "inputs", "=", "u\"\"", "wheres", "=", "u\"\"", "args", "=", "[", "]", "\": in and args\"", "for", "a", ",", "b", "in", "self", ".", "_input", ":", "inputs", "+=", "\" {0}\"", ".", "format", "(", "a", ")", "args", ".", "append", "(", "dump_edn_val", "(", "b", ")", ")", "if", "inputs", ":", "inputs", "=", "u\":in ${0}\"", ".", "format", "(", "inputs", ")", "\" :where \"", "for", "where", "in", "self", ".", "_where", ":", "if", "isinstance", "(", "where", ",", "(", "str", ",", "unicode", ")", ")", ":", "wheres", "+=", "u\"[{0}]\"", ".", "format", "(", "where", ")", "elif", "isinstance", "(", "where", ",", "(", "list", ")", ")", ":", "wheres", "+=", "u\" \"", ".", "join", "(", "[", "u\"[{0}]\"", ".", "format", "(", "w", ")", "for", "w", "in", "where", "]", ")", "\" find: \"", "if", "self", ".", "_find", "==", "[", "]", ":", "#find all", "fs", "=", "set", "(", ")", "for", "p", "in", "wheres", ".", "replace", "(", "'['", ",", "' '", ")", ".", "replace", "(", "']'", ",", "' '", ")", ".", "split", "(", "' '", ")", ":", "if", "p", ".", "startswith", "(", "'?'", ")", ":", "fs", ".", "add", "(", "p", ")", "self", ".", "_find", "=", "list", "(", "fs", ")", "finds", "=", "\" \"", ".", "join", "(", "self", ".", "_find", ")", "\" all togethr now...\"", "q", "=", "u\"\"\"[ :find {0} {1} :where {2} ]\"\"\"", ".", "format", "(", "finds", ",", "inputs", ",", "wheres", ")", "return", "q", ",", "args" ]
prepare the query for the rest api
[ "prepare", "the", "query", "for", "the", "rest", "api" ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L329-L359
train
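For a simple query, the edn assembled by _toedn() looks like the sketch below (values hypothetical). Here _find is preset and _input holds one (symbol, value) pair, which contributes to ':in $ ...' and to the returned args list; with an empty _find, the find-all branch instead collects every ?variable from the :where clauses:

    query:  [ :find ?e ?name  :in $ ?n :where [?e :person/name ?name] ]
    args:   ['"Bob"']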
tony-landis/datomic-py
datomic/datomic.py
TX.add
def add(self, *args, **kwargs): """ Accumulate datums for the transaction Start a transaction on an existing db connection >>> tx = TX(db) Get get an entity object with a tempid >>> ref = add() >>> ref = add(0) >>> ref = add(None) >>> ref = add(False) Entity id passed as first argument (int|long) >>> tx.add(1, 'thing/name', 'value') Shorthand form for multiple attributes sharing a root namespace >>> tx.add(':thing/', {'name':'value', 'tag':'value'}) Attributes with a value of None are ignored >>> tx.add(':thing/ignored', None) Add multiple datums for an attribute with carinality:many >>> tx.add(':thing/color', ['red','white','blue']) """ assert self.resp is None, "Transaction already committed" entity, av_pairs, args = None, [], list(args) if len(args): if isinstance(args[0], (int, long)): " first arg is an entity or tempid" entity = E(args[0], tx=self) elif isinstance(args[0], E): " dont resuse entity from another tx" if args[0]._tx is self: entity = args[0] else: if int(args[0]) > 0: " use the entity id on a new obj" entity = E(int(args[0]), tx=self) args[0] = None " drop the first arg" if entity is not None or args[0] in (None, False, 0): v = args.pop(0) " auto generate a temp id?" if entity is None: entity = E(self.ctmpid, tx=self) self.ctmpid -= 1 " a,v from kwargs" if len(args) == 0 and kwargs: for a,v in kwargs.iteritems(): self.addeav(entity, a, v) " a,v from args " if len(args): assert len(args) % 2 == 0, "imbalanced a,v in args: " % args for first, second in pairwise(args): if not first.startswith(':'): first = ':' + first if not first.endswith('/'): " longhand used: blah/blah " if isinstance(second, list): for v in second: self.addeav(entity, first, v) else: self.addeav(entity, first, second) continue elif isinstance(second, dict): " shorthand used: blah/, dict " for a,v in second.iteritems(): self.addeav(entity, "%s%s" % (first, a), v) continue elif isinstance(second, (list, tuple)): " shorthand used: blah/, list|tuple " for a,v in pairwise(second): self.addeav(entity, "%s%s" % (first, a), v) continue else: raise Exception, "invalid pair: %s : %s" % (first,second) "pass back the entity so it can be resolved after tx()" return entity
python
def add(self, *args, **kwargs): """ Accumulate datums for the transaction Start a transaction on an existing db connection >>> tx = TX(db) Get get an entity object with a tempid >>> ref = add() >>> ref = add(0) >>> ref = add(None) >>> ref = add(False) Entity id passed as first argument (int|long) >>> tx.add(1, 'thing/name', 'value') Shorthand form for multiple attributes sharing a root namespace >>> tx.add(':thing/', {'name':'value', 'tag':'value'}) Attributes with a value of None are ignored >>> tx.add(':thing/ignored', None) Add multiple datums for an attribute with carinality:many >>> tx.add(':thing/color', ['red','white','blue']) """ assert self.resp is None, "Transaction already committed" entity, av_pairs, args = None, [], list(args) if len(args): if isinstance(args[0], (int, long)): " first arg is an entity or tempid" entity = E(args[0], tx=self) elif isinstance(args[0], E): " dont resuse entity from another tx" if args[0]._tx is self: entity = args[0] else: if int(args[0]) > 0: " use the entity id on a new obj" entity = E(int(args[0]), tx=self) args[0] = None " drop the first arg" if entity is not None or args[0] in (None, False, 0): v = args.pop(0) " auto generate a temp id?" if entity is None: entity = E(self.ctmpid, tx=self) self.ctmpid -= 1 " a,v from kwargs" if len(args) == 0 and kwargs: for a,v in kwargs.iteritems(): self.addeav(entity, a, v) " a,v from args " if len(args): assert len(args) % 2 == 0, "imbalanced a,v in args: " % args for first, second in pairwise(args): if not first.startswith(':'): first = ':' + first if not first.endswith('/'): " longhand used: blah/blah " if isinstance(second, list): for v in second: self.addeav(entity, first, v) else: self.addeav(entity, first, second) continue elif isinstance(second, dict): " shorthand used: blah/, dict " for a,v in second.iteritems(): self.addeav(entity, "%s%s" % (first, a), v) continue elif isinstance(second, (list, tuple)): " shorthand used: blah/, list|tuple " for a,v in pairwise(second): self.addeav(entity, "%s%s" % (first, a), v) continue else: raise Exception, "invalid pair: %s : %s" % (first,second) "pass back the entity so it can be resolved after tx()" return entity
[ "def", "add", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "assert", "self", ".", "resp", "is", "None", ",", "\"Transaction already committed\"", "entity", ",", "av_pairs", ",", "args", "=", "None", ",", "[", "]", ",", "list", "(", "args", ")", "if", "len", "(", "args", ")", ":", "if", "isinstance", "(", "args", "[", "0", "]", ",", "(", "int", ",", "long", ")", ")", ":", "\" first arg is an entity or tempid\"", "entity", "=", "E", "(", "args", "[", "0", "]", ",", "tx", "=", "self", ")", "elif", "isinstance", "(", "args", "[", "0", "]", ",", "E", ")", ":", "\" dont resuse entity from another tx\"", "if", "args", "[", "0", "]", ".", "_tx", "is", "self", ":", "entity", "=", "args", "[", "0", "]", "else", ":", "if", "int", "(", "args", "[", "0", "]", ")", ">", "0", ":", "\" use the entity id on a new obj\"", "entity", "=", "E", "(", "int", "(", "args", "[", "0", "]", ")", ",", "tx", "=", "self", ")", "args", "[", "0", "]", "=", "None", "\" drop the first arg\"", "if", "entity", "is", "not", "None", "or", "args", "[", "0", "]", "in", "(", "None", ",", "False", ",", "0", ")", ":", "v", "=", "args", ".", "pop", "(", "0", ")", "\" auto generate a temp id?\"", "if", "entity", "is", "None", ":", "entity", "=", "E", "(", "self", ".", "ctmpid", ",", "tx", "=", "self", ")", "self", ".", "ctmpid", "-=", "1", "\" a,v from kwargs\"", "if", "len", "(", "args", ")", "==", "0", "and", "kwargs", ":", "for", "a", ",", "v", "in", "kwargs", ".", "iteritems", "(", ")", ":", "self", ".", "addeav", "(", "entity", ",", "a", ",", "v", ")", "\" a,v from args \"", "if", "len", "(", "args", ")", ":", "assert", "len", "(", "args", ")", "%", "2", "==", "0", ",", "\"imbalanced a,v in args: \"", "%", "args", "for", "first", ",", "second", "in", "pairwise", "(", "args", ")", ":", "if", "not", "first", ".", "startswith", "(", "':'", ")", ":", "first", "=", "':'", "+", "first", "if", "not", "first", ".", "endswith", "(", "'/'", ")", ":", "\" longhand used: blah/blah \"", "if", "isinstance", "(", "second", ",", "list", ")", ":", "for", "v", "in", "second", ":", "self", ".", "addeav", "(", "entity", ",", "first", ",", "v", ")", "else", ":", "self", ".", "addeav", "(", "entity", ",", "first", ",", "second", ")", "continue", "elif", "isinstance", "(", "second", ",", "dict", ")", ":", "\" shorthand used: blah/, dict \"", "for", "a", ",", "v", "in", "second", ".", "iteritems", "(", ")", ":", "self", ".", "addeav", "(", "entity", ",", "\"%s%s\"", "%", "(", "first", ",", "a", ")", ",", "v", ")", "continue", "elif", "isinstance", "(", "second", ",", "(", "list", ",", "tuple", ")", ")", ":", "\" shorthand used: blah/, list|tuple \"", "for", "a", ",", "v", "in", "pairwise", "(", "second", ")", ":", "self", ".", "addeav", "(", "entity", ",", "\"%s%s\"", "%", "(", "first", ",", "a", ")", ",", "v", ")", "continue", "else", ":", "raise", "Exception", ",", "\"invalid pair: %s : %s\"", "%", "(", "first", ",", "second", ")", "\"pass back the entity so it can be resolved after tx()\"", "return", "entity" ]
Accumulate datums for the transaction

Start a transaction on an existing db connection
>>> tx = TX(db)

Get an entity object with a tempid
>>> ref = add()
>>> ref = add(0)
>>> ref = add(None)
>>> ref = add(False)

Entity id passed as first argument (int|long)
>>> tx.add(1, 'thing/name', 'value')

Shorthand form for multiple attributes sharing a root namespace
>>> tx.add(':thing/', {'name':'value', 'tag':'value'})

Attributes with a value of None are ignored
>>> tx.add(':thing/ignored', None)

Add multiple datums for an attribute with cardinality:many
>>> tx.add(':thing/color', ['red','white','blue'])
[ "Accumulate", "datums", "for", "the", "transaction" ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L515-L594
train
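TX.add leans on a pairwise() helper that is referenced but not defined in these records; judging from the even-length assert, it is assumed to consume an iterable two items at a time without overlap, e.g.:

    from itertools import izip  # Python 2; plain zip() on Python 3

    def pairwise(iterable):
        "s -> (s0, s1), (s2, s3), ..."
        it = iter(iterable)
        return izip(it, it)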
tony-landis/datomic-py
datomic/datomic.py
TX.resolve
def resolve(self): """ Resolve one or more tempids. Automatically takes place after transaction is executed. """ assert isinstance(self.resp, dict), "Transaction in uncommitted or failed state" rids = [(v) for k,v in self.resp['tempids'].items()] self.txid = self.resp['tx-data'][0]['tx'] rids.reverse() for t in self.tmpents: pos = self.tmpents.index(t) t._eid, t._txid = rids[pos], self.txid for t in self.realents: t._txid = self.txid
python
def resolve(self): """ Resolve one or more tempids. Automatically takes place after transaction is executed. """ assert isinstance(self.resp, dict), "Transaction in uncommitted or failed state" rids = [(v) for k,v in self.resp['tempids'].items()] self.txid = self.resp['tx-data'][0]['tx'] rids.reverse() for t in self.tmpents: pos = self.tmpents.index(t) t._eid, t._txid = rids[pos], self.txid for t in self.realents: t._txid = self.txid
[ "def", "resolve", "(", "self", ")", ":", "assert", "isinstance", "(", "self", ".", "resp", ",", "dict", ")", ",", "\"Transaction in uncommitted or failed state\"", "rids", "=", "[", "(", "v", ")", "for", "k", ",", "v", "in", "self", ".", "resp", "[", "'tempids'", "]", ".", "items", "(", ")", "]", "self", ".", "txid", "=", "self", ".", "resp", "[", "'tx-data'", "]", "[", "0", "]", "[", "'tx'", "]", "rids", ".", "reverse", "(", ")", "for", "t", "in", "self", ".", "tmpents", ":", "pos", "=", "self", ".", "tmpents", ".", "index", "(", "t", ")", "t", ".", "_eid", ",", "t", ".", "_txid", "=", "rids", "[", "pos", "]", ",", "self", ".", "txid", "for", "t", "in", "self", ".", "realents", ":", "t", ".", "_txid", "=", "self", ".", "txid" ]
Resolve one or more tempids. Automatically takes place after transaction is executed.
[ "Resolve", "one", "or", "more", "tempids", ".", "Automatically", "takes", "place", "after", "transaction", "is", "executed", "." ]
54f713d29ad85ba86d53d5115c9b312ff14b7846
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L611-L623
train
pennlabs/penn-sdk-python
penn/fitness.py
Fitness.get_usage
def get_usage(self): """Get fitness locations and their current usage.""" resp = requests.get(FITNESS_URL, timeout=30) resp.raise_for_status() soup = BeautifulSoup(resp.text, "html5lib") eastern = pytz.timezone('US/Eastern') output = [] for item in soup.findAll("div", {"class": "barChart"}): data = [x.strip() for x in item.get_text("\n").strip().split("\n")] data = [x for x in data if x] name = re.sub(r"\s*(Hours)?\s*-?\s*(CLOSED|OPEN)?$", "", data[0], flags=re.I).strip() output.append({ "name": name, "open": "Open" in data[1], "count": int(data[2].rsplit(" ", 1)[-1]), "updated": eastern.localize(datetime.datetime.strptime(data[3][8:].strip(), '%m/%d/%Y %I:%M %p')).isoformat(), "percent": int(data[4][:-1]) }) return output
python
def get_usage(self): """Get fitness locations and their current usage.""" resp = requests.get(FITNESS_URL, timeout=30) resp.raise_for_status() soup = BeautifulSoup(resp.text, "html5lib") eastern = pytz.timezone('US/Eastern') output = [] for item in soup.findAll("div", {"class": "barChart"}): data = [x.strip() for x in item.get_text("\n").strip().split("\n")] data = [x for x in data if x] name = re.sub(r"\s*(Hours)?\s*-?\s*(CLOSED|OPEN)?$", "", data[0], flags=re.I).strip() output.append({ "name": name, "open": "Open" in data[1], "count": int(data[2].rsplit(" ", 1)[-1]), "updated": eastern.localize(datetime.datetime.strptime(data[3][8:].strip(), '%m/%d/%Y %I:%M %p')).isoformat(), "percent": int(data[4][:-1]) }) return output
[ "def", "get_usage", "(", "self", ")", ":", "resp", "=", "requests", ".", "get", "(", "FITNESS_URL", ",", "timeout", "=", "30", ")", "resp", ".", "raise_for_status", "(", ")", "soup", "=", "BeautifulSoup", "(", "resp", ".", "text", ",", "\"html5lib\"", ")", "eastern", "=", "pytz", ".", "timezone", "(", "'US/Eastern'", ")", "output", "=", "[", "]", "for", "item", "in", "soup", ".", "findAll", "(", "\"div\"", ",", "{", "\"class\"", ":", "\"barChart\"", "}", ")", ":", "data", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "item", ".", "get_text", "(", "\"\\n\"", ")", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "]", "data", "=", "[", "x", "for", "x", "in", "data", "if", "x", "]", "name", "=", "re", ".", "sub", "(", "r\"\\s*(Hours)?\\s*-?\\s*(CLOSED|OPEN)?$\"", ",", "\"\"", ",", "data", "[", "0", "]", ",", "re", ".", "I", ")", ".", "strip", "(", ")", "output", ".", "append", "(", "{", "\"name\"", ":", "name", ",", "\"open\"", ":", "\"Open\"", "in", "data", "[", "1", "]", ",", "\"count\"", ":", "int", "(", "data", "[", "2", "]", ".", "rsplit", "(", "\" \"", ",", "1", ")", "[", "-", "1", "]", ")", ",", "\"updated\"", ":", "eastern", ".", "localize", "(", "datetime", ".", "datetime", ".", "strptime", "(", "data", "[", "3", "]", "[", "8", ":", "]", ".", "strip", "(", ")", ",", "'%m/%d/%Y %I:%M %p'", ")", ")", ".", "isoformat", "(", ")", ",", "\"percent\"", ":", "int", "(", "data", "[", "4", "]", "[", ":", "-", "1", "]", ")", "}", ")", "return", "output" ]
Get fitness locations and their current usage.
[ "Get", "fitness", "locations", "and", "their", "current", "usage", "." ]
31ff12c20d69438d63bc7a796f83ce4f4c828396
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/fitness.py#L49-L69
train
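The suffix-stripping regex in get_usage() can be exercised offline. Below is a self-contained run on made-up facility labels; note that re.sub's fourth positional parameter is count, so case-insensitive matching has to be requested with flags=re.I:

```python
# Self-contained illustration of the label clean-up in get_usage(): strip an
# optional "Hours" / "- OPEN" / "- CLOSED" suffix from a facility name.
# The facility names are sample inputs, not real scraped data.
import re

for raw in ("Pottruck Fitness Center Hours - OPEN", "Climbing Wall - CLOSED", "Pool"):
    name = re.sub(r"\s*(Hours)?\s*-?\s*(CLOSED|OPEN)?$", "", raw, flags=re.I).strip()
    print(name)
# -> Pottruck Fitness Center / Climbing Wall / Pool
```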
pennlabs/penn-sdk-python
penn/map.py
Map.search
def search(self, keyword): """Return all buildings related to the provided query. :param keyword: The keyword for your map search >>> results = n.search('Harrison') """ params = { "source": "map", "description": keyword } data = self._request(ENDPOINTS['SEARCH'], params) data['result_data'] = [res for res in data['result_data'] if isinstance(res, dict)] return data
python
def search(self, keyword): """Return all buildings related to the provided query. :param keyword: The keyword for your map search >>> results = n.search('Harrison') """ params = { "source": "map", "description": keyword } data = self._request(ENDPOINTS['SEARCH'], params) data['result_data'] = [res for res in data['result_data'] if isinstance(res, dict)] return data
[ "def", "search", "(", "self", ",", "keyword", ")", ":", "params", "=", "{", "\"source\"", ":", "\"map\"", ",", "\"description\"", ":", "keyword", "}", "data", "=", "self", ".", "_request", "(", "ENDPOINTS", "[", "'SEARCH'", "]", ",", "params", ")", "data", "[", "'result_data'", "]", "=", "[", "res", "for", "res", "in", "data", "[", "'result_data'", "]", "if", "isinstance", "(", "res", ",", "dict", ")", "]", "return", "data" ]
Return all buildings related to the provided query. :param keyword: The keyword for your map search >>> results = n.search('Harrison')
[ "Return", "all", "buildings", "related", "to", "the", "provided", "query", "." ]
31ff12c20d69438d63bc7a796f83ce4f4c828396
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/map.py#L21-L35
train
geophysics-ubonn/reda
lib/reda/utils/geometric_factors.py
compute_K_numerical
def compute_K_numerical(dataframe, settings=None, keep_dir=None): """Use a finite-element modeling code to infer geometric factors for meshes with topography or irregular electrode spacings. Parameters ---------- dataframe : pandas.DataFrame the data frame that contains the data settings : dict The settings required to compute the geometric factors. See examples down below for more information in the required content. keep_dir : path if not None, copy modeling dir here Returns ------- K : :class:`numpy.ndarray` K factors (are also directly written to the dataframe) Examples -------- :: settings = { 'rho': 100, 'elem': 'elem.dat', 'elec': 'elec.dat', 'sink_node': '100', '2D': False, } """ inversion_code = reda.rcParams.get('geom_factor.inversion_code', 'crtomo') if inversion_code == 'crtomo': import reda.utils.geom_fac_crtomo as geom_fac_crtomo if keep_dir is not None: keep_dir = os.path.abspath(keep_dir) K = geom_fac_crtomo.compute_K( dataframe, settings, keep_dir) else: raise Exception( 'Inversion code {0} not implemented for K computation'.format( inversion_code )) return K
python
def compute_K_numerical(dataframe, settings=None, keep_dir=None): """Use a finite-element modeling code to infer geometric factors for meshes with topography or irregular electrode spacings. Parameters ---------- dataframe : pandas.DataFrame the data frame that contains the data settings : dict The settings required to compute the geometric factors. See examples down below for more information in the required content. keep_dir : path if not None, copy modeling dir here Returns ------- K : :class:`numpy.ndarray` K factors (are also directly written to the dataframe) Examples -------- :: settings = { 'rho': 100, 'elem': 'elem.dat', 'elec': 'elec.dat', 'sink_node': '100', '2D': False, } """ inversion_code = reda.rcParams.get('geom_factor.inversion_code', 'crtomo') if inversion_code == 'crtomo': import reda.utils.geom_fac_crtomo as geom_fac_crtomo if keep_dir is not None: keep_dir = os.path.abspath(keep_dir) K = geom_fac_crtomo.compute_K( dataframe, settings, keep_dir) else: raise Exception( 'Inversion code {0} not implemented for K computation'.format( inversion_code )) return K
[ "def", "compute_K_numerical", "(", "dataframe", ",", "settings", "=", "None", ",", "keep_dir", "=", "None", ")", ":", "inversion_code", "=", "reda", ".", "rcParams", ".", "get", "(", "'geom_factor.inversion_code'", ",", "'crtomo'", ")", "if", "inversion_code", "==", "'crtomo'", ":", "import", "reda", ".", "utils", ".", "geom_fac_crtomo", "as", "geom_fac_crtomo", "if", "keep_dir", "is", "not", "None", ":", "keep_dir", "=", "os", ".", "path", ".", "abspath", "(", "keep_dir", ")", "K", "=", "geom_fac_crtomo", ".", "compute_K", "(", "dataframe", ",", "settings", ",", "keep_dir", ")", "else", ":", "raise", "Exception", "(", "'Inversion code {0} not implemented for K computation'", ".", "format", "(", "inversion_code", ")", ")", "return", "K" ]
Use a finite-element modeling code to infer geometric factors for meshes with topography or irregular electrode spacings. Parameters ---------- dataframe : pandas.DataFrame the data frame that contains the data settings : dict The settings required to compute the geometric factors. See examples down below for more information in the required content. keep_dir : path if not None, copy modeling dir here Returns ------- K : :class:`numpy.ndarray` K factors (are also directly written to the dataframe) Examples -------- :: settings = { 'rho': 100, 'elem': 'elem.dat', 'elec': 'elec.dat', 'sink_node': '100', '2D': False, }
[ "Use", "a", "finite", "-", "element", "modeling", "code", "to", "infer", "geometric", "factors", "for", "meshes", "with", "topography", "or", "irregular", "electrode", "spacings", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/geometric_factors.py#L29-L74
train
fuzeman/PyUPnP
pyupnp/lict.py
Lict._get_object_key
def _get_object_key(self, p_object): """Get key from object""" matched_key = None matched_index = None if hasattr(p_object, self._searchNames[0]): return getattr(p_object, self._searchNames[0]) for x in xrange(len(self._searchNames)): key = self._searchNames[x] if hasattr(p_object, key): matched_key = key matched_index = x if matched_key is None: raise KeyError() if matched_index != 0 and self._searchOptimize: self._searchNames.insert(0, self._searchNames.pop(matched_index)) return getattr(p_object, matched_key)
python
def _get_object_key(self, p_object): """Get key from object""" matched_key = None matched_index = None if hasattr(p_object, self._searchNames[0]): return getattr(p_object, self._searchNames[0]) for x in xrange(len(self._searchNames)): key = self._searchNames[x] if hasattr(p_object, key): matched_key = key matched_index = x if matched_key is None: raise KeyError() if matched_index != 0 and self._searchOptimize: self._searchNames.insert(0, self._searchNames.pop(matched_index)) return getattr(p_object, matched_key)
[ "def", "_get_object_key", "(", "self", ",", "p_object", ")", ":", "matched_key", "=", "None", "matched_index", "=", "None", "if", "hasattr", "(", "p_object", ",", "self", ".", "_searchNames", "[", "0", "]", ")", ":", "return", "getattr", "(", "p_object", ",", "self", ".", "_searchNames", "[", "0", "]", ")", "for", "x", "in", "xrange", "(", "len", "(", "self", ".", "_searchNames", ")", ")", ":", "key", "=", "self", ".", "_searchNames", "[", "x", "]", "if", "hasattr", "(", "p_object", ",", "key", ")", ":", "matched_key", "=", "key", "matched_index", "=", "x", "if", "matched_key", "is", "None", ":", "raise", "KeyError", "(", ")", "if", "matched_index", "!=", "0", "and", "self", ".", "_searchOptimize", ":", "self", ".", "_searchNames", ".", "insert", "(", "0", ",", "self", ".", "_searchNames", ".", "pop", "(", "matched_index", ")", ")", "return", "getattr", "(", "p_object", ",", "matched_key", ")" ]
Get key from object
[ "Get", "key", "from", "object" ]
6dea64be299952346a14300ab6cc7dac42736433
https://github.com/fuzeman/PyUPnP/blob/6dea64be299952346a14300ab6cc7dac42736433/pyupnp/lict.py#L52-L72
train
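The lookup loop in _get_object_key() is a move-to-front heuristic: once a later search name matches, it is promoted to index 0 so the hasattr fast path hits on the next call. A self-contained Python 3 sketch of the same idea; the attribute names here are illustrative assumptions, not Lict's real defaults:

```python
# Move-to-front key lookup, modelled on _get_object_key().
search_names = ['uuid', 'udn', 'name']      # candidate attribute names (assumed)

class Service:                              # toy object exposing only 'udn'
    udn = 'uuid:device-1'

def get_key(obj, names, optimize=True):
    if hasattr(obj, names[0]):              # fast path: first name matches
        return getattr(obj, names[0])
    for i, key in enumerate(names):
        if hasattr(obj, key):
            if optimize and i != 0:
                names.insert(0, names.pop(i))   # promote the hit to the front
            return getattr(obj, key)
    raise KeyError(obj)

print(get_key(Service(), search_names))     # -> uuid:device-1
print(search_names)                         # -> ['udn', 'uuid', 'name']
```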
south-coast-science/scs_core
src/scs_core/gas/pid_temp_comp.py
PIDTempComp.correct
def correct(self, temp, we_t): """ Compute weC from weT """ if not PIDTempComp.in_range(temp): return None n_t = self.cf_t(temp) if n_t is None: return None we_c = we_t * n_t return we_c
python
def correct(self, temp, we_t): """ Compute weC from weT """ if not PIDTempComp.in_range(temp): return None n_t = self.cf_t(temp) if n_t is None: return None we_c = we_t * n_t return we_c
[ "def", "correct", "(", "self", ",", "temp", ",", "we_t", ")", ":", "if", "not", "PIDTempComp", ".", "in_range", "(", "temp", ")", ":", "return", "None", "n_t", "=", "self", ".", "cf_t", "(", "temp", ")", "if", "n_t", "is", "None", ":", "return", "None", "we_c", "=", "we_t", "*", "n_t", "return", "we_c" ]
Compute weC from weT
[ "Compute", "weC", "from", "weT" ]
a4152b0bbed6acbbf257e1bba6a912f6ebe578e5
https://github.com/south-coast-science/scs_core/blob/a4152b0bbed6acbbf257e1bba6a912f6ebe578e5/src/scs_core/gas/pid_temp_comp.py#L66-L80
train
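The correction itself is just weC = weT * n(T), with n(T) looked up from a temperature-dependent factor table. A worked toy example follows; the factor values are invented for illustration and are not the sensor's published cf_t curve:

```python
# Toy weC = weT * n(T) correction. CF_T maps temperature (degC) to an
# illustrative correction factor n(T); real coefficients differ (assumption).
CF_T = {0: 1.10, 10: 1.05, 20: 1.00, 30: 0.95}

def correct(temp, we_t):
    n_t = CF_T.get(temp)      # the real class range-checks via in_range/cf_t
    if n_t is None:
        return None           # out-of-range temperatures yield None
    return we_t * n_t

print(correct(30, 0.200))     # -> 0.19 (compensated working-electrode value)
print(correct(55, 0.200))     # -> None (outside the supported range)
```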
geophysics-ubonn/reda
lib/reda/utils/norrec.py
compute_norrec_differences
def compute_norrec_differences(df, keys_diff): """DO NOT USE ANY MORE - DEPRECATED! """ raise Exception('This function is deprecated!') print('computing normal-reciprocal differences') # df.sort_index(level='norrec') def norrec_diff(x): """compute norrec_diff""" if x.shape[0] != 2: return np.nan else: return np.abs(x.iloc[1] - x.iloc[0]) keys_keep = list(set(df.columns.tolist()) - set(keys_diff)) agg_dict = {x: _first for x in keys_keep} agg_dict.update({x: norrec_diff for x in keys_diff}) for key in ('id', 'timestep', 'frequency'): if key in agg_dict: del(agg_dict[key]) # for frequencies, we could (I think) somehow prevent grouping by # frequencies... df = df.groupby(('timestep', 'frequency', 'id')).agg(agg_dict) # df.rename(columns={'r': 'Rdiff'}, inplace=True) df.reset_index() return df
python
def compute_norrec_differences(df, keys_diff): """DO NOT USE ANY MORE - DEPRECATED! """ raise Exception('This function is deprecated!') print('computing normal-reciprocal differences') # df.sort_index(level='norrec') def norrec_diff(x): """compute norrec_diff""" if x.shape[0] != 2: return np.nan else: return np.abs(x.iloc[1] - x.iloc[0]) keys_keep = list(set(df.columns.tolist()) - set(keys_diff)) agg_dict = {x: _first for x in keys_keep} agg_dict.update({x: norrec_diff for x in keys_diff}) for key in ('id', 'timestep', 'frequency'): if key in agg_dict: del(agg_dict[key]) # for frequencies, we could (I think) somehow prevent grouping by # frequencies... df = df.groupby(('timestep', 'frequency', 'id')).agg(agg_dict) # df.rename(columns={'r': 'Rdiff'}, inplace=True) df.reset_index() return df
[ "def", "compute_norrec_differences", "(", "df", ",", "keys_diff", ")", ":", "raise", "Exception", "(", "'This function is depreciated!'", ")", "print", "(", "'computing normal-reciprocal differences'", ")", "# df.sort_index(level='norrec')", "def", "norrec_diff", "(", "x", ")", ":", "\"\"\"compute norrec_diff\"\"\"", "if", "x", ".", "shape", "[", "0", "]", "!=", "2", ":", "return", "np", ".", "nan", "else", ":", "return", "np", ".", "abs", "(", "x", ".", "iloc", "[", "1", "]", "-", "x", ".", "iloc", "[", "0", "]", ")", "keys_keep", "=", "list", "(", "set", "(", "df", ".", "columns", ".", "tolist", "(", ")", ")", "-", "set", "(", "keys_diff", ")", ")", "agg_dict", "=", "{", "x", ":", "_first", "for", "x", "in", "keys_keep", "}", "agg_dict", ".", "update", "(", "{", "x", ":", "norrec_diff", "for", "x", "in", "keys_diff", "}", ")", "for", "key", "in", "(", "'id'", ",", "'timestep'", ",", "'frequency'", ")", ":", "if", "key", "in", "agg_dict", ":", "del", "(", "agg_dict", "[", "key", "]", ")", "# for frequencies, we could (I think) somehow prevent grouping by", "# frequencies...", "df", "=", "df", ".", "groupby", "(", "(", "'timestep'", ",", "'frequency'", ",", "'id'", ")", ")", ".", "agg", "(", "agg_dict", ")", "# df.rename(columns={'r': 'Rdiff'}, inplace=True)", "df", ".", "reset_index", "(", ")", "return", "df" ]
DO NOT USE ANY MORE - DEPRECATED!
[ "DO", "NOT", "USE", "ANY", "MORE", "-", "DEPRECIATED!" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/norrec.py#L48-L75
train
geophysics-ubonn/reda
lib/reda/utils/norrec.py
_normalize_abmn
def _normalize_abmn(abmn): """return a normalized version of abmn """ abmn_2d = np.atleast_2d(abmn) abmn_normalized = np.hstack(( np.sort(abmn_2d[:, 0:2], axis=1), np.sort(abmn_2d[:, 2:4], axis=1), )) return abmn_normalized
python
def _normalize_abmn(abmn): """return a normalized version of abmn """ abmn_2d = np.atleast_2d(abmn) abmn_normalized = np.hstack(( np.sort(abmn_2d[:, 0:2], axis=1), np.sort(abmn_2d[:, 2:4], axis=1), )) return abmn_normalized
[ "def", "_normalize_abmn", "(", "abmn", ")", ":", "abmn_2d", "=", "np", ".", "atleast_2d", "(", "abmn", ")", "abmn_normalized", "=", "np", ".", "hstack", "(", "(", "np", ".", "sort", "(", "abmn_2d", "[", ":", ",", "0", ":", "2", "]", ",", "axis", "=", "1", ")", ",", "np", ".", "sort", "(", "abmn_2d", "[", ":", ",", "2", ":", "4", "]", ",", "axis", "=", "1", ")", ",", ")", ")", "return", "abmn_normalized" ]
return a normalized version of abmn
[ "return", "a", "normalized", "version", "of", "abmn" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/norrec.py#L78-L86
train
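Since _normalize_abmn() is pure numpy, its behaviour is easy to verify offline: sorting within the current pair (a, b) and within the potential pair (m, n) maps electrode swaps onto one canonical quadrupole, which is what lets normal and reciprocal readings be matched later. The snippet below reproduces the function verbatim:

```python
# Canonical ABMN quadrupoles: sort the current pair and the potential pair.
import numpy as np

def normalize_abmn(abmn):
    abmn_2d = np.atleast_2d(abmn)
    return np.hstack((
        np.sort(abmn_2d[:, 0:2], axis=1),   # sort (a, b)
        np.sort(abmn_2d[:, 2:4], axis=1),   # sort (m, n)
    ))

print(normalize_abmn([2, 1, 4, 3]))                    # [[1 2 3 4]]
print(normalize_abmn([[1, 2, 4, 3], [2, 1, 3, 4]]))    # both rows -> [1 2 3 4]
```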
geophysics-ubonn/reda
lib/reda/utils/norrec.py
assign_norrec_diffs
def assign_norrec_diffs(df, diff_list): """Compute and write the difference between normal and reciprocal values for all columns specified in the diff_list parameter. Note that the DataFrame is directly written to. That is, it is changed during the call of this function. No need to use the returned object. Parameters ---------- df: pandas.DataFrame Dataframe containing the data diff_list: list list of columns to compute differences for. Returns ------- df_new: pandas.DataFrame The data with added columns """ extra_dims = [ x for x in ('timestep', 'frequency', 'id') if x in df.columns ] g = df.groupby(extra_dims) def subrow(row): if row.size == 2: return row.iloc[1] - row.iloc[0] else: return np.nan for diffcol in diff_list: diff = g[diffcol].agg(subrow).reset_index() # rename the column cols = list(diff.columns) cols[-1] = diffcol + 'diff' diff.columns = cols df = df.drop( cols[-1], axis=1, errors='ignore' ).merge(diff, on=extra_dims, how='outer') df = df.sort_values(extra_dims) return df
python
def assign_norrec_diffs(df, diff_list): """Compute and write the difference between normal and reciprocal values for all columns specified in the diff_list parameter. Note that the DataFrame is directly written to. That is, it is changed during the call of this function. No need to use the returned object. Parameters ---------- df: pandas.DataFrame Dataframe containing the data diff_list: list list of columns to compute differences for. Returns ------- df_new: pandas.DataFrame The data with added columns """ extra_dims = [ x for x in ('timestep', 'frequency', 'id') if x in df.columns ] g = df.groupby(extra_dims) def subrow(row): if row.size == 2: return row.iloc[1] - row.iloc[0] else: return np.nan for diffcol in diff_list: diff = g[diffcol].agg(subrow).reset_index() # rename the column cols = list(diff.columns) cols[-1] = diffcol + 'diff' diff.columns = cols df = df.drop( cols[-1], axis=1, errors='ignore' ).merge(diff, on=extra_dims, how='outer') df = df.sort_values(extra_dims) return df
[ "def", "assign_norrec_diffs", "(", "df", ",", "diff_list", ")", ":", "extra_dims", "=", "[", "x", "for", "x", "in", "(", "'timestep'", ",", "'frequency'", ",", "'id'", ")", "if", "x", "in", "df", ".", "columns", "]", "g", "=", "df", ".", "groupby", "(", "extra_dims", ")", "def", "subrow", "(", "row", ")", ":", "if", "row", ".", "size", "==", "2", ":", "return", "row", ".", "iloc", "[", "1", "]", "-", "row", ".", "iloc", "[", "0", "]", "else", ":", "return", "np", ".", "nan", "for", "diffcol", "in", "diff_list", ":", "diff", "=", "g", "[", "diffcol", "]", ".", "agg", "(", "subrow", ")", ".", "reset_index", "(", ")", "# rename the column", "cols", "=", "list", "(", "diff", ".", "columns", ")", "cols", "[", "-", "1", "]", "=", "diffcol", "+", "'diff'", "diff", ".", "columns", "=", "cols", "df", "=", "df", ".", "drop", "(", "cols", "[", "-", "1", "]", ",", "axis", "=", "1", ",", "errors", "=", "'ignore'", ")", ".", "merge", "(", "diff", ",", "on", "=", "extra_dims", ",", "how", "=", "'outer'", ")", "df", "=", "df", ".", "sort_values", "(", "extra_dims", ")", "return", "df" ]
Compute and write the difference between normal and reciprocal values for all columns specified in the diff_list parameter. Note that the DataFrame is directly written to. That is, it is changed during the call of this function. No need to use the returned object. Parameters ---------- df: pandas.DataFrame Dataframe containing the data diff_list: list list of columns to compute differences for. Returns ------- df_new: pandas.DataFrame The data with added columns
[ "Compute", "and", "write", "the", "difference", "between", "normal", "and", "reciprocal", "values", "for", "all", "columns", "specified", "in", "the", "diff_list", "parameter", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/norrec.py#L336-L378
train
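A hedged usage sketch for assign_norrec_diffs(): two rows share an id (the normal and the reciprocal reading), so the per-group difference lands in a new 'rdiff' column (the name follows the diffcol + 'diff' rule in the code). Requires reda to be installed; the numbers are made up for illustration:

```python
# Toy normal/reciprocal table: ids 1 and 2 each have two readings.
import pandas as pd
from reda.utils.norrec import assign_norrec_diffs

df = pd.DataFrame({
    'id': [1, 1, 2, 2],
    'timestep': [0, 0, 0, 0],
    'r': [10.0, 10.4, 20.0, 19.5],   # resistances of the paired readings
})
df = assign_norrec_diffs(df, ['r'])
print(df[['id', 'r', 'rdiff']])
# id 1 -> rdiff = 10.4 - 10.0 = 0.4; id 2 -> rdiff = 19.5 - 20.0 = -0.5
```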
marazmiki/django-ulogin
django_ulogin/views.py
PostBackView.handle_authenticated_user
def handle_authenticated_user(self, response): """ Handles the ULogin response if user is already authenticated """ current_user = get_user(self.request) ulogin, registered = ULoginUser.objects.get_or_create( uid=response['uid'], network=response['network'], defaults={'identity': response['identity'], 'user': current_user}) if not registered: ulogin_user = ulogin.user logger.debug('uLogin user already exists') if current_user != ulogin_user: logger.debug( "Mismatch: %s is not a %s. Take over it!" % (current_user, ulogin_user) ) ulogin.user = current_user ulogin.save() return get_user(self.request), ulogin, registered
python
def handle_authenticated_user(self, response): """ Handles the ULogin response if user is already authenticated """ current_user = get_user(self.request) ulogin, registered = ULoginUser.objects.get_or_create( uid=response['uid'], network=response['network'], defaults={'identity': response['identity'], 'user': current_user}) if not registered: ulogin_user = ulogin.user logger.debug('uLogin user already exists') if current_user != ulogin_user: logger.debug( "Mismatch: %s is not a %s. Take over it!" % (current_user, ulogin_user) ) ulogin.user = current_user ulogin.save() return get_user(self.request), ulogin, registered
[ "def", "handle_authenticated_user", "(", "self", ",", "response", ")", ":", "current_user", "=", "get_user", "(", "self", ".", "request", ")", "ulogin", ",", "registered", "=", "ULoginUser", ".", "objects", ".", "get_or_create", "(", "uid", "=", "response", "[", "'uid'", "]", ",", "network", "=", "response", "[", "'network'", "]", ",", "defaults", "=", "{", "'identity'", ":", "response", "[", "'identity'", "]", ",", "'user'", ":", "current_user", "}", ")", "if", "not", "registered", ":", "ulogin_user", "=", "ulogin", ".", "user", "logger", ".", "debug", "(", "'uLogin user already exists'", ")", "if", "current_user", "!=", "ulogin_user", ":", "logger", ".", "debug", "(", "\"Mismatch: %s is not a %s. Take over it!\"", "%", "(", "current_user", ",", "ulogin_user", ")", ")", "ulogin", ".", "user", "=", "current_user", "ulogin", ".", "save", "(", ")", "return", "get_user", "(", "self", ".", "request", ")", ",", "ulogin", ",", "registered" ]
Handles the ULogin response if user is already authenticated
[ "Handles", "the", "ULogin", "response", "if", "user", "is", "already", "authenticated" ]
f41ad4b4ca130ad8af25be72ad882c8cf94a80dc
https://github.com/marazmiki/django-ulogin/blob/f41ad4b4ca130ad8af25be72ad882c8cf94a80dc/django_ulogin/views.py#L82-L107
train
marazmiki/django-ulogin
django_ulogin/views.py
PostBackView.form_valid
def form_valid(self, form): """ The request from ulogin service is correct """ response = self.ulogin_response(form.cleaned_data['token'], self.request.get_host()) if 'error' in response: return render(self.request, self.error_template_name, {'json': response}) if user_is_authenticated(get_user(self.request)): user, identity, registered = \ self.handle_authenticated_user(response) else: user, identity, registered = \ self.handle_anonymous_user(response) assign.send(sender=ULoginUser, user=get_user(self.request), request=self.request, registered=registered, ulogin_user=identity, ulogin_data=response) return redirect(self.request.GET.get(REDIRECT_FIELD_NAME) or '/')
python
def form_valid(self, form): """ The request from ulogin service is correct """ response = self.ulogin_response(form.cleaned_data['token'], self.request.get_host()) if 'error' in response: return render(self.request, self.error_template_name, {'json': response}) if user_is_authenticated(get_user(self.request)): user, identity, registered = \ self.handle_authenticated_user(response) else: user, identity, registered = \ self.handle_anonymous_user(response) assign.send(sender=ULoginUser, user=get_user(self.request), request=self.request, registered=registered, ulogin_user=identity, ulogin_data=response) return redirect(self.request.GET.get(REDIRECT_FIELD_NAME) or '/')
[ "def", "form_valid", "(", "self", ",", "form", ")", ":", "response", "=", "self", ".", "ulogin_response", "(", "form", ".", "cleaned_data", "[", "'token'", "]", ",", "self", ".", "request", ".", "get_host", "(", ")", ")", "if", "'error'", "in", "response", ":", "return", "render", "(", "self", ".", "request", ",", "self", ".", "error_template_name", ",", "{", "'json'", ":", "response", "}", ")", "if", "user_is_authenticated", "(", "get_user", "(", "self", ".", "request", ")", ")", ":", "user", ",", "identity", ",", "registered", "=", "self", ".", "handle_authenticated_user", "(", "response", ")", "else", ":", "user", ",", "identity", ",", "registered", "=", "self", ".", "handle_anonymous_user", "(", "response", ")", "assign", ".", "send", "(", "sender", "=", "ULoginUser", ",", "user", "=", "get_user", "(", "self", ".", "request", ")", ",", "request", "=", "self", ".", "request", ",", "registered", "=", "registered", ",", "ulogin_user", "=", "identity", ",", "ulogin_data", "=", "response", ")", "return", "redirect", "(", "self", ".", "request", ".", "GET", ".", "get", "(", "REDIRECT_FIELD_NAME", ")", "or", "'/'", ")" ]
The request from ulogin service is correct
[ "The", "request", "from", "ulogin", "service", "is", "correct" ]
f41ad4b4ca130ad8af25be72ad882c8cf94a80dc
https://github.com/marazmiki/django-ulogin/blob/f41ad4b4ca130ad8af25be72ad882c8cf94a80dc/django_ulogin/views.py#L135-L159
train
marazmiki/django-ulogin
django_ulogin/views.py
PostBackView.ulogin_response
def ulogin_response(self, token, host): """ Makes a request to ULOGIN """ response = requests.get( settings.TOKEN_URL, params={ 'token': token, 'host': host }) content = response.content if sys.version_info >= (3, 0): content = content.decode('utf8') return json.loads(content)
python
def ulogin_response(self, token, host): """ Makes a request to ULOGIN """ response = requests.get( settings.TOKEN_URL, params={ 'token': token, 'host': host }) content = response.content if sys.version_info >= (3, 0): content = content.decode('utf8') return json.loads(content)
[ "def", "ulogin_response", "(", "self", ",", "token", ",", "host", ")", ":", "response", "=", "requests", ".", "get", "(", "settings", ".", "TOKEN_URL", ",", "params", "=", "{", "'token'", ":", "token", ",", "'host'", ":", "host", "}", ")", "content", "=", "response", ".", "content", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "0", ")", ":", "content", "=", "content", ".", "decode", "(", "'utf8'", ")", "return", "json", ".", "loads", "(", "content", ")" ]
Makes a request to ULOGIN
[ "Makes", "a", "request", "to", "ULOGIN" ]
f41ad4b4ca130ad8af25be72ad882c8cf94a80dc
https://github.com/marazmiki/django-ulogin/blob/f41ad4b4ca130ad8af25be72ad882c8cf94a80dc/django_ulogin/views.py#L167-L182
train
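The version branch in ulogin_response() only decodes the raw bytes before parsing (requests' Response.json() performs the same decode-and-parse in one step). Here is that step in isolation on a canned payload; the JSON content is sample data, not a real uLogin response:

```python
# Decode-then-parse, as done after the token request above.
import json
import sys

content = b'{"network": "vkontakte", "uid": "12345"}'   # sample bytes payload
if sys.version_info >= (3, 0):
    content = content.decode('utf8')
print(json.loads(content)['network'])                    # -> vkontakte
```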
evolbioinfo/pastml
pastml/parsimony.py
initialise_parsimonious_states
def initialise_parsimonious_states(tree, feature, states): """ Initializes the bottom-up state arrays for tips based on their states given by the feature. :param tree: ete3.Tree, tree for which the tip states are to be initialized :param feature: str, feature in which the tip states are stored (the value could be None for a missing state) :param states: numpy array, possible states. :return: void, adds the get_personalised_feature_name(feature, BU_PARS) feature to tree tips. """ ps_feature_down = get_personalized_feature_name(feature, BU_PARS_STATES) ps_feature = get_personalized_feature_name(feature, PARS_STATES) all_states = set(states) for node in tree.traverse(): state = getattr(node, feature, set()) if not state: node.add_feature(ps_feature_down, all_states) else: node.add_feature(ps_feature_down, state) node.add_feature(ps_feature, getattr(node, ps_feature_down))
python
def initialise_parsimonious_states(tree, feature, states): """ Initializes the bottom-up state arrays for tips based on their states given by the feature. :param tree: ete3.Tree, tree for which the tip states are to be initialized :param feature: str, feature in which the tip states are stored (the value could be None for a missing state) :param states: numpy array, possible states. :return: void, adds the get_personalised_feature_name(feature, BU_PARS) feature to tree tips. """ ps_feature_down = get_personalized_feature_name(feature, BU_PARS_STATES) ps_feature = get_personalized_feature_name(feature, PARS_STATES) all_states = set(states) for node in tree.traverse(): state = getattr(node, feature, set()) if not state: node.add_feature(ps_feature_down, all_states) else: node.add_feature(ps_feature_down, state) node.add_feature(ps_feature, getattr(node, ps_feature_down))
[ "def", "initialise_parsimonious_states", "(", "tree", ",", "feature", ",", "states", ")", ":", "ps_feature_down", "=", "get_personalized_feature_name", "(", "feature", ",", "BU_PARS_STATES", ")", "ps_feature", "=", "get_personalized_feature_name", "(", "feature", ",", "PARS_STATES", ")", "all_states", "=", "set", "(", "states", ")", "for", "node", "in", "tree", ".", "traverse", "(", ")", ":", "state", "=", "getattr", "(", "node", ",", "feature", ",", "set", "(", ")", ")", "if", "not", "state", ":", "node", ".", "add_feature", "(", "ps_feature_down", ",", "all_states", ")", "else", ":", "node", ".", "add_feature", "(", "ps_feature_down", ",", "state", ")", "node", ".", "add_feature", "(", "ps_feature", ",", "getattr", "(", "node", ",", "ps_feature_down", ")", ")" ]
Initializes the bottom-up state arrays for tips based on their states given by the feature. :param tree: ete3.Tree, tree for which the tip states are to be initialized :param feature: str, feature in which the tip states are stored (the value could be None for a missing state) :param states: numpy array, possible states. :return: void, adds the get_personalised_feature_name(feature, BU_PARS) feature to tree tips.
[ "Initializes", "the", "bottom", "-", "up", "state", "arrays", "for", "tips", "based", "on", "their", "states", "given", "by", "the", "feature", "." ]
df8a375841525738383e59548eed3441b07dbd3e
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/parsimony.py#L48-L67
train
evolbioinfo/pastml
pastml/parsimony.py
uppass
def uppass(tree, feature): """ UPPASS traverses the tree starting from the tips and going up till the root, and assigns to each parent node a state based on the states of its child nodes. if N is a tip: S(N) <- state of N else: L, R <- left and right children of N UPPASS(L) UPPASS(R) if S(L) intersects with S(R): S(N) <- intersection(S(L), S(R)) else: S(N) <- union(S(L), S(R)) :param tree: ete3.Tree, the tree of interest :param feature: str, character for which the parsimonious states are reconstructed :return: void, adds get_personalized_feature_name(feature, BU_PARS_STATES) feature to the tree nodes """ ps_feature = get_personalized_feature_name(feature, BU_PARS_STATES) for node in tree.traverse('postorder'): if not node.is_leaf(): children_states = get_most_common_states(getattr(child, ps_feature) for child in node.children) node_states = getattr(node, ps_feature) state_intersection = node_states & children_states node.add_feature(ps_feature, state_intersection if state_intersection else node_states)
python
def uppass(tree, feature): """ UPPASS traverses the tree starting from the tips and going up till the root, and assigns to each parent node a state based on the states of its child nodes. if N is a tip: S(N) <- state of N else: L, R <- left and right children of N UPPASS(L) UPPASS(R) if S(L) intersects with S(R): S(N) <- intersection(S(L), S(R)) else: S(N) <- union(S(L), S(R)) :param tree: ete3.Tree, the tree of interest :param feature: str, character for which the parsimonious states are reconstructed :return: void, adds get_personalized_feature_name(feature, BU_PARS_STATES) feature to the tree nodes """ ps_feature = get_personalized_feature_name(feature, BU_PARS_STATES) for node in tree.traverse('postorder'): if not node.is_leaf(): children_states = get_most_common_states(getattr(child, ps_feature) for child in node.children) node_states = getattr(node, ps_feature) state_intersection = node_states & children_states node.add_feature(ps_feature, state_intersection if state_intersection else node_states)
[ "def", "uppass", "(", "tree", ",", "feature", ")", ":", "ps_feature", "=", "get_personalized_feature_name", "(", "feature", ",", "BU_PARS_STATES", ")", "for", "node", "in", "tree", ".", "traverse", "(", "'postorder'", ")", ":", "if", "not", "node", ".", "is_leaf", "(", ")", ":", "children_states", "=", "get_most_common_states", "(", "getattr", "(", "child", ",", "ps_feature", ")", "for", "child", "in", "node", ".", "children", ")", "node_states", "=", "getattr", "(", "node", ",", "ps_feature", ")", "state_intersection", "=", "node_states", "&", "children_states", "node", ".", "add_feature", "(", "ps_feature", ",", "state_intersection", "if", "state_intersection", "else", "node_states", ")" ]
UPPASS traverses the tree starting from the tips and going up till the root, and assigns to each parent node a state based on the states of its child nodes. if N is a tip: S(N) <- state of N else: L, R <- left and right children of N UPPASS(L) UPPASS(R) if S(L) intersects with S(R): S(N) <- intersection(S(L), S(R)) else: S(N) <- union(S(L), S(R)) :param tree: ete3.Tree, the tree of interest :param feature: str, character for which the parsimonious states are reconstructed :return: void, adds get_personalized_feature_name(feature, BU_PARS_STATES) feature to the tree nodes
[ "UPPASS", "traverses", "the", "tree", "starting", "from", "the", "tips", "and", "going", "up", "till", "the", "root", "and", "assigns", "to", "each", "parent", "node", "a", "state", "based", "on", "the", "states", "of", "its", "child", "nodes", "." ]
df8a375841525738383e59548eed3441b07dbd3e
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/parsimony.py#L83-L111
train
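The pseudocode in this docstring is Fitch's bottom-up pass. Below is a minimal, self-contained sketch of the binary-tree special case on plain tuples; the real implementation runs on ete3 trees and generalizes to polytomies via get_most_common_states:

```python
# Fitch UPPASS on a nested-tuple tree. Tips are frozensets of states;
# internal nodes are (left, right) pairs. This is an illustrative stand-in
# for the ete3-based traversal above, not the library's API.
def uppass(node):
    if isinstance(node, frozenset):              # tip: return its state set
        return node
    left, right = (uppass(child) for child in node)
    intersection = left & right
    return intersection if intersection else left | right

tree = ((frozenset({'A'}), frozenset({'C'})),    # cherry with conflicting tips
        frozenset({'A'}))
print(uppass(tree))   # frozenset({'A'}): union {A, C} below, then intersect
```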
evolbioinfo/pastml
pastml/parsimony.py
parsimonious_acr
def parsimonious_acr(tree, character, prediction_method, states, num_nodes, num_tips): """ Calculates parsimonious states on the tree and stores them in the corresponding feature. :param states: numpy array of possible states :param prediction_method: str, ACCTRAN (accelerated transformation), DELTRAN (delayed transformation) or DOWNPASS :param tree: ete3.Tree, the tree of interest :param character: str, character for which the parsimonious states are reconstructed :return: dict, mapping between reconstruction parameters and values """ initialise_parsimonious_states(tree, character, states) uppass(tree, character) results = [] result = {STATES: states, NUM_NODES: num_nodes, NUM_TIPS: num_tips} logger = logging.getLogger('pastml') def process_result(method, feature): out_feature = get_personalized_feature_name(character, method) if prediction_method != method else character res = result.copy() res[NUM_SCENARIOS], res[NUM_UNRESOLVED_NODES], res[NUM_STATES_PER_NODE] \ = choose_parsimonious_states(tree, feature, out_feature) res[NUM_STATES_PER_NODE] /= num_nodes res[PERC_UNRESOLVED] = res[NUM_UNRESOLVED_NODES] * 100 / num_nodes logger.debug('{} node{} unresolved ({:.2f}%) for {} by {}, ' 'i.e. {:.4f} state{} per node in average.' .format(res[NUM_UNRESOLVED_NODES], 's are' if res[NUM_UNRESOLVED_NODES] != 1 else ' is', res[PERC_UNRESOLVED], character, method, res[NUM_STATES_PER_NODE], 's' if res[NUM_STATES_PER_NODE] > 1 else '')) res[CHARACTER] = out_feature res[METHOD] = method results.append(res) if prediction_method in {ACCTRAN, MP}: feature = get_personalized_feature_name(character, PARS_STATES) if prediction_method == MP: feature = get_personalized_feature_name(feature, ACCTRAN) acctran(tree, character, feature) result[STEPS] = get_num_parsimonious_steps(tree, feature) process_result(ACCTRAN, feature) bu_feature = get_personalized_feature_name(character, BU_PARS_STATES) for node in tree.traverse(): if prediction_method == ACCTRAN: node.del_feature(bu_feature) node.del_feature(feature) if prediction_method != ACCTRAN: downpass(tree, character, states) feature = get_personalized_feature_name(character, PARS_STATES) if prediction_method == DOWNPASS: result[STEPS] = get_num_parsimonious_steps(tree, feature) if prediction_method in {DOWNPASS, MP}: process_result(DOWNPASS, feature) if prediction_method in {DELTRAN, MP}: deltran(tree, character) if prediction_method == DELTRAN: result[STEPS] = get_num_parsimonious_steps(tree, feature) process_result(DELTRAN, feature) for node in tree.traverse(): node.del_feature(feature) logger.debug("Parsimonious reconstruction for {} requires {} state changes." .format(character, result[STEPS])) return results
python
def parsimonious_acr(tree, character, prediction_method, states, num_nodes, num_tips): """ Calculates parsimonious states on the tree and stores them in the corresponding feature. :param states: numpy array of possible states :param prediction_method: str, ACCTRAN (accelerated transformation), DELTRAN (delayed transformation) or DOWNPASS :param tree: ete3.Tree, the tree of interest :param character: str, character for which the parsimonious states are reconstructed :return: dict, mapping between reconstruction parameters and values """ initialise_parsimonious_states(tree, character, states) uppass(tree, character) results = [] result = {STATES: states, NUM_NODES: num_nodes, NUM_TIPS: num_tips} logger = logging.getLogger('pastml') def process_result(method, feature): out_feature = get_personalized_feature_name(character, method) if prediction_method != method else character res = result.copy() res[NUM_SCENARIOS], res[NUM_UNRESOLVED_NODES], res[NUM_STATES_PER_NODE] \ = choose_parsimonious_states(tree, feature, out_feature) res[NUM_STATES_PER_NODE] /= num_nodes res[PERC_UNRESOLVED] = res[NUM_UNRESOLVED_NODES] * 100 / num_nodes logger.debug('{} node{} unresolved ({:.2f}%) for {} by {}, ' 'i.e. {:.4f} state{} per node in average.' .format(res[NUM_UNRESOLVED_NODES], 's are' if res[NUM_UNRESOLVED_NODES] != 1 else ' is', res[PERC_UNRESOLVED], character, method, res[NUM_STATES_PER_NODE], 's' if res[NUM_STATES_PER_NODE] > 1 else '')) res[CHARACTER] = out_feature res[METHOD] = method results.append(res) if prediction_method in {ACCTRAN, MP}: feature = get_personalized_feature_name(character, PARS_STATES) if prediction_method == MP: feature = get_personalized_feature_name(feature, ACCTRAN) acctran(tree, character, feature) result[STEPS] = get_num_parsimonious_steps(tree, feature) process_result(ACCTRAN, feature) bu_feature = get_personalized_feature_name(character, BU_PARS_STATES) for node in tree.traverse(): if prediction_method == ACCTRAN: node.del_feature(bu_feature) node.del_feature(feature) if prediction_method != ACCTRAN: downpass(tree, character, states) feature = get_personalized_feature_name(character, PARS_STATES) if prediction_method == DOWNPASS: result[STEPS] = get_num_parsimonious_steps(tree, feature) if prediction_method in {DOWNPASS, MP}: process_result(DOWNPASS, feature) if prediction_method in {DELTRAN, MP}: deltran(tree, character) if prediction_method == DELTRAN: result[STEPS] = get_num_parsimonious_steps(tree, feature) process_result(DELTRAN, feature) for node in tree.traverse(): node.del_feature(feature) logger.debug("Parsimonious reconstruction for {} requires {} state changes." .format(character, result[STEPS])) return results
[ "def", "parsimonious_acr", "(", "tree", ",", "character", ",", "prediction_method", ",", "states", ",", "num_nodes", ",", "num_tips", ")", ":", "initialise_parsimonious_states", "(", "tree", ",", "character", ",", "states", ")", "uppass", "(", "tree", ",", "character", ")", "results", "=", "[", "]", "result", "=", "{", "STATES", ":", "states", ",", "NUM_NODES", ":", "num_nodes", ",", "NUM_TIPS", ":", "num_tips", "}", "logger", "=", "logging", ".", "getLogger", "(", "'pastml'", ")", "def", "process_result", "(", "method", ",", "feature", ")", ":", "out_feature", "=", "get_personalized_feature_name", "(", "character", ",", "method", ")", "if", "prediction_method", "!=", "method", "else", "character", "res", "=", "result", ".", "copy", "(", ")", "res", "[", "NUM_SCENARIOS", "]", ",", "res", "[", "NUM_UNRESOLVED_NODES", "]", ",", "res", "[", "NUM_STATES_PER_NODE", "]", "=", "choose_parsimonious_states", "(", "tree", ",", "feature", ",", "out_feature", ")", "res", "[", "NUM_STATES_PER_NODE", "]", "/=", "num_nodes", "res", "[", "PERC_UNRESOLVED", "]", "=", "res", "[", "NUM_UNRESOLVED_NODES", "]", "*", "100", "/", "num_nodes", "logger", ".", "debug", "(", "'{} node{} unresolved ({:.2f}%) for {} by {}, '", "'i.e. {:.4f} state{} per node in average.'", ".", "format", "(", "res", "[", "NUM_UNRESOLVED_NODES", "]", ",", "'s are'", "if", "res", "[", "NUM_UNRESOLVED_NODES", "]", "!=", "1", "else", "' is'", ",", "res", "[", "PERC_UNRESOLVED", "]", ",", "character", ",", "method", ",", "res", "[", "NUM_STATES_PER_NODE", "]", ",", "'s'", "if", "res", "[", "NUM_STATES_PER_NODE", "]", ">", "1", "else", "''", ")", ")", "res", "[", "CHARACTER", "]", "=", "out_feature", "res", "[", "METHOD", "]", "=", "method", "results", ".", "append", "(", "res", ")", "if", "prediction_method", "in", "{", "ACCTRAN", ",", "MP", "}", ":", "feature", "=", "get_personalized_feature_name", "(", "character", ",", "PARS_STATES", ")", "if", "prediction_method", "==", "MP", ":", "feature", "=", "get_personalized_feature_name", "(", "feature", ",", "ACCTRAN", ")", "acctran", "(", "tree", ",", "character", ",", "feature", ")", "result", "[", "STEPS", "]", "=", "get_num_parsimonious_steps", "(", "tree", ",", "feature", ")", "process_result", "(", "ACCTRAN", ",", "feature", ")", "bu_feature", "=", "get_personalized_feature_name", "(", "character", ",", "BU_PARS_STATES", ")", "for", "node", "in", "tree", ".", "traverse", "(", ")", ":", "if", "prediction_method", "==", "ACCTRAN", ":", "node", ".", "del_feature", "(", "bu_feature", ")", "node", ".", "del_feature", "(", "feature", ")", "if", "prediction_method", "!=", "ACCTRAN", ":", "downpass", "(", "tree", ",", "character", ",", "states", ")", "feature", "=", "get_personalized_feature_name", "(", "character", ",", "PARS_STATES", ")", "if", "prediction_method", "==", "DOWNPASS", ":", "result", "[", "STEPS", "]", "=", "get_num_parsimonious_steps", "(", "tree", ",", "feature", ")", "if", "prediction_method", "in", "{", "DOWNPASS", ",", "MP", "}", ":", "process_result", "(", "DOWNPASS", ",", "feature", ")", "if", "prediction_method", "in", "{", "DELTRAN", ",", "MP", "}", ":", "deltran", "(", "tree", ",", "character", ")", "if", "prediction_method", "==", "DELTRAN", ":", "result", "[", "STEPS", "]", "=", "get_num_parsimonious_steps", "(", "tree", ",", "feature", ")", "process_result", "(", "DELTRAN", ",", "feature", ")", "for", "node", "in", "tree", ".", "traverse", "(", ")", ":", "node", ".", "del_feature", "(", "feature", ")", "logger", ".", "debug", "(", "\"Parsimonious reconstruction for 
{} requires {} state changes.\"", ".", "format", "(", "character", ",", "result", "[", "STEPS", "]", ")", ")", "return", "results" ]
Calculates parsimonious states on the tree and stores them in the corresponding feature. :param states: numpy array of possible states :param prediction_method: str, ACCTRAN (accelerated transformation), DELTRAN (delayed transformation) or DOWNPASS :param tree: ete3.Tree, the tree of interest :param character: str, character for which the parsimonious states are reconstructed :return: dict, mapping between reconstruction parameters and values
[ "Calculates", "parsimonious", "states", "on", "the", "tree", "and", "stores", "them", "in", "the", "corresponding", "feature", "." ]
df8a375841525738383e59548eed3441b07dbd3e
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/parsimony.py#L224-L289
train
frasertweedale/ledgertools
ltlib/chart.py
balance_to_ringchart_items
def balance_to_ringchart_items(balance, account='', show=SHOW_CREDIT): """Convert a balance data structure into RingChartItem objects.""" show = show if show else SHOW_CREDIT # cannot show all in ring chart rcis = [] for item in balance: subaccount = item['account_fragment'] if not account \ else ':'.join((account, item['account_fragment'])) ch = balance_to_ringchart_items(item['children'], subaccount, show) amount = item['balance'] if show == SHOW_CREDIT else -item['balance'] if amount < 0: continue # omit negative amounts wedge_amount = max(amount, sum(map(float, ch))) rci = gtkchartlib.ringchart.RingChartItem( wedge_amount, tooltip='{}\n{}'.format(subaccount, wedge_amount), items=ch ) rcis.append(rci) return rcis
python
def balance_to_ringchart_items(balance, account='', show=SHOW_CREDIT): """Convert a balance data structure into RingChartItem objects.""" show = show if show else SHOW_CREDIT # cannot show all in ring chart rcis = [] for item in balance: subaccount = item['account_fragment'] if not account \ else ':'.join((account, item['account_fragment'])) ch = balance_to_ringchart_items(item['children'], subaccount, show) amount = item['balance'] if show == SHOW_CREDIT else -item['balance'] if amount < 0: continue # omit negative amounts wedge_amount = max(amount, sum(map(float, ch))) rci = gtkchartlib.ringchart.RingChartItem( wedge_amount, tooltip='{}\n{}'.format(subaccount, wedge_amount), items=ch ) rcis.append(rci) return rcis
[ "def", "balance_to_ringchart_items", "(", "balance", ",", "account", "=", "''", ",", "show", "=", "SHOW_CREDIT", ")", ":", "show", "=", "show", "if", "show", "else", "SHOW_CREDIT", "# cannot show all in ring chart", "rcis", "=", "[", "]", "for", "item", "in", "balance", ":", "subaccount", "=", "item", "[", "'account_fragment'", "]", "if", "not", "account", "else", "':'", ".", "join", "(", "(", "account", ",", "item", "[", "'account_fragment'", "]", ")", ")", "ch", "=", "balance_to_ringchart_items", "(", "item", "[", "'children'", "]", ",", "subaccount", ",", "show", ")", "amount", "=", "item", "[", "'balance'", "]", "if", "show", "==", "SHOW_CREDIT", "else", "-", "item", "[", "'balance'", "]", "if", "amount", "<", "0", ":", "continue", "# omit negative amounts", "wedge_amount", "=", "max", "(", "amount", ",", "sum", "(", "map", "(", "float", ",", "ch", ")", ")", ")", "rci", "=", "gtkchartlib", ".", "ringchart", ".", "RingChartItem", "(", "wedge_amount", ",", "tooltip", "=", "'{}\\n{}'", ".", "format", "(", "subaccount", ",", "wedge_amount", ")", ",", "items", "=", "ch", ")", "rcis", ".", "append", "(", "rci", ")", "return", "rcis" ]
Convert a balance data structure into RingChartItem objects.
[ "Convert", "a", "balance", "data", "structure", "into", "RingChartItem", "objects", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/chart.py#L27-L45
train
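A self-contained sketch of the recursion in balance_to_ringchart_items() without the gtkchartlib widget: each wedge is max(own balance, sum of child wedges) so children never overflow their parent, and negative amounts are skipped (the default credit view; the debit view flips the sign first). The balance structure mirrors what the function expects:

```python
# Wedge-amount computation for a nested account balance (toy data).
balance = [
    {'account_fragment': 'Expenses', 'balance': 50.0, 'children': [
        {'account_fragment': 'Food', 'balance': 30.0, 'children': []},
        {'account_fragment': 'Rent', 'balance': 25.0, 'children': []},
    ]},
    {'account_fragment': 'Refunds', 'balance': -10.0, 'children': []},  # skipped
]

def wedges(items, account=''):
    out = []
    for item in items:
        name = item['account_fragment'] if not account \
            else ':'.join((account, item['account_fragment']))
        children = wedges(item['children'], name)
        amount = item['balance']
        if amount < 0:
            continue                         # omit negative amounts
        out.append((name, max(amount, sum(w for _, w, _ in children)), children))
    return out

print(wedges(balance))
# The Expenses wedge is 55.0: its children (30 + 25) exceed its own 50.0
```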
orlandodiaz/log3
log3/log.py
log_to_file
def log_to_file(log_path, log_urllib=False, limit=None): """ Add file_handler to logger""" log_path = log_path file_handler = logging.FileHandler(log_path) if limit: file_handler = RotatingFileHandler( log_path, mode='a', maxBytes=limit * 1024 * 1024, backupCount=2, encoding=None, delay=0) fmt = '[%(asctime)s %(filename)18s] %(levelname)-7s - %(message)7s' date_fmt = '%Y-%m-%d %H:%M:%S' formatter = logging.Formatter(fmt, datefmt=date_fmt) file_handler.setFormatter(formatter) logger.addHandler(file_handler) if log_urllib: urllib_logger.addHandler(file_handler) urllib_logger.setLevel(logging.DEBUG)
python
def log_to_file(log_path, log_urllib=False, limit=None): """ Add file_handler to logger""" log_path = log_path file_handler = logging.FileHandler(log_path) if limit: file_handler = RotatingFileHandler( log_path, mode='a', maxBytes=limit * 1024 * 1024, backupCount=2, encoding=None, delay=0) fmt = '[%(asctime)s %(filename)18s] %(levelname)-7s - %(message)7s' date_fmt = '%Y-%m-%d %H:%M:%S' formatter = logging.Formatter(fmt, datefmt=date_fmt) file_handler.setFormatter(formatter) logger.addHandler(file_handler) if log_urllib: urllib_logger.addHandler(file_handler) urllib_logger.setLevel(logging.DEBUG)
[ "def", "log_to_file", "(", "log_path", ",", "log_urllib", "=", "False", ",", "limit", "=", "None", ")", ":", "log_path", "=", "log_path", "file_handler", "=", "logging", ".", "FileHandler", "(", "log_path", ")", "if", "limit", ":", "file_handler", "=", "RotatingFileHandler", "(", "log_path", ",", "mode", "=", "'a'", ",", "maxBytes", "=", "limit", "*", "1024", "*", "1024", ",", "backupCount", "=", "2", ",", "encoding", "=", "None", ",", "delay", "=", "0", ")", "fmt", "=", "'[%(asctime)s %(filename)18s] %(levelname)-7s - %(message)7s'", "date_fmt", "=", "'%Y-%m-%d %H:%M:%S'", "formatter", "=", "logging", ".", "Formatter", "(", "fmt", ",", "datefmt", "=", "date_fmt", ")", "file_handler", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "file_handler", ")", "if", "log_urllib", ":", "urllib_logger", ".", "addHandler", "(", "file_handler", ")", "urllib_logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")" ]
Add file_handler to logger
[ "Add", "file_handler", "to", "logger" ]
aeedf83159be8dd3d4757e0d9240f9cdbc9c3ea2
https://github.com/orlandodiaz/log3/blob/aeedf83159be8dd3d4757e0d9240f9cdbc9c3ea2/log3/log.py#L34-L53
train
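A minimal, self-contained version of the handler setup in log_to_file(), using the same format string and a size-limited RotatingFileHandler; the file name 'demo.log' and the 5 MB limit are arbitrary choices for the example:

```python
# Size-limited file logging, mirroring log_to_file(log_path, limit=5).
import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger('demo')
logger.setLevel(logging.DEBUG)

handler = RotatingFileHandler('demo.log', mode='a', maxBytes=5 * 1024 * 1024,
                              backupCount=2, encoding=None, delay=0)
fmt = '[%(asctime)s %(filename)18s] %(levelname)-7s - %(message)7s'
handler.setFormatter(logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S'))
logger.addHandler(handler)

logger.info('rotates after ~5 MB, keeping two backup files')
```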
YosaiProject/yosai_alchemystore
yosai_alchemystore/accountstore/accountstore.py
session_context
def session_context(fn): """ Handles session setup and teardown """ @functools.wraps(fn) def wrap(*args, **kwargs): session = args[0].Session() # obtain from self result = fn(*args, session=session, **kwargs) session.close() return result return wrap
python
def session_context(fn): """ Handles session setup and teardown """ @functools.wraps(fn) def wrap(*args, **kwargs): session = args[0].Session() # obtain from self result = fn(*args, session=session, **kwargs) session.close() return result return wrap
[ "def", "session_context", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "wrap", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "session", "=", "args", "[", "0", "]", ".", "Session", "(", ")", "# obtain from self", "result", "=", "fn", "(", "*", "args", ",", "session", "=", "session", ",", "*", "*", "kwargs", ")", "session", ".", "close", "(", ")", "return", "result", "return", "wrap" ]
Handles session setup and teardown
[ "Handles", "session", "setup", "and", "teardown" ]
6479c159ab2ac357e6b70cdd71a2d673279e86bb
https://github.com/YosaiProject/yosai_alchemystore/blob/6479c159ab2ac357e6b70cdd71a2d673279e86bb/yosai_alchemystore/accountstore/accountstore.py#L66-L76
train
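A self-contained sketch of the decorator in action: it pulls the Session factory off self (args[0]), injects a fresh session as a keyword argument, and closes it after the wrapped call returns. DummySession stands in for a SQLAlchemy sessionmaker here, which is an assumption about the real account store:

```python
# Session setup/teardown via decorator, as in session_context above.
import functools

def session_context(fn):
    @functools.wraps(fn)
    def wrap(*args, **kwargs):
        session = args[0].Session()          # obtained from self
        result = fn(*args, session=session, **kwargs)
        session.close()                      # teardown after the call
        return result
    return wrap

class DummySession:                          # stand-in for a sessionmaker
    def query(self):
        return 'rows'
    def close(self):
        print('session closed')

class AccountStore:
    Session = DummySession

    @session_context
    def get_account(self, account_id, session=None):
        return session.query()

print(AccountStore().get_account(42))        # prints 'session closed', then 'rows'
```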
geophysics-ubonn/reda
lib/reda/exporters/syscal.py
_syscal_write_electrode_coords
def _syscal_write_electrode_coords(fid, spacing, N): """helper function that writes out electrode positions to a file descriptor Parameters ---------- fid: file descriptor data is written here spacing: float spacing of electrodes N: int number of electrodes """ fid.write('# X Y Z\n') for i in range(0, N): fid.write('{0} {1} {2} {3}\n'.format(i + 1, i * spacing, 0, 0))
python
def _syscal_write_electrode_coords(fid, spacing, N): """helper function that writes out electrode positions to a file descriptor Parameters ---------- fid: file descriptor data is written here spacing: float spacing of electrodes N: int number of electrodes """ fid.write('# X Y Z\n') for i in range(0, N): fid.write('{0} {1} {2} {3}\n'.format(i + 1, i * spacing, 0, 0))
[ "def", "_syscal_write_electrode_coords", "(", "fid", ",", "spacing", ",", "N", ")", ":", "fid", ".", "write", "(", "'# X Y Z\\n'", ")", "for", "i", "in", "range", "(", "0", ",", "N", ")", ":", "fid", ".", "write", "(", "'{0} {1} {2} {3}\\n'", ".", "format", "(", "i", "+", "1", ",", "i", "*", "spacing", ",", "0", ",", "0", ")", ")" ]
helper function that writes out electrode positions to a file descriptor Parameters ---------- fid: file descriptor data is written here spacing: float spacing of electrodes N: int number of electrodes
[ "helper", "function", "that", "writes", "out", "electrode", "positions", "to", "a", "file", "descriptor" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/exporters/syscal.py#L5-L19
train
geophysics-ubonn/reda
lib/reda/exporters/syscal.py
_syscal_write_quadpoles
def _syscal_write_quadpoles(fid, quadpoles): """helper function that writes the actual measurement configurations to a file descriptor. Parameters ---------- fid: file descriptor data is written here quadpoles: numpy.ndarray measurement configurations """ fid.write('# A B M N\n') for nr, quadpole in enumerate(quadpoles): fid.write( '{0} {1} {2} {3} {4}\n'.format( nr, quadpole[0], quadpole[1], quadpole[2], quadpole[3]))
python
def _syscal_write_quadpoles(fid, quadpoles): """helper function that writes the actual measurement configurations to a file descriptor. Parameters ---------- fid: file descriptor data is written here quadpoles: numpy.ndarray measurement configurations """ fid.write('# A B M N\n') for nr, quadpole in enumerate(quadpoles): fid.write( '{0} {1} {2} {3} {4}\n'.format( nr, quadpole[0], quadpole[1], quadpole[2], quadpole[3]))
[ "def", "_syscal_write_quadpoles", "(", "fid", ",", "quadpoles", ")", ":", "fid", ".", "write", "(", "'# A B M N\\n'", ")", "for", "nr", ",", "quadpole", "in", "enumerate", "(", "quadpoles", ")", ":", "fid", ".", "write", "(", "'{0} {1} {2} {3} {4}\\n'", ".", "format", "(", "nr", ",", "quadpole", "[", "0", "]", ",", "quadpole", "[", "1", "]", ",", "quadpole", "[", "2", "]", ",", "quadpole", "[", "3", "]", ")", ")" ]
helper function that writes the actual measurement configurations to a file descriptor. Parameters ---------- fid: file descriptor data is written here quadpoles: numpy.ndarray measurement configurations
[ "helper", "function", "that", "writes", "the", "actual", "measurement", "configurations", "to", "a", "file", "descriptor", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/exporters/syscal.py#L22-L38
train
geophysics-ubonn/reda
lib/reda/exporters/syscal.py
syscal_save_to_config_txt
def syscal_save_to_config_txt(filename, configs, spacing=1): """Write configurations to a Syscal ascii file that can be read by the Electre Pro program. Parameters ---------- filename: string output filename configs: numpy.ndarray Nx4 array with measurement configurations A-B-M-N """ print('Number of measurements: ', configs.shape[0]) number_of_electrodes = configs.max().astype(int) with open(filename, 'w') as fid: _syscal_write_electrode_coords(fid, spacing, number_of_electrodes) _syscal_write_quadpoles(fid, configs.astype(int))
python
def syscal_save_to_config_txt(filename, configs, spacing=1): """Write configurations to a Syscal ascii file that can be read by the Electre Pro program. Parameters ---------- filename: string output filename configs: numpy.ndarray Nx4 array with measurement configurations A-B-M-N """ print('Number of measurements: ', configs.shape[0]) number_of_electrodes = configs.max().astype(int) with open(filename, 'w') as fid: _syscal_write_electrode_coords(fid, spacing, number_of_electrodes) _syscal_write_quadpoles(fid, configs.astype(int))
[ "def", "syscal_save_to_config_txt", "(", "filename", ",", "configs", ",", "spacing", "=", "1", ")", ":", "print", "(", "'Number of measurements: '", ",", "configs", ".", "shape", "[", "0", "]", ")", "number_of_electrodes", "=", "configs", ".", "max", "(", ")", ".", "astype", "(", "int", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fid", ":", "_syscal_write_electrode_coords", "(", "fid", ",", "spacing", ",", "number_of_electrodes", ")", "_syscal_write_quadpoles", "(", "fid", ",", "configs", ".", "astype", "(", "int", ")", ")" ]
Write configurations to a Syscal ascii file that can be read by the Electre Pro program. Parameters ---------- filename: string output filename configs: numpy.ndarray Nx4 array with measurement configurations A-B-M-N
[ "Write", "configurations", "to", "a", "Syscal", "ascii", "file", "that", "can", "be", "read", "by", "the", "Electre", "Pro", "program", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/exporters/syscal.py#L41-L58
train
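A minimal usage sketch for the exporter above, assuming the module path implied by this record (reda.exporters.syscal); the configuration array and output filename are made up for illustration.

import numpy as np

from reda.exporters.syscal import syscal_save_to_config_txt

# four hypothetical Wenner-style measurement configurations (A, B, M, N)
configs = np.array([
    [1, 4, 2, 3],
    [2, 5, 3, 4],
    [3, 6, 4, 5],
    [4, 7, 5, 6],
])
# writes electrode coordinates (1 m spacing) and the quadrupoles to one file
syscal_save_to_config_txt('config.txt', configs, spacing=1)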
geophysics-ubonn/reda
lib/reda/utils/mpl.py
setup
def setup(use_latex=False, overwrite=False):
    """Set up matplotlib imports and settings.

    Parameters
    ----------
    use_latex: bool, optional
        Determine if Latex output should be used. Latex will only be enabled
        if a 'latex' binary is found in the system.
    overwrite: bool, optional
        Overwrite some matplotlib config values.

    Returns
    -------
    plt: :mod:`pylab`
        pylab module imported as plt
    mpl: :mod:`matplotlib`
        matplotlib module imported as mpl
    """
    # just make sure we can access matplotlib as mpl
    import matplotlib as mpl

    # general settings
    if overwrite:
        mpl.rcParams["lines.linewidth"] = 2.0
        mpl.rcParams["lines.markeredgewidth"] = 3.0
        mpl.rcParams["lines.markersize"] = 3.0
        mpl.rcParams["font.size"] = 12
        mpl.rcParams['mathtext.default'] = 'regular'

    if latex and use_latex:
        mpl.rcParams['text.usetex'] = True
        mpl.rc(
            'text.latex',
            preamble=''.join((
                # r'\usepackage{droidsans}
                r'\usepackage[T1]{fontenc} ',
                r'\usepackage{sfmath} \renewcommand{\rmfamily}{\sffamily}',
                r'\renewcommand\familydefault{\sfdefault} ',
                r'\usepackage{mathastext} '
            ))
        )
    else:
        mpl.rcParams['text.usetex'] = False

    import matplotlib.pyplot as plt
    return plt, mpl
python
def setup(use_latex=False, overwrite=False):
    """Set up matplotlib imports and settings.

    Parameters
    ----------
    use_latex: bool, optional
        Determine if Latex output should be used. Latex will only be enabled
        if a 'latex' binary is found in the system.
    overwrite: bool, optional
        Overwrite some matplotlib config values.

    Returns
    -------
    plt: :mod:`pylab`
        pylab module imported as plt
    mpl: :mod:`matplotlib`
        matplotlib module imported as mpl
    """
    # just make sure we can access matplotlib as mpl
    import matplotlib as mpl

    # general settings
    if overwrite:
        mpl.rcParams["lines.linewidth"] = 2.0
        mpl.rcParams["lines.markeredgewidth"] = 3.0
        mpl.rcParams["lines.markersize"] = 3.0
        mpl.rcParams["font.size"] = 12
        mpl.rcParams['mathtext.default'] = 'regular'

    if latex and use_latex:
        mpl.rcParams['text.usetex'] = True
        mpl.rc(
            'text.latex',
            preamble=''.join((
                # r'\usepackage{droidsans}
                r'\usepackage[T1]{fontenc} ',
                r'\usepackage{sfmath} \renewcommand{\rmfamily}{\sffamily}',
                r'\renewcommand\familydefault{\sfdefault} ',
                r'\usepackage{mathastext} '
            ))
        )
    else:
        mpl.rcParams['text.usetex'] = False

    import matplotlib.pyplot as plt
    return plt, mpl
[ "def", "setup", "(", "use_latex", "=", "False", ",", "overwrite", "=", "False", ")", ":", "# just make sure we can access matplotlib as mpl", "import", "matplotlib", "as", "mpl", "# general settings", "if", "overwrite", ":", "mpl", ".", "rcParams", "[", "\"lines.linewidth\"", "]", "=", "2.0", "mpl", ".", "rcParams", "[", "\"lines.markeredgewidth\"", "]", "=", "3.0", "mpl", ".", "rcParams", "[", "\"lines.markersize\"", "]", "=", "3.0", "mpl", ".", "rcParams", "[", "\"font.size\"", "]", "=", "12", "mpl", ".", "rcParams", "[", "'mathtext.default'", "]", "=", "'regular'", "if", "latex", "and", "use_latex", ":", "mpl", ".", "rcParams", "[", "'text.usetex'", "]", "=", "True", "mpl", ".", "rc", "(", "'text.latex'", ",", "preamble", "=", "''", ".", "join", "(", "(", "# r'\\usepackage{droidsans}", "r'\\usepackage[T1]{fontenc} '", ",", "r'\\usepackage{sfmath} \\renewcommand{\\rmfamily}{\\sffamily}'", ",", "r'\\renewcommand\\familydefault{\\sfdefault} '", ",", "r'\\usepackage{mathastext} '", ")", ")", ")", "else", ":", "mpl", ".", "rcParams", "[", "'text.usetex'", "]", "=", "False", "import", "matplotlib", ".", "pyplot", "as", "plt", "return", "plt", ",", "mpl" ]
Set up matplotlib imports and settings.

Parameters
----------
use_latex: bool, optional
    Determine if Latex output should be used. Latex will only be enabled
    if a 'latex' binary is found in the system.
overwrite: bool, optional
    Overwrite some matplotlib config values.

Returns
-------
plt: :mod:`pylab`
    pylab module imported as plt
mpl: :mod:`matplotlib`
    matplotlib module imported as mpl
[ "Set", "up", "matplotlib", "imports", "and", "settings", "." ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/mpl.py#L19-L64
train
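Assuming the import path implied by the record (reda.utils.mpl), a usage sketch; whether latex output actually switches on still depends on the module-level 'latex' check mentioned in the docstring.

from reda.utils.mpl import setup

# plain setup: returns pyplot and matplotlib without touching rcParams
plt, mpl = setup()

# opt into the rcParams overrides; latex rendering is only enabled when a
# 'latex' binary is available on the system (checked inside the module)
plt, mpl = setup(use_latex=True, overwrite=True)
fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])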
geophysics-ubonn/reda
lib/reda/importers/crtomo.py
load_seit_data
def load_seit_data(directory, frequency_file='frequencies.dat',
                   data_prefix='volt_', **kwargs):
    """Load sEIT data from data directory. This function loads data
    previously exported from reda using
    reda.exporters.crtomo.write_files_to_directory

    Parameters
    ----------
    directory : string
        input directory
    frequency_file : string, optional
        file (located in directory) that contains the frequencies
    data_prefix: string, optional
        for each frequency a corresponding data file must be present in the
        input directory. Frequencies and files are matched by sorting the
        frequencies AND the filenames, retrieved using glob and the
        data_prefix

    Returns
    -------
    df : pandas.DataFrame
        A DataFrame suitable for the sEIT container
    electrodes : None
        No electrode data is imported
    topography : None
        No topography data is imported
    """
    frequencies = np.loadtxt(directory + os.sep + frequency_file)
    data_files = sorted(glob(directory + os.sep + data_prefix + '*'))

    # check that the number of frequencies matches the number of data files
    if frequencies.size != len(data_files):
        raise Exception(
            'number of frequencies does not match number of data files')

    # load data
    data_list = []
    for frequency, filename in zip(frequencies, data_files):
        subdata = load_mod_file(filename)
        subdata['frequency'] = frequency
        data_list.append(subdata)
    df = pd.concat(data_list)
    return df, None, None
python
def load_seit_data(directory, frequency_file='frequencies.dat',
                   data_prefix='volt_', **kwargs):
    """Load sEIT data from data directory. This function loads data
    previously exported from reda using
    reda.exporters.crtomo.write_files_to_directory

    Parameters
    ----------
    directory : string
        input directory
    frequency_file : string, optional
        file (located in directory) that contains the frequencies
    data_prefix: string, optional
        for each frequency a corresponding data file must be present in the
        input directory. Frequencies and files are matched by sorting the
        frequencies AND the filenames, retrieved using glob and the
        data_prefix

    Returns
    -------
    df : pandas.DataFrame
        A DataFrame suitable for the sEIT container
    electrodes : None
        No electrode data is imported
    topography : None
        No topography data is imported
    """
    frequencies = np.loadtxt(directory + os.sep + frequency_file)
    data_files = sorted(glob(directory + os.sep + data_prefix + '*'))

    # check that the number of frequencies matches the number of data files
    if frequencies.size != len(data_files):
        raise Exception(
            'number of frequencies does not match number of data files')

    # load data
    data_list = []
    for frequency, filename in zip(frequencies, data_files):
        subdata = load_mod_file(filename)
        subdata['frequency'] = frequency
        data_list.append(subdata)
    df = pd.concat(data_list)
    return df, None, None
[ "def", "load_seit_data", "(", "directory", ",", "frequency_file", "=", "'frequencies.dat'", ",", "data_prefix", "=", "'volt_'", ",", "*", "*", "kwargs", ")", ":", "frequencies", "=", "np", ".", "loadtxt", "(", "directory", "+", "os", ".", "sep", "+", "frequency_file", ")", "data_files", "=", "sorted", "(", "glob", "(", "directory", "+", "os", ".", "sep", "+", "data_prefix", "+", "'*'", ")", ")", "# check that the number of frequencies matches the number of data files", "if", "frequencies", ".", "size", "!=", "len", "(", "data_files", ")", ":", "raise", "Exception", "(", "'number of frequencies does not match number of data files'", ")", "# load data", "data_list", "=", "[", "]", "for", "frequency", ",", "filename", "in", "zip", "(", "frequencies", ",", "data_files", ")", ":", "subdata", "=", "load_mod_file", "(", "filename", ")", "subdata", "[", "'frequency'", "]", "=", "frequency", "data_list", ".", "append", "(", "subdata", ")", "df", "=", "pd", ".", "concat", "(", "data_list", ")", "return", "df", ",", "None", ",", "None" ]
Load sEIT data from data directory. This function loads data previously
exported from reda using reda.exporters.crtomo.write_files_to_directory

Parameters
----------
directory : string
    input directory
frequency_file : string, optional
    file (located in directory) that contains the frequencies
data_prefix: string, optional
    for each frequency a corresponding data file must be present in the
    input directory. Frequencies and files are matched by sorting the
    frequencies AND the filenames, retrieved using glob and the data_prefix

Returns
-------
df : pandas.DataFrame
    A DataFrame suitable for the sEIT container
electrodes : None
    No electrode data is imported
topography : None
    No topography data is imported
[ "Load", "sEIT", "data", "from", "data", "directory", ".", "This", "function", "loads", "data", "previously", "exported", "from", "reda", "using", "reda", ".", "exporters", ".", "crtomo", ".", "write_files_to_directory" ]
46a939729e40c7c4723315c03679c40761152e9e
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/crtomo.py#L59-L100
train
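A minimal usage sketch for the importer above; the directory name is a placeholder, and the directory is expected to contain frequencies.dat plus one volt_* file per frequency, as the docstring describes.

from reda.importers.crtomo import load_seit_data

# placeholder directory following the documented layout
df, electrodes, topography = load_seit_data('seit_export')
print(df['frequency'].unique())
# electrodes and topography are always None for this importer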
evolbioinfo/pastml
pastml/models/generator.py
get_diagonalisation
def get_diagonalisation(frequencies, rate_matrix=None):
    """
    Normalises and diagonalises the rate matrix.

    :param frequencies: character state frequencies.
    :type frequencies: numpy.array
    :param rate_matrix: (optional) rate matrix (by default an all-equal-rate matrix is used)
    :type rate_matrix: numpy.ndarray
    :return: matrix diagonalisation (d, A, A^{-1})
        such that A.dot(np.diag(d)).dot(A^{-1}) = 1/mu Q (normalised generator)
    :rtype: tuple
    """
    Q = get_normalised_generator(frequencies, rate_matrix)
    d, A = np.linalg.eig(Q)
    return d, A, np.linalg.inv(A)
python
def get_diagonalisation(frequencies, rate_matrix=None):
    """
    Normalises and diagonalises the rate matrix.

    :param frequencies: character state frequencies.
    :type frequencies: numpy.array
    :param rate_matrix: (optional) rate matrix (by default an all-equal-rate matrix is used)
    :type rate_matrix: numpy.ndarray
    :return: matrix diagonalisation (d, A, A^{-1})
        such that A.dot(np.diag(d)).dot(A^{-1}) = 1/mu Q (normalised generator)
    :rtype: tuple
    """
    Q = get_normalised_generator(frequencies, rate_matrix)
    d, A = np.linalg.eig(Q)
    return d, A, np.linalg.inv(A)
[ "def", "get_diagonalisation", "(", "frequencies", ",", "rate_matrix", "=", "None", ")", ":", "Q", "=", "get_normalised_generator", "(", "frequencies", ",", "rate_matrix", ")", "d", ",", "A", "=", "np", ".", "linalg", ".", "eig", "(", "Q", ")", "return", "d", ",", "A", ",", "np", ".", "linalg", ".", "inv", "(", "A", ")" ]
Normalises and diagonalises the rate matrix.

:param frequencies: character state frequencies.
:type frequencies: numpy.array
:param rate_matrix: (optional) rate matrix (by default an all-equal-rate matrix is used)
:type rate_matrix: numpy.ndarray
:return: matrix diagonalisation (d, A, A^{-1})
    such that A.dot(np.diag(d)).dot(A^{-1}) = 1/mu Q (normalised generator)
:rtype: tuple
[ "Normalises", "and", "diagonalises", "the", "rate", "matrix", "." ]
df8a375841525738383e59548eed3441b07dbd3e
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/models/generator.py#L4-L18
train
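A short verification sketch for the decomposition above: multiplying the factors back together should reproduce the normalised generator. The three-state frequency vector is an arbitrary example.

import numpy as np

from pastml.models.generator import get_diagonalisation, get_normalised_generator

frequencies = np.array([0.5, 0.3, 0.2])
d, A, A_inv = get_diagonalisation(frequencies)

# A diag(d) A^{-1} should equal the normalised generator 1/mu Q
Q = get_normalised_generator(frequencies)
assert np.allclose(A.dot(np.diag(d)).dot(A_inv), Q)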
evolbioinfo/pastml
pastml/models/generator.py
get_normalised_generator
def get_normalised_generator(frequencies, rate_matrix=None):
    """
    Calculates the normalised generator from the rate matrix and character state frequencies.

    :param frequencies: character state frequencies.
    :type frequencies: numpy.array
    :param rate_matrix: (optional) rate matrix (by default an all-equal-rate matrix is used)
    :type rate_matrix: numpy.ndarray
    :return: normalised generator 1/mu Q
    :rtype: numpy.ndarray
    """
    if rate_matrix is None:
        n = len(frequencies)
        rate_matrix = np.ones(shape=(n, n), dtype=np.float64) - np.eye(n)
    generator = rate_matrix * frequencies
    generator -= np.diag(generator.sum(axis=1))
    mu = -generator.diagonal().dot(frequencies)
    generator /= mu
    return generator
python
def get_normalised_generator(frequencies, rate_matrix=None):
    """
    Calculates the normalised generator from the rate matrix and character state frequencies.

    :param frequencies: character state frequencies.
    :type frequencies: numpy.array
    :param rate_matrix: (optional) rate matrix (by default an all-equal-rate matrix is used)
    :type rate_matrix: numpy.ndarray
    :return: normalised generator 1/mu Q
    :rtype: numpy.ndarray
    """
    if rate_matrix is None:
        n = len(frequencies)
        rate_matrix = np.ones(shape=(n, n), dtype=np.float64) - np.eye(n)
    generator = rate_matrix * frequencies
    generator -= np.diag(generator.sum(axis=1))
    mu = -generator.diagonal().dot(frequencies)
    generator /= mu
    return generator
[ "def", "get_normalised_generator", "(", "frequencies", ",", "rate_matrix", "=", "None", ")", ":", "if", "rate_matrix", "is", "None", ":", "n", "=", "len", "(", "frequencies", ")", "rate_matrix", "=", "np", ".", "ones", "(", "shape", "=", "(", "n", ",", "n", ")", ",", "dtype", "=", "np", ".", "float64", ")", "-", "np", ".", "eye", "(", "n", ")", "generator", "=", "rate_matrix", "*", "frequencies", "generator", "-=", "np", ".", "diag", "(", "generator", ".", "sum", "(", "axis", "=", "1", ")", ")", "mu", "=", "-", "generator", ".", "diagonal", "(", ")", ".", "dot", "(", "frequencies", ")", "generator", "/=", "mu", "return", "generator" ]
Calculates the normalised generator from the rate matrix and character state frequencies.

:param frequencies: character state frequencies.
:type frequencies: numpy.array
:param rate_matrix: (optional) rate matrix (by default an all-equal-rate matrix is used)
:type rate_matrix: numpy.ndarray
:return: normalised generator 1/mu Q
:rtype: numpy.ndarray
[ "Calculates", "the", "normalised", "generator", "from", "the", "rate", "matrix", "and", "character", "state", "frequencies", "." ]
df8a375841525738383e59548eed3441b07dbd3e
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/models/generator.py#L21-L39
train
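To make the normalisation concrete, a sketch checking the two defining properties of the returned matrix: each row sums to zero, and the expected rate -sum_i pi_i q_ii equals one. The frequency vector is arbitrary.

import numpy as np

from pastml.models.generator import get_normalised_generator

frequencies = np.array([0.4, 0.3, 0.2, 0.1])
generator = get_normalised_generator(frequencies)

# each row of a generator sums to zero
assert np.allclose(generator.sum(axis=1), 0)
# normalisation: one expected substitution per unit time
assert np.isclose(-generator.diagonal().dot(frequencies), 1.0)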
evolbioinfo/pastml
pastml/models/generator.py
get_pij_matrix
def get_pij_matrix(t, diag, A, A_inv):
    """
    Calculates the probability matrix of substitutions i->j over time t,
    given the normalised generator diagonalisation.

    :param t: time
    :type t: float
    :return: probability matrix
    :rtype: numpy.ndarray
    """
    return A.dot(np.diag(np.exp(diag * t))).dot(A_inv)
python
def get_pij_matrix(t, diag, A, A_inv):
    """
    Calculates the probability matrix of substitutions i->j over time t,
    given the normalised generator diagonalisation.

    :param t: time
    :type t: float
    :return: probability matrix
    :rtype: numpy.ndarray
    """
    return A.dot(np.diag(np.exp(diag * t))).dot(A_inv)
[ "def", "get_pij_matrix", "(", "t", ",", "diag", ",", "A", ",", "A_inv", ")", ":", "return", "A", ".", "dot", "(", "np", ".", "diag", "(", "np", ".", "exp", "(", "diag", "*", "t", ")", ")", ")", ".", "dot", "(", "A_inv", ")" ]
Calculates the probability matrix of substitutions i->j over time t,
given the normalised generator diagonalisation.

:param t: time
:type t: float
:return: probability matrix
:rtype: numpy.ndarray
[ "Calculates", "the", "probability", "matrix", "of", "substitutions", "i", "-", ">", "j", "over", "time", "t", "given", "the", "normalised", "generator", "diagonalisation", "." ]
df8a375841525738383e59548eed3441b07dbd3e
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/models/generator.py#L42-L53
train
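A sanity-check sketch tying the two generator functions together: at t=0 the matrix is the identity, and for any t the rows sum to one, as expected for exp(Qt). Frequencies are again arbitrary.

import numpy as np

from pastml.models.generator import get_diagonalisation, get_pij_matrix

frequencies = np.array([0.5, 0.3, 0.2])
d, A, A_inv = get_diagonalisation(frequencies)

# P(0) is the identity matrix
assert np.allclose(get_pij_matrix(0.0, d, A, A_inv), np.eye(3))
# P(t) is stochastic: every row sums to one
P = get_pij_matrix(0.7, d, A, A_inv)
assert np.allclose(P.sum(axis=1), 1.0)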
lambdalisue/notify
src/notify/arguments.py
split_arguments
def split_arguments(args):
    """
    Split specified arguments into two lists.

    This is used to distinguish the options of the program and
    execution command/arguments.

    Parameters
    ----------
    args : list
        Command line arguments

    Returns
    -------
    list : options, arguments
        options indicate the optional arguments for the program and
        arguments indicate the execution command/arguments
    """
    prev = False
    for i, value in enumerate(args[1:]):
        if value.startswith('-'):
            prev = True
        elif prev:
            prev = False
        else:
            return args[:i+1], args[i+1:]
    return args, []
python
def split_arguments(args):
    """
    Split specified arguments into two lists.

    This is used to distinguish the options of the program and
    execution command/arguments.

    Parameters
    ----------
    args : list
        Command line arguments

    Returns
    -------
    list : options, arguments
        options indicate the optional arguments for the program and
        arguments indicate the execution command/arguments
    """
    prev = False
    for i, value in enumerate(args[1:]):
        if value.startswith('-'):
            prev = True
        elif prev:
            prev = False
        else:
            return args[:i+1], args[i+1:]
    return args, []
[ "def", "split_arguments", "(", "args", ")", ":", "prev", "=", "False", "for", "i", ",", "value", "in", "enumerate", "(", "args", "[", "1", ":", "]", ")", ":", "if", "value", ".", "startswith", "(", "'-'", ")", ":", "prev", "=", "True", "elif", "prev", ":", "prev", "=", "False", "else", ":", "return", "args", "[", ":", "i", "+", "1", "]", ",", "args", "[", "i", "+", "1", ":", "]", "return", "args", ",", "[", "]" ]
Split specified arguments into two lists.

This is used to distinguish the options of the program and
execution command/arguments.

Parameters
----------
args : list
    Command line arguments

Returns
-------
list : options, arguments
    options indicate the optional arguments for the program and
    arguments indicate the execution command/arguments
[ "Split", "specified", "arguments", "to", "two", "list", "." ]
1b6d7d1faa2cea13bfaa1f35130f279a0115e686
https://github.com/lambdalisue/notify/blob/1b6d7d1faa2cea13bfaa1f35130f279a0115e686/src/notify/arguments.py#L8-L34
train
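A worked example of the split heuristic above (the package targets Python 2, but this function runs unchanged on Python 3): each flag sets the prev marker and its value clears it, so the first bare token that does not follow a flag starts the command part. The argv is invented.

from notify.arguments import split_arguments

argv = ['notify', '-t', 'me@example.com', '-s', 'done', 'make', 'all']
options, command = split_arguments(argv)
# options -> ['notify', '-t', 'me@example.com', '-s', 'done']
# command -> ['make', 'all']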
lambdalisue/notify
src/notify/arguments.py
parse_arguments
def parse_arguments(args, config):
    """
    Parse specified arguments via config

    Parameters
    ----------
    args : list
        Command line arguments
    config : object
        ConfigParser instance which values are used as default values of
        options

    Returns
    -------
    list : arguments, options
        options indicate the return value of ArgumentParser and
        arguments indicate the execution command/arguments
    """
    import notify
    from conf import config_to_options
    opts = config_to_options(config)

    usage = ("%(prog)s "
             "[-h] [-t TO_ADDR] [-f FROM_ADDR] [-e ENCODING] [-s SUBJECT]\n"
             "       "
             "[-o HOST] [-p PORT] [--username USERNAME] [--password PASSWORD]\n"
             "       "
             "[--setup] [--check] COMMAND ARGUMENTS") % {'prog': "notify"}
    description = """
    Call COMMAND with ARGUMENTS and send notification email to TO_ADDR
    """
    parser = optparse.OptionParser(
        usage=usage,
        description=description,
        version=notify.__version__)
    parser.add_option('-t', '--to-addr',
                      default=opts.to_addr,
                      help=('Destination of the email.'))
    parser.add_option('-f', '--from-addr',
                      default=opts.from_addr,
                      help=('Source of the email.'))
    parser.add_option('-s', '--subject',
                      default=opts.subject,
                      help=('Subject of the email'))
    parser.add_option('-e', '--encoding',
                      default=opts.encoding,
                      help=('Encoding of the email'))
    parser.add_option('-o', '--host',
                      default=opts.host,
                      help=('Host address of MUA'))
    parser.add_option('-p', '--port', type='int',
                      default=opts.port,
                      help=('Port number of MUA'))
    parser.add_option('--username',
                      default=opts.username,
                      help=('Username for authentication'))
    parser.add_option('--password',
                      help=('Password for authentication'))
    parser.add_option('--setup', default=False,
                      action='store_true',
                      help=('Setup %(prog)s configuration'))
    parser.add_option('--check', default=False,
                      action='store_true',
                      help=('Send %(prog)s configuration via email for '
                            'checking. Only for Unix system.'))
    # display help and exit
    if len(args) == 1:
        parser.print_help()
        sys.exit(0)
    else:
        # translate all specified arguments to unicode
        if sys.version_info < (3,):
            encoding = sys.stdout.encoding
            args = map(lambda x: unicode(x, encoding), args)
        # split argv to two array
        lhs, rhs = split_arguments(args)
        # parse options
        opts = parser.parse_args(args=lhs[1:])[0]
        return rhs, opts
python
def parse_arguments(args, config):
    """
    Parse specified arguments via config

    Parameters
    ----------
    args : list
        Command line arguments
    config : object
        ConfigParser instance which values are used as default values of
        options

    Returns
    -------
    list : arguments, options
        options indicate the return value of ArgumentParser and
        arguments indicate the execution command/arguments
    """
    import notify
    from conf import config_to_options
    opts = config_to_options(config)

    usage = ("%(prog)s "
             "[-h] [-t TO_ADDR] [-f FROM_ADDR] [-e ENCODING] [-s SUBJECT]\n"
             "       "
             "[-o HOST] [-p PORT] [--username USERNAME] [--password PASSWORD]\n"
             "       "
             "[--setup] [--check] COMMAND ARGUMENTS") % {'prog': "notify"}
    description = """
    Call COMMAND with ARGUMENTS and send notification email to TO_ADDR
    """
    parser = optparse.OptionParser(
        usage=usage,
        description=description,
        version=notify.__version__)
    parser.add_option('-t', '--to-addr',
                      default=opts.to_addr,
                      help=('Destination of the email.'))
    parser.add_option('-f', '--from-addr',
                      default=opts.from_addr,
                      help=('Source of the email.'))
    parser.add_option('-s', '--subject',
                      default=opts.subject,
                      help=('Subject of the email'))
    parser.add_option('-e', '--encoding',
                      default=opts.encoding,
                      help=('Encoding of the email'))
    parser.add_option('-o', '--host',
                      default=opts.host,
                      help=('Host address of MUA'))
    parser.add_option('-p', '--port', type='int',
                      default=opts.port,
                      help=('Port number of MUA'))
    parser.add_option('--username',
                      default=opts.username,
                      help=('Username for authentication'))
    parser.add_option('--password',
                      help=('Password for authentication'))
    parser.add_option('--setup', default=False,
                      action='store_true',
                      help=('Setup %(prog)s configuration'))
    parser.add_option('--check', default=False,
                      action='store_true',
                      help=('Send %(prog)s configuration via email for '
                            'checking. Only for Unix system.'))
    # display help and exit
    if len(args) == 1:
        parser.print_help()
        sys.exit(0)
    else:
        # translate all specified arguments to unicode
        if sys.version_info < (3,):
            encoding = sys.stdout.encoding
            args = map(lambda x: unicode(x, encoding), args)
        # split argv to two array
        lhs, rhs = split_arguments(args)
        # parse options
        opts = parser.parse_args(args=lhs[1:])[0]
        return rhs, opts
[ "def", "parse_arguments", "(", "args", ",", "config", ")", ":", "import", "notify", "from", "conf", "import", "config_to_options", "opts", "=", "config_to_options", "(", "config", ")", "usage", "=", "(", "\"%(prog)s \"", "\"[-h] [-t TO_ADDR] [-f FROM_ADDR] [-e ENCODING] [-s SUBJECT]\\n\"", "\" \"", "\"[-o HOST] [-p PORT] [--username USERNAME] [--password PASSWORD]\\n\"", "\" \"", "\"[--setup] [--check] COMMAND ARGUMENTS\"", ")", "%", "{", "'prog'", ":", "\"notify\"", "}", "description", "=", "\"\"\"\n Call COMMAND with ARGUMENTS and send notification email to TO_ADDR\n \"\"\"", "parser", "=", "optparse", ".", "OptionParser", "(", "usage", "=", "usage", ",", "description", "=", "description", ",", "version", "=", "notify", ".", "__version__", ")", "parser", ".", "add_option", "(", "'-t'", ",", "'--to-addr'", ",", "default", "=", "opts", ".", "to_addr", ",", "help", "=", "(", "'Destination of the email.'", ")", ")", "parser", ".", "add_option", "(", "'-f'", ",", "'--from-addr'", ",", "default", "=", "opts", ".", "from_addr", ",", "help", "=", "(", "'Source of the email.'", ")", ")", "parser", ".", "add_option", "(", "'-s'", ",", "'--subject'", ",", "default", "=", "opts", ".", "subject", ",", "help", "=", "(", "'Subject of the email'", ")", ")", "parser", ".", "add_option", "(", "'-e'", ",", "'--encoding'", ",", "default", "=", "opts", ".", "encoding", ",", "help", "=", "(", "'Encoding of the email'", ")", ")", "parser", ".", "add_option", "(", "'-o'", ",", "'--host'", ",", "default", "=", "opts", ".", "host", ",", "help", "=", "(", "'Host address of MUA'", ")", ")", "parser", ".", "add_option", "(", "'-p'", ",", "'--port'", ",", "type", "=", "'int'", ",", "default", "=", "opts", ".", "port", ",", "help", "=", "(", "'Port number of MUA'", ")", ")", "parser", ".", "add_option", "(", "'--username'", ",", "default", "=", "opts", ".", "username", ",", "help", "=", "(", "'Username for authentication'", ")", ")", "parser", ".", "add_option", "(", "'--password'", ",", "help", "=", "(", "'Password for authentication'", ")", ")", "parser", ".", "add_option", "(", "'--setup'", ",", "default", "=", "False", ",", "action", "=", "'store_true'", ",", "help", "=", "(", "'Setup %(prog)s configuration'", ")", ")", "parser", ".", "add_option", "(", "'--check'", ",", "default", "=", "False", ",", "action", "=", "'store_true'", ",", "help", "=", "(", "'Send %(prog)s configuration via email for '", "'checking. Only for Unix system.'", ")", ")", "# display help and exit", "if", "len", "(", "args", ")", "==", "1", ":", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "0", ")", "else", ":", "# translate all specified arguments to unicode", "if", "sys", ".", "version_info", "<", "(", "3", ",", ")", ":", "encoding", "=", "sys", ".", "stdout", ".", "encoding", "args", "=", "map", "(", "lambda", "x", ":", "unicode", "(", "x", ",", "encoding", ")", ",", "args", ")", "# split argv to two array", "lhs", ",", "rhs", "=", "split_arguments", "(", "args", ")", "# parse options", "opts", "=", "parser", ".", "parse_args", "(", "args", "=", "lhs", "[", "1", ":", "]", ")", "[", "0", "]", "return", "rhs", ",", "opts" ]
Parse specified arguments via config

Parameters
----------
args : list
    Command line arguments
config : object
    ConfigParser instance which values are used as default values of
    options

Returns
-------
list : arguments, options
    options indicate the return value of ArgumentParser and
    arguments indicate the execution command/arguments
[ "Parse", "specified", "arguments", "via", "config" ]
1b6d7d1faa2cea13bfaa1f35130f279a0115e686
https://github.com/lambdalisue/notify/blob/1b6d7d1faa2cea13bfaa1f35130f279a0115e686/src/notify/arguments.py#L36-L117
train
jonashaag/httpauth
httpauth.py
BaseHttpAuthMiddleware.should_require_authentication
def should_require_authentication(self, url):
    """ Returns True if we should require authentication for the URL given """
    return (not self.routes  # require auth for all URLs
            or any(route.match(url) for route in self.routes))
python
def should_require_authentication(self, url):
    """ Returns True if we should require authentication for the URL given """
    return (not self.routes  # require auth for all URLs
            or any(route.match(url) for route in self.routes))
[ "def", "should_require_authentication", "(", "self", ",", "url", ")", ":", "return", "(", "not", "self", ".", "routes", "# require auth for all URLs", "or", "any", "(", "route", ".", "match", "(", "url", ")", "for", "route", "in", "self", ".", "routes", ")", ")" ]
Returns True if we should require authentication for the URL given
[ "Returns", "True", "if", "we", "should", "require", "authentication", "for", "the", "URL", "given" ]
1b2ab9cb5192b474c9723182690c352337f754bc
https://github.com/jonashaag/httpauth/blob/1b2ab9cb5192b474c9723182690c352337f754bc/httpauth.py#L99-L102
train
jonashaag/httpauth
httpauth.py
BaseHttpAuthMiddleware.authenticate
def authenticate(self, environ):
    """
    Returns True if the credentials passed in the Authorization header are
    valid, False otherwise.
    """
    try:
        hd = parse_dict_header(environ['HTTP_AUTHORIZATION'])
    except (KeyError, ValueError):
        return False

    return self.credentials_valid(
        hd['response'],
        environ['REQUEST_METHOD'],
        environ['httpauth.uri'],
        hd['nonce'],
        hd['Digest username'],
    )
python
def authenticate(self, environ):
    """
    Returns True if the credentials passed in the Authorization header are
    valid, False otherwise.
    """
    try:
        hd = parse_dict_header(environ['HTTP_AUTHORIZATION'])
    except (KeyError, ValueError):
        return False

    return self.credentials_valid(
        hd['response'],
        environ['REQUEST_METHOD'],
        environ['httpauth.uri'],
        hd['nonce'],
        hd['Digest username'],
    )
[ "def", "authenticate", "(", "self", ",", "environ", ")", ":", "try", ":", "hd", "=", "parse_dict_header", "(", "environ", "[", "'HTTP_AUTHORIZATION'", "]", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "return", "False", "return", "self", ".", "credentials_valid", "(", "hd", "[", "'response'", "]", ",", "environ", "[", "'REQUEST_METHOD'", "]", ",", "environ", "[", "'httpauth.uri'", "]", ",", "hd", "[", "'nonce'", "]", ",", "hd", "[", "'Digest username'", "]", ",", ")" ]
Returns True if the credentials passed in the Authorization header are valid, False otherwise.
[ "Returns", "True", "if", "the", "credentials", "passed", "in", "the", "Authorization", "header", "are", "valid", "False", "otherwise", "." ]
1b2ab9cb5192b474c9723182690c352337f754bc
https://github.com/jonashaag/httpauth/blob/1b2ab9cb5192b474c9723182690c352337f754bc/httpauth.py#L104-L120
train
frasertweedale/ledgertools
ltlib/readers/CSV.py
Reader.next
def next(self):
    """Return the next transaction object.

    StopIteration will be propagated from self.csvreader.next()
    """
    try:
        return self.dict_to_xn(self.csvreader.next())
    except MetadataException:
        # row was metadata; proceed to next row
        return next(self)
python
def next(self):
    """Return the next transaction object.

    StopIteration will be propagated from self.csvreader.next()
    """
    try:
        return self.dict_to_xn(self.csvreader.next())
    except MetadataException:
        # row was metadata; proceed to next row
        return next(self)
[ "def", "next", "(", "self", ")", ":", "try", ":", "return", "self", ".", "dict_to_xn", "(", "self", ".", "csvreader", ".", "next", "(", ")", ")", "except", "MetadataException", ":", "# row was metadata; proceed to next row", "return", "next", "(", "self", ")" ]
Return the next transaction object.

StopIteration will be propagated from self.csvreader.next()
[ "Return", "the", "next", "transaction", "object", "." ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/readers/CSV.py#L92-L101
train
frasertweedale/ledgertools
ltlib/readers/CSV.py
Reader.parse_date
def parse_date(self, date):
    """Parse the date and return a datetime object

    The heuristic for determining the date is:

    - if ``date_format`` is set, parse using strptime
    - if one field of 8 digits, YYYYMMDD
    - split by '-' or '/'
    - (TODO: substitute string months with their numbers)
    - if (2, 2, 4), DD-MM-YYYY (not the peculiar US order)
    - if (4, 2, 2), YYYY-MM-DD
    - ka-boom!

    The issue of reliably discerning between DD-MM-YYYY (sane) vs.
    MM-DD-YYYY (absurd, but Big In America), without being told what's
    being used, is intractable.

    Return a datetime.date object.
    """
    if self.date_format is not None:
        return datetime.datetime.strptime(date, self.date_format).date()
    if re.match('\d{8}$', date):
        # assume YYYYMMDD
        return datetime.date(*map(int, (date[:4], date[4:6], date[6:])))
    try:
        # split by '-' or '/'
        parts = date_delim.split(date, 2)  # maxsplit=2
        if len(parts) == 3:
            if len(parts[0]) == 4:
                # YYYY, MM, DD
                return datetime.date(*map(int, parts))
            elif len(parts[2]) == 4:
                # DD, MM, YYYY
                return datetime.date(*map(int, reversed(parts)))
        # fail
    except (TypeError, ValueError):
        raise reader.DataError('Bad date format: "{}"'.format(date))
python
def parse_date(self, date):
    """Parse the date and return a datetime object

    The heuristic for determining the date is:

    - if ``date_format`` is set, parse using strptime
    - if one field of 8 digits, YYYYMMDD
    - split by '-' or '/'
    - (TODO: substitute string months with their numbers)
    - if (2, 2, 4), DD-MM-YYYY (not the peculiar US order)
    - if (4, 2, 2), YYYY-MM-DD
    - ka-boom!

    The issue of reliably discerning between DD-MM-YYYY (sane) vs.
    MM-DD-YYYY (absurd, but Big In America), without being told what's
    being used, is intractable.

    Return a datetime.date object.
    """
    if self.date_format is not None:
        return datetime.datetime.strptime(date, self.date_format).date()
    if re.match('\d{8}$', date):
        # assume YYYYMMDD
        return datetime.date(*map(int, (date[:4], date[4:6], date[6:])))
    try:
        # split by '-' or '/'
        parts = date_delim.split(date, 2)  # maxsplit=2
        if len(parts) == 3:
            if len(parts[0]) == 4:
                # YYYY, MM, DD
                return datetime.date(*map(int, parts))
            elif len(parts[2]) == 4:
                # DD, MM, YYYY
                return datetime.date(*map(int, reversed(parts)))
        # fail
    except (TypeError, ValueError):
        raise reader.DataError('Bad date format: "{}"'.format(date))
[ "def", "parse_date", "(", "self", ",", "date", ")", ":", "if", "self", ".", "date_format", "is", "not", "None", ":", "return", "datetime", ".", "datetime", ".", "strptime", "(", "date", ",", "self", ".", "date_format", ")", ".", "date", "(", ")", "if", "re", ".", "match", "(", "'\\d{8}$'", ",", "date", ")", ":", "# assume YYYYMMDD", "return", "datetime", ".", "date", "(", "*", "map", "(", "int", ",", "(", "date", "[", ":", "4", "]", ",", "date", "[", "4", ":", "6", "]", ",", "date", "[", "6", ":", "]", ")", ")", ")", "try", ":", "# split by '-' or '/'", "parts", "=", "date_delim", ".", "split", "(", "date", ",", "2", ")", "# maxsplit=2", "if", "len", "(", "parts", ")", "==", "3", ":", "if", "len", "(", "parts", "[", "0", "]", ")", "==", "4", ":", "# YYYY, MM, DD", "return", "datetime", ".", "date", "(", "*", "map", "(", "int", ",", "parts", ")", ")", "elif", "len", "(", "parts", "[", "2", "]", ")", "==", "4", ":", "# DD, MM, YYYY", "return", "datetime", ".", "date", "(", "*", "map", "(", "int", ",", "reversed", "(", "parts", ")", ")", ")", "# fail", "except", "TypeError", ",", "ValueError", ":", "raise", "reader", ".", "DataError", "(", "'Bad date format: \"{}\"'", ".", "format", "(", "date", ")", ")" ]
Parse the date and return a datetime object

The heuristic for determining the date is:

- if ``date_format`` is set, parse using strptime
- if one field of 8 digits, YYYYMMDD
- split by '-' or '/'
- (TODO: substitute string months with their numbers)
- if (2, 2, 4), DD-MM-YYYY (not the peculiar US order)
- if (4, 2, 2), YYYY-MM-DD
- ka-boom!

The issue of reliably discerning between DD-MM-YYYY (sane) vs.
MM-DD-YYYY (absurd, but Big In America), without being told what's
being used, is intractable.

Return a datetime.date object.
[ "Parse", "the", "date", "and", "return", "a", "datetime", "object" ]
a695f8667d72253e5448693c12f0282d09902aaa
https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/readers/CSV.py#L103-L140
train
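To illustrate the docstring's heuristic without constructing a full Reader, a standalone rehearsal of the two unambiguous branches; the input dates are invented.

import datetime
import re

# 8-digit branch: YYYYMMDD
date = '20230215'
assert re.match(r'\d{8}$', date)
assert datetime.date(*map(int, (date[:4], date[4:6], date[6:]))) == \
    datetime.date(2023, 2, 15)

# delimiter branch: DD-MM-YYYY is recognised by the 4-digit final part
parts = re.split('[-/]', '15/02/2023', 2)
assert datetime.date(*map(int, reversed(parts))) == datetime.date(2023, 2, 15)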
digidotcom/python-wvalib
wva/subscriptions.py
WVASubscription.create
def create(self, uri, buffer="queue", interval=10): """Create a subscription with this short name and the provided parameters For more information on what the parameters required here mean, please refer to the `WVA Documentation <http://goo.gl/DRcOQf>`_. :raises WVAError: If there is a problem creating the new subscription """ return self._http_client.put_json("subscriptions/{}".format(self.short_name), { "subscription": { "uri": uri, "buffer": buffer, "interval": interval, } })
python
def create(self, uri, buffer="queue", interval=10): """Create a subscription with this short name and the provided parameters For more information on what the parameters required here mean, please refer to the `WVA Documentation <http://goo.gl/DRcOQf>`_. :raises WVAError: If there is a problem creating the new subscription """ return self._http_client.put_json("subscriptions/{}".format(self.short_name), { "subscription": { "uri": uri, "buffer": buffer, "interval": interval, } })
[ "def", "create", "(", "self", ",", "uri", ",", "buffer", "=", "\"queue\"", ",", "interval", "=", "10", ")", ":", "return", "self", ".", "_http_client", ".", "put_json", "(", "\"subscriptions/{}\"", ".", "format", "(", "self", ".", "short_name", ")", ",", "{", "\"subscription\"", ":", "{", "\"uri\"", ":", "uri", ",", "\"buffer\"", ":", "buffer", ",", "\"interval\"", ":", "interval", ",", "}", "}", ")" ]
Create a subscription with this short name and the provided parameters

For more information on what the parameters required here mean, please
refer to the `WVA Documentation <http://goo.gl/DRcOQf>`_.

:raises WVAError: If there is a problem creating the new subscription
[ "Create", "a", "subscription", "with", "this", "short", "name", "and", "the", "provided", "parameters" ]
4252735e2775f80ebaffd813fbe84046d26906b3
https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/subscriptions.py#L15-L29
train
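A hedged usage sketch for the subscription record above; the WVA constructor and get_subscription accessor follow the library's documented pattern, but treat the exact names and the device address as assumptions.

from wva import WVA

wva = WVA('192.168.100.1', 'user', 'password')  # hypothetical device address
speed = wva.get_subscription('speed')
# subscribe to a vehicle data URI with a 5 second reporting interval
speed.create('vehicle/data/VehicleSpeed', buffer='queue', interval=5)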
Starlink/palpy
support/palvers.py
read_pal_version
def read_pal_version():
    """
    Scans the PAL configure.ac looking for the version number.

    (vers, maj, min, patchlevel) = read_pal_version()

    Returns the version as a string and the major, minor
    and patchlevel version integers
    """
    verfile = os.path.join("cextern", "pal", "configure.ac")
    verstring = "-1.-1.-1"
    for line in open(verfile):
        if line.startswith("AC_INIT"):
            # Version will be in string [nn.mm.pp]
            match = re.search(r"\[(\d+\.\d+\.\d+)\]", line)
            if match:
                verstring = match.group(1)
            break
    (major, minor, patch) = verstring.split(".")
    return (verstring, major, minor, patch)
python
def read_pal_version():
    """
    Scans the PAL configure.ac looking for the version number.

    (vers, maj, min, patchlevel) = read_pal_version()

    Returns the version as a string and the major, minor
    and patchlevel version integers
    """
    verfile = os.path.join("cextern", "pal", "configure.ac")
    verstring = "-1.-1.-1"
    for line in open(verfile):
        if line.startswith("AC_INIT"):
            # Version will be in string [nn.mm.pp]
            match = re.search(r"\[(\d+\.\d+\.\d+)\]", line)
            if match:
                verstring = match.group(1)
            break
    (major, minor, patch) = verstring.split(".")
    return (verstring, major, minor, patch)
[ "def", "read_pal_version", "(", ")", ":", "verfile", "=", "os", ".", "path", ".", "join", "(", "\"cextern\"", ",", "\"pal\"", ",", "\"configure.ac\"", ")", "verstring", "=", "\"-1.-1.-1\"", "for", "line", "in", "open", "(", "verfile", ")", ":", "if", "line", ".", "startswith", "(", "\"AC_INIT\"", ")", ":", "# Version will be in string [nn.mm.pp]", "match", "=", "re", ".", "search", "(", "r\"\\[(\\d+\\.\\d+\\.\\d+)\\]\"", ",", "line", ")", "if", "match", ":", "verstring", "=", "match", ".", "group", "(", "1", ")", "break", "(", "major", ",", "minor", ",", "patch", ")", "=", "verstring", ".", "split", "(", "\".\"", ")", "return", "(", "verstring", ",", "major", ",", "minor", ",", "patch", ")" ]
Scans the PAL configure.ac looking for the version number.

(vers, maj, min, patchlevel) = read_pal_version()

Returns the version as a string and the major, minor
and patchlevel version integers
[ "Scans", "the", "PAL", "configure", ".", "ac", "looking", "for", "the", "version", "number", "." ]
a7ad77058614a93b29a004bbad6bc0e61c73b6e0
https://github.com/Starlink/palpy/blob/a7ad77058614a93b29a004bbad6bc0e61c73b6e0/support/palvers.py#L35-L55
train
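The version extraction above hinges on one regular expression; a small demonstration against a sample AC_INIT line (the line content is a plausible invention, not taken from the actual configure.ac).

import re

line = 'AC_INIT([pal],[0.9.7],[starlink@jiscmail.ac.uk])'
match = re.search(r"\[(\d+\.\d+\.\d+)\]", line)
# only the bracketed dotted-number group matches, not [pal]
assert match and match.group(1) == '0.9.7'
major, minor, patch = match.group(1).split('.')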
cloudbase/python-hnvclient
hnv/client.py
_BaseHNVModel._reset_model
def _reset_model(self, response):
    """Update the fields value with the received information."""
    # pylint: disable=no-member

    # Reset the model to the initial state
    self._provision_done = False    # Set back the provision flag
    self._changes.clear()           # Clear the changes

    # Process the raw data from the update response
    fields = self.process_raw_data(response)

    # Update the current model representation
    self._set_fields(fields)

    # Lock the current model
    self._provision_done = True
python
def _reset_model(self, response):
    """Update the fields value with the received information."""
    # pylint: disable=no-member

    # Reset the model to the initial state
    self._provision_done = False    # Set back the provision flag
    self._changes.clear()           # Clear the changes

    # Process the raw data from the update response
    fields = self.process_raw_data(response)

    # Update the current model representation
    self._set_fields(fields)

    # Lock the current model
    self._provision_done = True
[ "def", "_reset_model", "(", "self", ",", "response", ")", ":", "# pylint: disable=no-member", "# Reset the model to the initial state", "self", ".", "_provision_done", "=", "False", "# Set back the provision flag", "self", ".", "_changes", ".", "clear", "(", ")", "# Clear the changes", "# Process the raw data from the update response", "fields", "=", "self", ".", "process_raw_data", "(", "response", ")", "# Update the current model representation", "self", ".", "_set_fields", "(", "fields", ")", "# Lock the current model", "self", ".", "_provision_done", "=", "True" ]
Update the fields value with the received information.
[ "Update", "the", "fields", "value", "with", "the", "received", "information", "." ]
b019452af01db22629809b8930357a2ebf6494be
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L105-L120
train
cloudbase/python-hnvclient
hnv/client.py
_BaseHNVModel.is_ready
def is_ready(self):
    """Check if the current model is ready to be used."""
    if not self.provisioning_state:
        raise exception.ServiceException("The object doesn't contain "
                                         "`provisioningState`.")
    elif self.provisioning_state == constant.FAILED:
        raise exception.ServiceException(
            "Failed to complete the required operation.")
    elif self.provisioning_state == constant.SUCCEEDED:
        LOG.debug("The model %s: %s was successfully updated "
                  "(or created).",
                  self.__class__.__name__, self.resource_id)
        return True

    return False
python
def is_ready(self):
    """Check if the current model is ready to be used."""
    if not self.provisioning_state:
        raise exception.ServiceException("The object doesn't contain "
                                         "`provisioningState`.")
    elif self.provisioning_state == constant.FAILED:
        raise exception.ServiceException(
            "Failed to complete the required operation.")
    elif self.provisioning_state == constant.SUCCEEDED:
        LOG.debug("The model %s: %s was successfully updated "
                  "(or created).",
                  self.__class__.__name__, self.resource_id)
        return True

    return False
[ "def", "is_ready", "(", "self", ")", ":", "if", "not", "self", ".", "provisioning_state", ":", "raise", "exception", ".", "ServiceException", "(", "\"The object doesn't contain \"", "\"`provisioningState`.\"", ")", "elif", "self", ".", "provisioning_state", "==", "constant", ".", "FAILED", ":", "raise", "exception", ".", "ServiceException", "(", "\"Failed to complete the required operation.\"", ")", "elif", "self", ".", "provisioning_state", "==", "constant", ".", "SUCCEEDED", ":", "LOG", ".", "debug", "(", "\"The model %s: %s was successfully updated \"", "\"(or created).\"", ",", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "resource_id", ")", "return", "True", "return", "False" ]
Check if the current model is ready to be used.
[ "Check", "if", "the", "current", "model", "is", "ready", "to", "be", "used", "." ]
b019452af01db22629809b8930357a2ebf6494be
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L122-L136
train
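Because is_ready returns False while provisioning is in flight, raises on a FAILED state, and returns True on SUCCEEDED, a caller can poll it. A sketch of that pattern; the model class, resource id, and retry interval are placeholders, and it relies on the classmethod get shown later in this section.

import time

def wait_until_ready(model_cls, resource_id, interval=1.0, attempts=30):
    """Re-fetch a HNV model until its provisioning state reports success."""
    for _ in range(attempts):
        model = model_cls.get(resource_id=resource_id)
        if model.is_ready():  # raises ServiceException on a FAILED state
            return model
        time.sleep(interval)
    raise RuntimeError('resource did not become ready in time')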
cloudbase/python-hnvclient
hnv/client.py
_BaseHNVModel._get_all
def _get_all(cls, parent_id=None, grandparent_id=None):
    """Retrieves all the required resources."""
    client = cls._get_client()
    endpoint = cls._endpoint.format(resource_id="",
                                    parent_id=parent_id or "",
                                    grandparent_id=grandparent_id or "")
    resources = []
    while True:
        response = client.get_resource(endpoint)
        for raw_data in response.get("value", []):
            raw_data["parentResourceID"] = parent_id
            raw_data["grandParentResourceID"] = grandparent_id
            resources.append(cls.from_raw_data(raw_data))
        endpoint = response.get("nextLink")
        if not endpoint:
            break
    return resources
python
def _get_all(cls, parent_id=None, grandparent_id=None):
    """Retrieves all the required resources."""
    client = cls._get_client()
    endpoint = cls._endpoint.format(resource_id="",
                                    parent_id=parent_id or "",
                                    grandparent_id=grandparent_id or "")
    resources = []
    while True:
        response = client.get_resource(endpoint)
        for raw_data in response.get("value", []):
            raw_data["parentResourceID"] = parent_id
            raw_data["grandParentResourceID"] = grandparent_id
            resources.append(cls.from_raw_data(raw_data))
        endpoint = response.get("nextLink")
        if not endpoint:
            break
    return resources
[ "def", "_get_all", "(", "cls", ",", "parent_id", "=", "None", ",", "grandparent_id", "=", "None", ")", ":", "client", "=", "cls", ".", "_get_client", "(", ")", "endpoint", "=", "cls", ".", "_endpoint", ".", "format", "(", "resource_id", "=", "\"\"", ",", "parent_id", "=", "parent_id", "or", "\"\"", ",", "grandparent_id", "=", "grandparent_id", "or", "\"\"", ")", "resources", "=", "[", "]", "while", "True", ":", "response", "=", "client", ".", "get_resource", "(", "endpoint", ")", "for", "raw_data", "in", "response", ".", "get", "(", "\"value\"", ",", "[", "]", ")", ":", "raw_data", "[", "\"parentResourceID\"", "]", "=", "parent_id", "raw_data", "[", "\"grandParentResourceID\"", "]", "=", "grandparent_id", "resources", ".", "append", "(", "cls", ".", "from_raw_data", "(", "raw_data", ")", ")", "endpoint", "=", "response", ".", "get", "(", "\"nextLink\"", ")", "if", "not", "endpoint", ":", "break", "return", "resources" ]
Retrieves all the required resources.
[ "Retrives", "all", "the", "required", "resources", "." ]
b019452af01db22629809b8930357a2ebf6494be
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L148-L164
train
cloudbase/python-hnvclient
hnv/client.py
_BaseHNVModel.get
def get(cls, resource_id=None, parent_id=None, grandparent_id=None):
    """Retrieves the required resources.

    :param resource_id: The identifier for the specific resource within
        the resource type.
    :param parent_id: The identifier for the specific ancestor resource
        within the resource type.
    :param grandparent_id: The identifier that is associated with network
        objects that are ancestors of the parent of the necessary resource.
    """
    if not resource_id:
        return cls._get_all(parent_id, grandparent_id)
    else:
        return cls._get(resource_id, parent_id, grandparent_id)
python
def get(cls, resource_id=None, parent_id=None, grandparent_id=None):
    """Retrieves the required resources.

    :param resource_id: The identifier for the specific resource within
        the resource type.
    :param parent_id: The identifier for the specific ancestor resource
        within the resource type.
    :param grandparent_id: The identifier that is associated with network
        objects that are ancestors of the parent of the necessary resource.
    """
    if not resource_id:
        return cls._get_all(parent_id, grandparent_id)
    else:
        return cls._get(resource_id, parent_id, grandparent_id)
[ "def", "get", "(", "cls", ",", "resource_id", "=", "None", ",", "parent_id", "=", "None", ",", "grandparent_id", "=", "None", ")", ":", "if", "not", "resource_id", ":", "return", "cls", ".", "_get_all", "(", "parent_id", ",", "grandparent_id", ")", "else", ":", "return", "cls", ".", "_get", "(", "resource_id", ",", "parent_id", ",", "grandparent_id", ")" ]
Retrieves the required resources.

:param resource_id: The identifier for the specific resource within
    the resource type.
:param parent_id: The identifier for the specific ancestor resource
    within the resource type.
:param grandparent_id: The identifier that is associated with network
    objects that are ancestors of the parent of the necessary resource.
[ "Retrieves", "the", "required", "resources", "." ]
b019452af01db22629809b8930357a2ebf6494be
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L179-L194
train