repo | path | func_name | language | partition | code | sha | url
---|---|---|---|---|---|---|---

NoviceLive/intellicoder | intellicoder/utils.py | AttrsGetter.get_attrs | python | train

def get_attrs(self, *names):
    """Get multiple attributes from multiple objects."""
    attrs = [getattr(self, name) for name in names]
    return attrs

sha: 6cac5ebfce65c370dbebe47756a1789b120ef982
url: https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L155-L158

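The surrounding AttrsGetter class is not part of this record; a minimal sketch of how the method might be used, assuming a hypothetical constructor that exposes the wrapped objects' attributes directly on self:

    class AttrsGetter:
        def __init__(self, **attrs):
            self.__dict__.update(attrs)

        def get_attrs(self, *names):
            """Get multiple attributes from multiple objects."""
            return [getattr(self, name) for name in names]

    getter = AttrsGetter(host='localhost', port=8080)
    host, port = getter.get_attrs('host', 'port')  # 'localhost', 8080
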
dpa-newslab/livebridge | livebridge/base/posts.py | BasePost.target_id | python | train

def target_id(self):
    """Returns the id of the target to which this post has to be syndicated.
    :returns: string"""
    # already set?
    if self._target_id:
        return self._target_id
    # post already exists?
    if self._existing:
        self._target_id = self._existing.get("target_id")
    return self._target_id

sha: d930e887faa2f882d15b574f0f1fe4a580d7c5fa
url: https://github.com/dpa-newslab/livebridge/blob/d930e887faa2f882d15b574f0f1fe4a580d7c5fa/livebridge/base/posts.py#L115-L125

NoviceLive/intellicoder | intellicoder/intellisense/formatters.py | with_formatter | python | train

def with_formatter(formatter):
    """Apply a formatter function to the return value
    of the decorated function.
    """
    def _decorator_after_args(unwrapped):
        def _wrapped(self, *args, **kwargs):
            logging.debug('unwrapped: %s', unwrapped)
            logging.debug('self: %s', self)
            logging.debug('args: %s', args)
            logging.debug('kwargs: %s', kwargs)
            return_value = unwrapped(self, *args, **kwargs)
            if 'raw' in kwargs and kwargs['raw']:
                return return_value
            else:
                return formatter(return_value)
        return _wrapped
    return _decorator_after_args

sha: 6cac5ebfce65c370dbebe47756a1789b120ef982
url: https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/formatters.py#L31-L47

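A hypothetical use of with_formatter (assuming the function above is in scope): the decorated method returns raw data, and the formatter renders it unless the caller passes raw=True.

    import logging

    def as_csv(rows):
        return '\n'.join(','.join(map(str, row)) for row in rows)

    class Table:
        @with_formatter(as_csv)
        def rows(self, raw=False):
            return [(1, 'a'), (2, 'b')]

    print(Table().rows())
    # 1,a
    # 2,b
    print(Table().rows(raw=True))  # [(1, 'a'), (2, 'b')]
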
NoviceLive/intellicoder | intellicoder/intellisense/formatters.py | format_info | python | train

def format_info(raw):
    """Format a string representing the information
    concerning the name.
    """
    logging.debug(_('raw[0]: %s'), raw[0])
    results, sense = raw
    # A scenario where ORM really stands out.
    new = '\n'.join(
        '{} {} {} {}'.format(
            i[0], sense.kind_id_to_name(i[1]),
            sense.file_id_to_name(i[2]).lower(),
            i[3] + ' ' if i[3] else '').strip()
        for i in results)
    return new

sha: 6cac5ebfce65c370dbebe47756a1789b120ef982
url: https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/formatters.py#L59-L72

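A sketch of the input shape format_info expects, using a hypothetical stub in place of the real sense object (and assuming the module's logging and gettext _ are in scope, as in the source file):

    class FakeSense:
        def kind_id_to_name(self, kind_id):
            return {1: 'function'}[kind_id]
        def file_id_to_name(self, file_id):
            return {7: 'WINBASE.H'}[file_id]

    raw = ([('CreateFileA', 1, 7, '')], FakeSense())
    print(format_info(raw))  # CreateFileA function winbase.h
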
NoviceLive/intellicoder | intellicoder/intellisense/formatters.py | format_names | python | train

def format_names(raw):
    """Format a string representing the names contained in the files.
    """
    if raw:
        raw = [
            '{}:\n{}'.format(
                header.lower(), ' '.join(func[0] for func in funcs)
            )
            for header, funcs in raw
        ]
        return '\n'.join(raw)
    return ''

sha: 6cac5ebfce65c370dbebe47756a1789b120ef982
url: https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/formatters.py#L75-L86

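A small example with made-up data showing the (header, functions) pairs format_names consumes:

    raw = [('KERNEL32.DLL', [('CreateFileA',), ('ReadFile',)]),
           ('USER32.DLL', [('MessageBoxA',)])]
    print(format_names(raw))
    # kernel32.dll:
    # CreateFileA ReadFile
    # user32.dll:
    # MessageBoxA
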
NoviceLive/intellicoder | intellicoder/intellisense/formatters.py | format_kinds | python | train

def format_kinds(raw):
    """Format a string representing the kinds."""
    output = ' '.join('{} {}'.format(*kind) for kind in raw if kind)
    return output

sha: 6cac5ebfce65c370dbebe47756a1789b120ef982
url: https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/formatters.py#L89-L92

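For example, with hypothetical (id, name) pairs:

    print(format_kinds([(1, 'function'), (2, 'struct')]))  # 1 function 2 struct
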
etal/biocma | biocma/utils.py | find_seq_rec | python | train

def find_seq_rec(block, name, case_sensitive=True):
    """Given part of a sequence ID, find the first matching record."""
    if case_sensitive:
        def test(name, rec):
            return name in rec['id']
    else:
        def test(name, rec):
            return name.upper() in rec['id'].upper()
    for rec in block['sequences']:
        if test(name, rec):
            return rec
    raise ValueError("No sequence ID matches %s" % repr(name))

sha: eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7
url: https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L6-L18

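A toy block illustrating the lookup, assuming only the 'sequences'/'id' shape the function actually reads (other record keys omitted):

    block = {'sequences': [{'id': 'gi|158430190|pdb|2QG5|A'},
                           {'id': 'gi|999|pdb|1ABC|B'}]}
    rec = find_seq_rec(block, '2qg5', case_sensitive=False)
    print(rec['id'])  # gi|158430190|pdb|2QG5|A
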
etal/biocma | biocma/utils.py | find_seq_id | python | train

def find_seq_id(block, name, case_sensitive=True):
    """Given part of a sequence ID, find the first actual ID that contains it.

    Example::

        >>> find_seq_id(block, '2QG5')
        'gi|158430190|pdb|2QG5|A'

    Raise a ValueError if no matching key is found.
    """
    # logging.warn("DEPRECATED: Try to use cma.find_seq_rec instead")
    rec = find_seq_rec(block, name, case_sensitive)
    return rec['id']

sha: eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7
url: https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L21-L33

etal/biocma | biocma/utils.py | get_consensus | python | train

def get_consensus(block):
    """Calculate a simple consensus sequence for the block."""
    from collections import Counter
    # Take aligned (non-insert) chars from all rows; transpose
    columns = zip(*[[c for c in row['seq'] if not c.islower()]
                    for row in block['sequences']])
    cons_chars = [Counter(col).most_common()[0][0] for col in columns]
    cons_chars = [c if c != '-' else 'X' for c in cons_chars]
    assert len(cons_chars) == block['query_length']
    cons_sequence = {
        'index': 1,
        'id': 'consensus',
        'description': '',
        'dbxrefs': {},
        'phylum': '',
        'taxchar': '',
        'head_len': None,
        'tail_len': None,
        'head_seq': '',
        'tail_seq': '',
        'length': block['query_length'],
        'seq': ''.join(cons_chars),
    }
    return cons_sequence

sha: eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7
url: https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L36-L60

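A toy alignment showing the per-column majority vote (only the keys the function reads are filled in; a gap that wins a column becomes 'X'):

    block = {'query_length': 3,
             'sequences': [{'seq': 'ACG'},
                           {'seq': 'ACT'},
                           {'seq': 'A-G'}]}
    print(get_consensus(block)['seq'])  # ACG
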
etal/biocma | biocma/utils.py | get_conservation | python | train

def get_conservation(block):
    """Calculate conservation levels at each consensus position.

    Return a dict of {position: float conservation}
    """
    consensus = block['sequences'][0]['seq']
    assert all(c.isupper() for c in consensus), \
        "So-called consensus contains indels!"
    # remove all non-consensus positions -- now alignment is easy
    cleaned = [[c for c in s['seq'] if not c.islower()]
               for s in block['sequences'][1:]]
    height = float(len(cleaned))
    # validation
    for row in cleaned:
        if len(row) != len(consensus):
            raise ValueError("Aligned sequence length (%s) doesn't match "
                             "consensus (%s)"
                             % (len(row), len(consensus)))
    # transpose & go (list() so the columns are indexable under Python 3)
    columns = list(zip(*cleaned))
    return dict((idx + 1, columns[idx].count(cons_char) / height)
                for idx, cons_char in enumerate(consensus))

sha: eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7
url: https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L63-L84

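Row 0 is treated as the consensus; conservation at each 1-based position is the fraction of the remaining rows matching it there. A toy example:

    block = {'sequences': [{'seq': 'ACG'},
                           {'seq': 'ACG'},
                           {'seq': 'ACT'}]}
    print(get_conservation(block))  # {1: 1.0, 2: 1.0, 3: 0.5}
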
etal/biocma | biocma/utils.py | get_equivalent_positions | python | train

def get_equivalent_positions(block):
    """Create a mapping of equivalent residue positions to consensus.

    Build a dict-of-dicts::

        {consensus-posn: {id: equiv-posn, id: equiv-posn, ...}, ...}

    The first sequence in the alignment is assumed to be the (gapless)
    consensus sequence.
    """
    consensus = block['sequences'][0]['seq']
    rest = block['sequences'][1:]
    # Validation
    if '-' in consensus or '.' in consensus:
        raise ValueError("First sequence (consensus?) contains gaps")
    # Check for duplicate sequence IDs
    seen = set()
    dupes = set()
    for rec in rest:
        if rec['id'] in seen:
            dupes.add(rec['id'])
        else:
            seen.add(rec['id'])
    if dupes:
        raise ValueError("Duplicate sequences:\n" + '\n'.join(dupes))
    curr_shift = {}
    curr_resn = {}
    # NB: consensus doesn't have head/tail, but other sequences may
    for rec in rest:
        # Count inserts seen so far -- shift string indexes by this far ahead
        # to get the "equivalent" location in the sequence string
        # - as in, how far ahead in the current seq do we need to jump to get
        #   to a position equivalent to what's in the consensus?
        # - can this ever be less than 0 (==consensus)? No, because that's
        #   where gaps come from. Good.
        curr_shift[rec['id']] = 0
        # Residue number in the actual sequence at the current (shifted)
        # location
        # curr_resn[id] = current equivalent res.num in `id` to cons[i]
        curr_resn[rec['id']] = rec['head_len']
    equivalencies = dict((i + 1, {}) for i in range(len(consensus)))
    # Map each character position i in the consensus sequence
    # to equivalent residues in each of the other sequences
    # i = index in the consensus string (== consensus res.num - 1)
    for i, char in enumerate(consensus):
        assert char.isupper()
        for rec in rest:
            rid = rec['id']
            strposn = i + curr_shift[rid]
            if rec['seq'][strposn].isupper():
                # Match
                curr_resn[rid] += 1
            elif rec['seq'][strposn].islower():
                # Insert
                while rec['seq'][strposn].islower():
                    # Count the whole insert size
                    curr_shift[rid] += 1
                    curr_resn[rid] += 1
                    strposn += 1
                curr_resn[rid] += 1  # Count the next match, too
            else:
                # Deletion / gap -- no equivalent residue at this position
                assert rec['seq'][strposn] in '.-'
                continue
            equivalencies[i + 1][rid] = curr_resn[rid]
    return equivalencies

sha: eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7
url: https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L87-L156

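A toy alignment tracing the bookkeeping (hypothetical records: head_len counts residues before the aligned region, lowercase letters are inserts, and gap columns get no entry):

    block = {'sequences': [
        {'id': 'consensus', 'seq': 'ACG', 'head_len': 0},
        {'id': 'seqA', 'seq': 'AxCG', 'head_len': 0},  # one insert after A
    ]}
    print(get_equivalent_positions(block))
    # {1: {'seqA': 1}, 2: {'seqA': 3}, 3: {'seqA': 4}}
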
etal/biocma | biocma/utils.py | get_inserts | python | train

def get_inserts(block):
    """Identify the inserts in each sequence of a block.

    Inserts are relative to the consensus (theoretically), and identified by
    lowercase letters in the sequence. The returned integer pairs represent
    the insert start and end positions in the full-length sequence, using
    one-based numbering.

    The first sequence of the CMA block is included, though it may just be
    the consensus sequence, which shouldn't have any inserts.

    Output:

        {id1: [(start, end), (start, end), ...], id2: ..., ...}
    """
    def find_inserts(seq, head_len):
        """Locate the lowercase regions in a character sequence.

        Yield the insert ranges as tuples using 1-based numbering, shifted
        by head_len.
        """
        in_insert = False
        curr_start = None
        deletions = 0
        for idx, is_lower in enumerate(map(str.islower, seq)):
            if is_lower:
                if not in_insert:
                    # Start of a new insert region
                    curr_start = head_len + idx + 1 - deletions
                    in_insert = True
            else:
                if in_insert:
                    # End of the current insert region
                    yield (curr_start, head_len + idx - deletions)
                    in_insert = False
                if seq[idx] == '-':
                    deletions += 1

    return dict((record['id'],
                 list(find_inserts(record['seq'], record['head_len'])))
                for record in block['sequences'])

sha: eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7
url: https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L159-L200

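A toy record (hypothetical values): with head_len=2 the two residues before the aligned region are counted, so the lowercase run 'xx' spans full-sequence positions 4-5, while the '-' column consumes no residue:

    block = {'sequences': [{'id': 'seqA', 'head_len': 2, 'seq': 'AxxC-G'}]}
    print(get_inserts(block))  # {'seqA': [(4, 5)]}
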
computational-metabolomics/msp2db | msp2db/re.py | get_meta_regex | python | train

def get_meta_regex(schema='mona'):
    """ Create a dictionary of regex for extracting the meta data for the spectra
    """
    # NOTE: matching ignores case, so case variants are not repeated here
    meta_parse = collections.OrderedDict()

    if schema == 'mona':
        meta_parse['collision_energy'] = ['^collision energy(?:=|:)(.*)$']
        meta_parse['ms_level'] = ['^ms.*level(?:=|:)\D*(\d*)$', '^ms type(?:=|:)\D*(\d*)$',
                                  '^Spectrum_type(?:=|:)\D*(\d*)$']
        meta_parse['accession'] = ['^accession(?:=|:)(.*)$', '^DB#(?:=|:)(.*)$']
        meta_parse['resolution'] = ['^resolution(?:=|:)(.*)$']
        meta_parse['polarity'] = ['^ion.*mode(?:=|:)(.*)$', '^ionization.*mode(?:=|:)(.*)$', '^polarity(?:=|:)(.*)$']
        meta_parse['fragmentation_type'] = ['^fragmentation.*mode(?:=|:)(.*)$', '^fragmentation.*type(?:=|:)(.*)$']
        meta_parse['precursor_mz'] = ['^precursor m/z(?:=|:)\s*(\d*[.,]?\d*)$', '^precursor.*mz(?:=|:)\s*(\d*[.,]?\d*)$']
        meta_parse['precursor_type'] = ['^precursor.*type(?:=|:)(.*)$', '^adduct(?:=|:)(.*)$']
        meta_parse['instrument_type'] = ['^instrument.*type(?:=|:)(.*)$']
        meta_parse['instrument'] = ['^instrument(?:=|:)(.*)$']
        meta_parse['copyright'] = ['^copyright(?:=|:)(.*)$']
        # meta_parse['column'] = ['^column(?:=|:)(.*)$']
        meta_parse['mass_accuracy'] = ['^mass.*accuracy(?:=|:)\s*(\d*[.,]?\d*)$']
        meta_parse['mass_error'] = ['^mass.*error(?:=|:)\s*(\d*[.,]?\d*)$']
        meta_parse['origin'] = ['^origin(?:=|:)(.*)$']
        meta_parse['name'] = ['^Name(?:=|:)(.*)$']
        meta_parse['splash'] = ['^splash:(.*)$']
        meta_parse['retention_time'] = ['^retention.*time(?:=|:)\s*(\d*[.,]?\d*)$']
        meta_parse['retention_index'] = ['^retention.*index(?:=|:)\s*(\d*[.,]?\d*)$']

    elif schema == 'massbank':
        meta_parse['collision_energy'] = ['^AC\$MASS_SPECTROMETRY:\s+COLLISION_ENERGY\s+(.*)$']
        meta_parse['ms_level'] = ['^AC\$MASS_SPECTROMETRY:\s+MS_TYPE\s+\D*(\d*)$']
        meta_parse['accession'] = ['^ACCESSION:(.*)$']
        meta_parse['resolution'] = ['^AC\$MASS_SPECTROMETRY:\s+RESOLUTION\s+(.*)$']
        meta_parse['polarity'] = ['^AC\$MASS_SPECTROMETRY:\s+ION_MODE\s+(.*)$']
        meta_parse['fragmentation_type'] = ['^AC\$MASS_SPECTROMETRY:\s+FRAGMENTATION_MODE\s+(.*)$']
        meta_parse['precursor_mz'] = ['^MS\$FOCUSED_ION:\s+PRECURSOR_M/Z\s+(\d*[.,]?\d*)$']
        meta_parse['precursor_type'] = ['^MS\$FOCUSED_ION:\s+PRECURSOR_TYPE\s+(.*)$']
        meta_parse['instrument_type'] = ['^AC\$INSTRUMENT_TYPE:\s+(.*)$']
        meta_parse['instrument'] = ['^AC\$INSTRUMENT:\s+(.*)$']
        meta_parse['copyright'] = ['^COPYRIGHT:\s+(.*)']
        # meta_parse['column'] = ['^column(?:=|:)(.*)$']
        meta_parse['mass_accuracy'] = ['^AC\$MASS_SPECTROMETRY:\s+ACCURACY\s+(.*)$']  # need to check
        meta_parse['mass_error'] = ['^AC\$MASS_SPECTROMETRY:\s+ERROR\s+(.*)$']  # need to check
        meta_parse['splash'] = ['^PK\$SPLASH:\s+(.*)$']
        meta_parse['origin'] = ['^origin(?:=|:)(.*)$']
        meta_parse['name'] = ['^RECORD_TITLE:\s+(.*)$']
        meta_parse['retention_time'] = ['^AC\$CHROMATOGRAPHY:\s+RETENTION.*TIME\s+(\d*[.,]?\d*)$']
        meta_parse['retention_index'] = ['^AC\$CHROMATOGRAPHY:\s+RETENTION.*INDEX\s+(\d*[.,]?\d*)$']

    return meta_parse

sha: f86f01efca26fd2745547c9993f97337c6bef123
url: https://github.com/computational-metabolomics/msp2db/blob/f86f01efca26fd2745547c9993f97337c6bef123/msp2db/re.py#L5-L57

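A minimal sketch of applying this table to one metadata line (assuming collections is imported as in the source module, and case-insensitive matching per the NOTE above):

    import re

    meta_regex = get_meta_regex('massbank')
    line = 'MS$FOCUSED_ION: PRECURSOR_M/Z 303.0501'
    for key, patterns in meta_regex.items():
        for pattern in patterns:
            match = re.search(pattern, line, re.IGNORECASE)
            if match:
                print(key, '=', match.group(1).strip())  # precursor_mz = 303.0501
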
computational-metabolomics/msp2db | msp2db/re.py | get_compound_regex | python | train

def get_compound_regex(schema='mona'):
    """ Create a dictionary of regex for extracting the compound information for the spectra
    """
    # NOTE: matching ignores case, so case variants are not repeated here
    meta_parse = collections.OrderedDict()

    if schema == 'mona':
        meta_parse['name'] = ['^Name(?:=|:)(.*)$']
        meta_parse['inchikey_id'] = ['^inchikey(?:=|:)(.*)$']
        meta_parse['molecular_formula'] = ['^molecular formula(?:=|:)(.*)$', '^formula:(.*)$']
        meta_parse['molecular_weight'] = ['^MW(?:=|:)(\d*[.,]?\d*)$']
        meta_parse['pubchem_id'] = ['^pubchem.*cid(?:=|:)(\d*)".*$']
        meta_parse['chemspider_id'] = ['^chemspider(?:=|:)(\d*)".*$']
        meta_parse['compound_class'] = ['^compound.*class(?:=|:)(.*)$']
        meta_parse['exact_mass'] = ['^exact.*mass(?:=|:)(\d*[.,]?\d*)$']
        meta_parse['smiles'] = ['^SMILES(?:=|:)(.*)$']
        meta_parse['other_names'] = ['^Synonym(?:=|:)(.*)$']
    elif schema == 'massbank':
        meta_parse['name'] = ['^CH\$NAME:\s+(.*)$']
        meta_parse['other_names'] = ['^CH\$NAME:\s+(.*)$']
        meta_parse['inchikey_id'] = ['^CH\$LINK:\s+INCHIKEY\s+(.*)$']
        meta_parse['molecular_formula'] = ['^CH\$FORMULA:\s+(.*)$']
        meta_parse['molecular_weight'] = ['^CH\$MOLECULAR_WEIGHT:\s+(.*)$']
        meta_parse['pubchem_id'] = ['^CH\$LINK:\s+PUBCHEM\s+CID:(.*)$']
        meta_parse['chemspider_id'] = ['^CH\$LINK:\s+CHEMSPIDER\s+(.*)$']
        meta_parse['compound_class'] = ['^CH\$COMPOUND_CLASS:\s+(.*)$']
        meta_parse['exact_mass'] = ['^CH\$EXACT_MASS:\s+(.*)$']
        meta_parse['smiles'] = ['^CH\$SMILES:\s+(.*)$']

    return meta_parse

sha: f86f01efca26fd2745547c9993f97337c6bef123
url: https://github.com/computational-metabolomics/msp2db/blob/f86f01efca26fd2745547c9993f97337c6bef123/msp2db/re.py#L60-L91

lowandrew/OLCTools | coreGenome/coretyper.py | CoreTyper.handler | python | train

def handler(self):
    """Run the required analyses"""
    printtime('Creating and populating objects', self.start)
    self.populate()
    printtime('Populating {} sequence profiles'.format(self.analysistype), self.start)
    self.profiler()
    # Annotate sequences with prokka
    self.annotatethreads()
    # Find the core coding features present in each strain
    self.cdsthreads()
    # Extract the sequence for each coding feature
    self.cdssequencethreads()
    # Determine the allele of each core gene
    self.allelematchthreads()
    # Determine sequence types from the analyses
    printtime('Determining {} sequence types'.format(self.analysistype), self.start)
    self.sequencetyper()
    # Create reports
    printtime('Creating {} reports'.format(self.analysistype), self.start)
    self.reporter()

sha: 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
url: https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/coretyper.py#L19-L38

lowandrew/OLCTools | coreGenome/coretyper.py | CoreTyper.annotatethreads | python | train

def annotatethreads(self):
    """
    Use prokka to annotate each strain
    """
    # Move the files to subfolders and create objects
    self.runmetadata = createobject.ObjectCreation(self)
    # Fix headers
    self.headers()
    printtime('Performing prokka analyses', self.start)
    # Create and start threads
    for i in range(self.cpus):
        # Send the threads to the appropriate destination function
        threads = Thread(target=self.annotate, args=())
        # Set daemon to True so the threads exit when the main program does
        threads.setDaemon(True)
        # Start the threading
        threads.start()
    for sample in self.metadata.samples:
        # Create the prokka attribute in the metadata object
        setattr(sample, 'prokka', GenObject())
        sample.prokka.outputdir = os.path.join(sample.general.outputdirectory, 'prokka')
        if not os.path.isdir(sample.prokka.outputdir):
            os.makedirs(sample.prokka.outputdir)
        # TODO Incorporate MASH/rMLST/user-inputted genus and species results in the system call
        # Create the system call, e.g.:
        # prokka 2014-SEQ-0275.fasta --force --genus Escherichia --species coli --usegenus --addgenes
        # --prefix 2014-SEQ-0275 --locustag EC0275 --outdir /path/to/sequences/2014-SEQ-0275/prokka
        sample.prokka.command = 'prokka {} ' \
                                '--force ' \
                                '--genus {} ' \
                                '--species {} ' \
                                '--usegenus ' \
                                '--addgenes ' \
                                '--prefix {} ' \
                                '--locustag {} ' \
                                '--outdir {}' \
            .format(sample.general.fixedheaders,
                    self.genus, self.species, sample.name, sample.name, sample.prokka.outputdir)
        self.queue.put(sample)
    self.queue.join()

sha: 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
url: https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/coretyper.py#L100-L139

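The thread/queue pattern these methods share, reduced to a runnable standalone sketch (names here are illustrative, not OLCTools API):

    from queue import Queue
    from threading import Thread

    queue = Queue()

    def worker():
        while True:
            sample = queue.get()
            try:
                print('processing', sample)  # stand-in for per-sample work
            finally:
                queue.task_done()

    # Daemon workers exit automatically when the main thread finishes
    for _ in range(4):
        Thread(target=worker, daemon=True).start()

    for sample in ['2014-SEQ-0275', '2014-SEQ-0276']:
        queue.put(sample)
    # Block until every queued sample has been marked done
    queue.join()
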
lowandrew/OLCTools | coreGenome/coretyper.py | CoreTyper.cdsthreads | def cdsthreads(self):
"""
Determines which core genes from a pre-calculated database are present in each strain
"""
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.cds, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata.samples:
#
sample[self.analysistype].corepresence = dict()
self.cdsqueue.put(sample)
self.cdsqueue.join() | python | def cdsthreads(self):
"""
Determines which core genes from a pre-calculated database are present in each strain
"""
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.cds, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata.samples:
#
sample[self.analysistype].corepresence = dict()
self.cdsqueue.put(sample)
self.cdsqueue.join() | [
"def",
"cdsthreads",
"(",
"self",
")",
":",
"# Create and start threads",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"cpus",
")",
":",
"# Send the threads to the appropriate destination function",
"threads",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"cds",
",",
"args",
"=",
"(",
")",
")",
"# Set the daemon to true - something to do with thread management",
"threads",
".",
"setDaemon",
"(",
"True",
")",
"# Start the threading",
"threads",
".",
"start",
"(",
")",
"for",
"sample",
"in",
"self",
".",
"metadata",
".",
"samples",
":",
"#",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"corepresence",
"=",
"dict",
"(",
")",
"self",
".",
"cdsqueue",
".",
"put",
"(",
"sample",
")",
"self",
".",
"cdsqueue",
".",
"join",
"(",
")"
]
| Determines which core genes from a pre-calculated database are present in each strain | [
"Determines",
"which",
"core",
"genes",
"from",
"a",
"pre",
"-",
"calculated",
"database",
"are",
"present",
"in",
"each",
"strain"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/coretyper.py#L207-L223 | train |
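
The three *threads methods in this class share the same daemon-worker pattern. A minimal, self-contained sketch of that pattern follows; the worker body and sample names are placeholders, and it assumes the real worker (here self.cds and friends, not shown) calls task_done() so that queue.join() can return:

    from queue import Queue
    from threading import Thread

    def worker(queue):
        while True:
            sample = queue.get()         # block until a sample is available
            print('processing', sample)  # placeholder for the per-sample work
            queue.task_done()            # lets queue.join() unblock once all work is done

    queue = Queue()
    for _ in range(4):                   # stands in for self.cpus
        thread = Thread(target=worker, args=(queue,))
        thread.daemon = True             # modern spelling of setDaemon(True)
        thread.start()

    for sample in ['2014-SEQ-0275', '2014-SEQ-0276']:
        queue.put(sample)
    queue.join()                         # block until every queued sample is processed
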
lowandrew/OLCTools | coreGenome/coretyper.py | CoreTyper.cdssequencethreads | def cdssequencethreads(self):
"""
Extracts the sequence of each gene for each strain
"""
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.cdssequence, args=())
            # Set the daemon to true so the threads exit when the main program does
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata.samples:
# Initialise a dictionary to store the sequence of each core gene
sample[self.analysistype].coresequence = dict()
self.sequencequeue.put(sample)
self.sequencequeue.join() | python | def cdssequencethreads(self):
"""
Extracts the sequence of each gene for each strain
"""
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.cdssequence, args=())
            # Set the daemon to true so the threads exit when the main program does
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata.samples:
# Initialise a dictionary to store the sequence of each core gene
sample[self.analysistype].coresequence = dict()
self.sequencequeue.put(sample)
self.sequencequeue.join() | [
"def",
"cdssequencethreads",
"(",
"self",
")",
":",
"# Create and start threads",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"cpus",
")",
":",
"# Send the threads to the appropriate destination function",
"threads",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"cdssequence",
",",
"args",
"=",
"(",
")",
")",
"# Set the daemon to true - something to do with thread management",
"threads",
".",
"setDaemon",
"(",
"True",
")",
"# Start the threading",
"threads",
".",
"start",
"(",
")",
"for",
"sample",
"in",
"self",
".",
"metadata",
".",
"samples",
":",
"# Initialise a dictionary to store the sequence of each core gene",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"coresequence",
"=",
"dict",
"(",
")",
"self",
".",
"sequencequeue",
".",
"put",
"(",
"sample",
")",
"self",
".",
"sequencequeue",
".",
"join",
"(",
")"
]
| Extracts the sequence of each gene for each strain | [
"Extracts",
"the",
"sequence",
"of",
"each",
"gene",
"for",
"each",
"strain"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/coretyper.py#L247-L263 | train |
lowandrew/OLCTools | coreGenome/coretyper.py | CoreTyper.allelematchthreads | def allelematchthreads(self):
"""
Determine allele of each gene
"""
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.allelematch, args=())
            # Set the daemon to true so the threads exit when the main program does
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata.samples:
sample[self.analysistype].allelematches = dict()
self.allelequeue.put(sample)
self.allelequeue.join() | python | def allelematchthreads(self):
"""
Determine allele of each gene
"""
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.allelematch, args=())
            # Set the daemon to true so the threads exit when the main program does
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata.samples:
sample[self.analysistype].allelematches = dict()
self.allelequeue.put(sample)
self.allelequeue.join() | [
"def",
"allelematchthreads",
"(",
"self",
")",
":",
"# Create and start threads",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"cpus",
")",
":",
"# Send the threads to the appropriate destination function",
"threads",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"allelematch",
",",
"args",
"=",
"(",
")",
")",
"# Set the daemon to true - something to do with thread management",
"threads",
".",
"setDaemon",
"(",
"True",
")",
"# Start the threading",
"threads",
".",
"start",
"(",
")",
"for",
"sample",
"in",
"self",
".",
"metadata",
".",
"samples",
":",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"allelematches",
"=",
"dict",
"(",
")",
"self",
".",
"allelequeue",
".",
"put",
"(",
"sample",
")",
"self",
".",
"allelequeue",
".",
"join",
"(",
")"
]
| Determine allele of each gene | [
"Determine",
"allele",
"of",
"each",
"gene"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/coretyper.py#L274-L289 | train |
by46/simplekit | simplekit/url/path.py | remove_path_segments | def remove_path_segments(segments, removes):
"""Removes the removes from the tail of segments.
Examples::
>>> # '/a/b/c' - 'b/c' == '/a/'
>>> assert remove_path_segments(['', 'a', 'b', 'c'], ['b', 'c']) == ['', 'a', '']
    >>> # '/a/b/c' - '/b/c' == '/a'
>>> assert remove_path_segments(['', 'a', 'b', 'c'], ['', 'b', 'c']) == ['', 'a']
:param segments: :class:`list`, a list of the path segment
:param removes: :class:`list`, a list of the path segment
:return: :class:`list`, The list of all remaining path segments after all segments
in ``removes`` have been removed from the end of ``segments``. If no segment
from ``removes`` were removed from the ``segments``, the ``segments`` is
return unmodified.
"""
if segments == ['']:
segments.append('')
if removes == ['']:
removes.append('')
if segments == removes:
ret = []
elif len(removes) > len(segments):
ret = segments
else:
# TODO(benjamin): incomplete
removes2 = list(removes)
if len(removes) > 1 and removes[0] == '':
removes2.pop(0)
if removes2 and removes2 == segments[-1 * len(removes2):]:
ret = segments[:len(segments) - len(removes2)]
if removes[0] != '' and ret:
ret.append('')
else:
ret = segments
return ret | python | def remove_path_segments(segments, removes):
"""Removes the removes from the tail of segments.
Examples::
>>> # '/a/b/c' - 'b/c' == '/a/'
>>> assert remove_path_segments(['', 'a', 'b', 'c'], ['b', 'c']) == ['', 'a', '']
    >>> # '/a/b/c' - '/b/c' == '/a'
>>> assert remove_path_segments(['', 'a', 'b', 'c'], ['', 'b', 'c']) == ['', 'a']
:param segments: :class:`list`, a list of the path segment
:param removes: :class:`list`, a list of the path segment
:return: :class:`list`, The list of all remaining path segments after all segments
in ``removes`` have been removed from the end of ``segments``. If no segment
from ``removes`` were removed from the ``segments``, the ``segments`` is
        returned unmodified.
"""
if segments == ['']:
segments.append('')
if removes == ['']:
removes.append('')
if segments == removes:
ret = []
elif len(removes) > len(segments):
ret = segments
else:
# TODO(benjamin): incomplete
removes2 = list(removes)
if len(removes) > 1 and removes[0] == '':
removes2.pop(0)
if removes2 and removes2 == segments[-1 * len(removes2):]:
ret = segments[:len(segments) - len(removes2)]
if removes[0] != '' and ret:
ret.append('')
else:
ret = segments
return ret | [
"def",
"remove_path_segments",
"(",
"segments",
",",
"removes",
")",
":",
"if",
"segments",
"==",
"[",
"''",
"]",
":",
"segments",
".",
"append",
"(",
"''",
")",
"if",
"removes",
"==",
"[",
"''",
"]",
":",
"removes",
".",
"append",
"(",
"''",
")",
"if",
"segments",
"==",
"removes",
":",
"ret",
"=",
"[",
"]",
"elif",
"len",
"(",
"removes",
")",
">",
"len",
"(",
"segments",
")",
":",
"ret",
"=",
"segments",
"else",
":",
"# TODO(benjamin): incomplete",
"removes2",
"=",
"list",
"(",
"removes",
")",
"if",
"len",
"(",
"removes",
")",
">",
"1",
"and",
"removes",
"[",
"0",
"]",
"==",
"''",
":",
"removes2",
".",
"pop",
"(",
"0",
")",
"if",
"removes2",
"and",
"removes2",
"==",
"segments",
"[",
"-",
"1",
"*",
"len",
"(",
"removes2",
")",
":",
"]",
":",
"ret",
"=",
"segments",
"[",
":",
"len",
"(",
"segments",
")",
"-",
"len",
"(",
"removes2",
")",
"]",
"if",
"removes",
"[",
"0",
"]",
"!=",
"''",
"and",
"ret",
":",
"ret",
".",
"append",
"(",
"''",
")",
"else",
":",
"ret",
"=",
"segments",
"return",
"ret"
]
| Removes the ``removes`` segments from the tail of ``segments``.
Examples::
>>> # '/a/b/c' - 'b/c' == '/a/'
>>> assert remove_path_segments(['', 'a', 'b', 'c'], ['b', 'c']) == ['', 'a', '']
>>> # '/a/b/c' - '/b/c' == '/a
>>> assert remove_path_segments(['', 'a', 'b', 'c'], ['', 'b', 'c']) == ['', 'a']
:param segments: :class:`list`, a list of the path segment
:param removes: :class:`list`, a list of the path segment
:return: :class:`list`, The list of all remaining path segments after all segments
in ``removes`` have been removed from the end of ``segments``. If no segment
from ``removes`` were removed from the ``segments``, the ``segments`` is
returned unmodified. | [
"Removes",
"the",
"removes",
"from",
"the",
"tail",
"of",
"segments",
"."
]
| 33f3ce6de33accc185e1057f096af41859db5976 | https://github.com/by46/simplekit/blob/33f3ce6de33accc185e1057f096af41859db5976/simplekit/url/path.py#L9-L48 | train |
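
A short usage sketch in addition to the doctests above, assuming the function is imported from simplekit.url.path (the module this entry comes from); the second assertion exercises the unmodified-return branch:

    from simplekit.url.path import remove_path_segments

    # matching tail: '/a/b/c' - 'b/c' == '/a/'
    assert remove_path_segments(['', 'a', 'b', 'c'], ['b', 'c']) == ['', 'a', '']
    # non-matching tail: segments come back unmodified
    assert remove_path_segments(['', 'a', 'b', 'c'], ['x', 'c']) == ['', 'a', 'b', 'c']
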
by46/simplekit | simplekit/url/path.py | join_path_segments | def join_path_segments(*args):
"""Join multiple list of path segments
This function is not encoding aware, it does not test for, or changed the
encoding of the path segments it's passed.
Example::
>>> assert join_path_segments(['a'], ['b']) == ['a','b']
>>> assert join_path_segments(['a',''], ['b']) == ['a','b']
>>> assert join_path_segments(['a'], ['','b']) == ['a','b']
>>> assert join_path_segments(['a',''], ['','b']) == ['a','','b']
>>> assert join_path_segments(['a','b'], ['c','d']) == ['a','b','c','d']
:param args: optional arguments
:return: :class:`list`, the segment list of the result path
"""
finals = []
for segments in args:
if not segments or segments[0] == ['']:
continue
elif not finals:
finals.extend(segments)
else:
# Example #1: ['a',''] + ['b'] == ['a','b']
# Example #2: ['a',''] + ['','b'] == ['a','','b']
if finals[-1] == '' and (segments[0] != '' or len(segments) > 1):
finals.pop(-1)
# Example: ['a'] + ['','b'] == ['a','b']
elif finals[-1] != '' and segments[0] == '' and len(segments) > 1:
segments.pop(0)
finals.extend(segments)
return finals | python | def join_path_segments(*args):
"""Join multiple list of path segments
This function is not encoding aware, it does not test for, or changed the
encoding of the path segments it's passed.
Example::
>>> assert join_path_segments(['a'], ['b']) == ['a','b']
>>> assert join_path_segments(['a',''], ['b']) == ['a','b']
>>> assert join_path_segments(['a'], ['','b']) == ['a','b']
>>> assert join_path_segments(['a',''], ['','b']) == ['a','','b']
>>> assert join_path_segments(['a','b'], ['c','d']) == ['a','b','c','d']
:param args: optional arguments
:return: :class:`list`, the segment list of the result path
"""
finals = []
for segments in args:
if not segments or segments[0] == ['']:
continue
elif not finals:
finals.extend(segments)
else:
# Example #1: ['a',''] + ['b'] == ['a','b']
# Example #2: ['a',''] + ['','b'] == ['a','','b']
if finals[-1] == '' and (segments[0] != '' or len(segments) > 1):
finals.pop(-1)
# Example: ['a'] + ['','b'] == ['a','b']
elif finals[-1] != '' and segments[0] == '' and len(segments) > 1:
segments.pop(0)
finals.extend(segments)
return finals | [
"def",
"join_path_segments",
"(",
"*",
"args",
")",
":",
"finals",
"=",
"[",
"]",
"for",
"segments",
"in",
"args",
":",
"if",
"not",
"segments",
"or",
"segments",
"[",
"0",
"]",
"==",
"[",
"''",
"]",
":",
"continue",
"elif",
"not",
"finals",
":",
"finals",
".",
"extend",
"(",
"segments",
")",
"else",
":",
"# Example #1: ['a',''] + ['b'] == ['a','b']",
"# Example #2: ['a',''] + ['','b'] == ['a','','b']",
"if",
"finals",
"[",
"-",
"1",
"]",
"==",
"''",
"and",
"(",
"segments",
"[",
"0",
"]",
"!=",
"''",
"or",
"len",
"(",
"segments",
")",
">",
"1",
")",
":",
"finals",
".",
"pop",
"(",
"-",
"1",
")",
"# Example: ['a'] + ['','b'] == ['a','b']",
"elif",
"finals",
"[",
"-",
"1",
"]",
"!=",
"''",
"and",
"segments",
"[",
"0",
"]",
"==",
"''",
"and",
"len",
"(",
"segments",
")",
">",
"1",
":",
"segments",
".",
"pop",
"(",
"0",
")",
"finals",
".",
"extend",
"(",
"segments",
")",
"return",
"finals"
]
| Join multiple lists of path segments
This function is not encoding aware, it does not test for, or change the
encoding of the path segments it's passed.
Example::
>>> assert join_path_segments(['a'], ['b']) == ['a','b']
>>> assert join_path_segments(['a',''], ['b']) == ['a','b']
>>> assert join_path_segments(['a'], ['','b']) == ['a','b']
>>> assert join_path_segments(['a',''], ['','b']) == ['a','','b']
>>> assert join_path_segments(['a','b'], ['c','d']) == ['a','b','c','d']
:param args: optional arguments
:return: :class:`list`, the segment list of the result path | [
"Join",
"multiple",
"list",
"of",
"path",
"segments"
]
| 33f3ce6de33accc185e1057f096af41859db5976 | https://github.com/by46/simplekit/blob/33f3ce6de33accc185e1057f096af41859db5976/simplekit/url/path.py#L52-L83 | train |
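
A usage sketch, again assuming an import from simplekit.url.path; note that the function may mutate the lists it is given (segments.pop above), so pass copies if the inputs must survive:

    from simplekit.url.path import join_path_segments

    assert join_path_segments(['', 'a'], ['b'], ['c', 'd']) == ['', 'a', 'b', 'c', 'd']
    assert join_path_segments(['a', ''], ['', 'b']) == ['a', '', 'b']
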
portfors-lab/sparkle | sparkle/data/acqdata.py | increment | def increment(index, dims, data_shape):
"""Increments a given index according to the shape of the data added
:param index: Current index to be incremented
:type index: list
    :param dims: Shape of the data structure being incremented
    :type dims: tuple
    :param data_shape: Shape of the data that the index is being incremented by; this is checked to ensure incrementing is correct
:returns: list - the incremented index
"""
# check dimensions of data match structure
inc_to_match = data_shape[1:]
for dim_a, dim_b in zip(inc_to_match, dims[-1*(len(inc_to_match)):]):
if dim_a != dim_b:
raise DataIndexError()
# now we can safely discard all but the highest dimension
inc_index = len(index) - len(data_shape)
inc_amount = data_shape[0]
# make the index and increment amount dimensions match
index[inc_index] += inc_amount
# finally check that we did not run over allowed dimension
if index[inc_index] > dims[inc_index]:
raise DataIndexError()
while inc_index > 0 and index[inc_index] == dims[inc_index]:
index[inc_index-1] +=1
index[inc_index:] = [0]*len(index[inc_index:])
inc_index -=1
return index | python | def increment(index, dims, data_shape):
"""Increments a given index according to the shape of the data added
:param index: Current index to be incremented
:type index: list
    :param dims: Shape of the data structure being incremented
    :type dims: tuple
    :param data_shape: Shape of the data that the index is being incremented by; this is checked to ensure incrementing is correct
:returns: list - the incremented index
"""
# check dimensions of data match structure
inc_to_match = data_shape[1:]
for dim_a, dim_b in zip(inc_to_match, dims[-1*(len(inc_to_match)):]):
if dim_a != dim_b:
raise DataIndexError()
# now we can safely discard all but the highest dimension
inc_index = len(index) - len(data_shape)
inc_amount = data_shape[0]
# make the index and increment amount dimensions match
index[inc_index] += inc_amount
# finally check that we did not run over allowed dimension
if index[inc_index] > dims[inc_index]:
raise DataIndexError()
while inc_index > 0 and index[inc_index] == dims[inc_index]:
index[inc_index-1] +=1
index[inc_index:] = [0]*len(index[inc_index:])
inc_index -=1
return index | [
"def",
"increment",
"(",
"index",
",",
"dims",
",",
"data_shape",
")",
":",
"# check dimensions of data match structure",
"inc_to_match",
"=",
"data_shape",
"[",
"1",
":",
"]",
"for",
"dim_a",
",",
"dim_b",
"in",
"zip",
"(",
"inc_to_match",
",",
"dims",
"[",
"-",
"1",
"*",
"(",
"len",
"(",
"inc_to_match",
")",
")",
":",
"]",
")",
":",
"if",
"dim_a",
"!=",
"dim_b",
":",
"raise",
"DataIndexError",
"(",
")",
"# now we can safely discard all but the highest dimension",
"inc_index",
"=",
"len",
"(",
"index",
")",
"-",
"len",
"(",
"data_shape",
")",
"inc_amount",
"=",
"data_shape",
"[",
"0",
"]",
"# make the index and increment amount dimensions match",
"index",
"[",
"inc_index",
"]",
"+=",
"inc_amount",
"# finally check that we did not run over allowed dimension",
"if",
"index",
"[",
"inc_index",
"]",
">",
"dims",
"[",
"inc_index",
"]",
":",
"raise",
"DataIndexError",
"(",
")",
"while",
"inc_index",
">",
"0",
"and",
"index",
"[",
"inc_index",
"]",
"==",
"dims",
"[",
"inc_index",
"]",
":",
"index",
"[",
"inc_index",
"-",
"1",
"]",
"+=",
"1",
"index",
"[",
"inc_index",
":",
"]",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"index",
"[",
"inc_index",
":",
"]",
")",
"inc_index",
"-=",
"1",
"return",
"index"
]
| Increments a given index according to the shape of the data added
:param index: Current index to be incremented
:type index: list
:param dims: Shape of the data structure being incremented
:type dims: tuple
:param data_shape: Shape of the data that the index is being incremented by; this is checked to ensure incrementing is correct
:returns: list - the incremented index | [
"Increments",
"a",
"given",
"index",
"according",
"to",
"the",
"shape",
"of",
"the",
"data",
"added"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/data/acqdata.py#L231-L262 | train |
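
A worked sketch of the index arithmetic, assuming increment is imported from sparkle.data.acqdata (the module this entry comes from); the shapes are hypothetical:

    from sparkle.data.acqdata import increment

    # add a (2, 3) chunk to a (4, 3) structure: the outer index advances by 2
    assert increment([0, 0], (4, 3), (2, 3)) == [2, 0]

    # adding a single trace rolls the inner dimension over into the outer one
    assert increment([0, 2], (4, 3), (1,)) == [1, 0]
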
klahnakoski/mo-logs | mo_logs/__init__.py | Log.error | def error(
cls,
template, # human readable template
default_params={}, # parameters for template
    cause=None, # plausible cause
stack_depth=0,
**more_params
):
"""
raise an exception with a trace for the cause too
:param template: *string* human readable string with placeholders for parameters
:param default_params: *dict* parameters to fill in template
:param cause: *Exception* for chaining
:param stack_depth: *int* how many calls you want popped off the stack to report the *true* caller
:param log_context: *dict* extra key:value pairs for your convenience
:param more_params: *any more parameters (which will overwrite default_params)
:return:
"""
if not is_text(template):
sys.stderr.write(str("Log.error was expecting a unicode template"))
Log.error("Log.error was expecting a unicode template")
if default_params and isinstance(listwrap(default_params)[0], BaseException):
cause = default_params
default_params = {}
params = Data(dict(default_params, **more_params))
add_to_trace = False
if cause == None:
causes = None
elif is_list(cause):
causes = []
for c in listwrap(cause): # CAN NOT USE LIST-COMPREHENSION IN PYTHON3 (EXTRA STACK DEPTH FROM THE IN-LINED GENERATOR)
causes.append(Except.wrap(c, stack_depth=1))
causes = FlatList(causes)
elif isinstance(cause, BaseException):
causes = Except.wrap(cause, stack_depth=1)
else:
causes = None
Log.error("can only accept Exception, or list of exceptions")
trace = exceptions.extract_stack(stack_depth + 1)
if add_to_trace:
cause[0].trace.extend(trace[1:])
e = Except(context=exceptions.ERROR, template=template, params=params, cause=causes, trace=trace)
raise_from_none(e) | python | def error(
cls,
template, # human readable template
default_params={}, # parameters for template
    cause=None, # plausible cause
stack_depth=0,
**more_params
):
"""
raise an exception with a trace for the cause too
:param template: *string* human readable string with placeholders for parameters
:param default_params: *dict* parameters to fill in template
:param cause: *Exception* for chaining
:param stack_depth: *int* how many calls you want popped off the stack to report the *true* caller
:param log_context: *dict* extra key:value pairs for your convenience
:param more_params: *any more parameters (which will overwrite default_params)
:return:
"""
if not is_text(template):
sys.stderr.write(str("Log.error was expecting a unicode template"))
Log.error("Log.error was expecting a unicode template")
if default_params and isinstance(listwrap(default_params)[0], BaseException):
cause = default_params
default_params = {}
params = Data(dict(default_params, **more_params))
add_to_trace = False
if cause == None:
causes = None
elif is_list(cause):
causes = []
for c in listwrap(cause): # CAN NOT USE LIST-COMPREHENSION IN PYTHON3 (EXTRA STACK DEPTH FROM THE IN-LINED GENERATOR)
causes.append(Except.wrap(c, stack_depth=1))
causes = FlatList(causes)
elif isinstance(cause, BaseException):
causes = Except.wrap(cause, stack_depth=1)
else:
causes = None
Log.error("can only accept Exception, or list of exceptions")
trace = exceptions.extract_stack(stack_depth + 1)
if add_to_trace:
cause[0].trace.extend(trace[1:])
e = Except(context=exceptions.ERROR, template=template, params=params, cause=causes, trace=trace)
raise_from_none(e) | [
"def",
"error",
"(",
"cls",
",",
"template",
",",
"# human readable template",
"default_params",
"=",
"{",
"}",
",",
"# parameters for template",
"cause",
"=",
"None",
",",
"# pausible cause",
"stack_depth",
"=",
"0",
",",
"*",
"*",
"more_params",
")",
":",
"if",
"not",
"is_text",
"(",
"template",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"str",
"(",
"\"Log.error was expecting a unicode template\"",
")",
")",
"Log",
".",
"error",
"(",
"\"Log.error was expecting a unicode template\"",
")",
"if",
"default_params",
"and",
"isinstance",
"(",
"listwrap",
"(",
"default_params",
")",
"[",
"0",
"]",
",",
"BaseException",
")",
":",
"cause",
"=",
"default_params",
"default_params",
"=",
"{",
"}",
"params",
"=",
"Data",
"(",
"dict",
"(",
"default_params",
",",
"*",
"*",
"more_params",
")",
")",
"add_to_trace",
"=",
"False",
"if",
"cause",
"==",
"None",
":",
"causes",
"=",
"None",
"elif",
"is_list",
"(",
"cause",
")",
":",
"causes",
"=",
"[",
"]",
"for",
"c",
"in",
"listwrap",
"(",
"cause",
")",
":",
"# CAN NOT USE LIST-COMPREHENSION IN PYTHON3 (EXTRA STACK DEPTH FROM THE IN-LINED GENERATOR)",
"causes",
".",
"append",
"(",
"Except",
".",
"wrap",
"(",
"c",
",",
"stack_depth",
"=",
"1",
")",
")",
"causes",
"=",
"FlatList",
"(",
"causes",
")",
"elif",
"isinstance",
"(",
"cause",
",",
"BaseException",
")",
":",
"causes",
"=",
"Except",
".",
"wrap",
"(",
"cause",
",",
"stack_depth",
"=",
"1",
")",
"else",
":",
"causes",
"=",
"None",
"Log",
".",
"error",
"(",
"\"can only accept Exception, or list of exceptions\"",
")",
"trace",
"=",
"exceptions",
".",
"extract_stack",
"(",
"stack_depth",
"+",
"1",
")",
"if",
"add_to_trace",
":",
"cause",
"[",
"0",
"]",
".",
"trace",
".",
"extend",
"(",
"trace",
"[",
"1",
":",
"]",
")",
"e",
"=",
"Except",
"(",
"context",
"=",
"exceptions",
".",
"ERROR",
",",
"template",
"=",
"template",
",",
"params",
"=",
"params",
",",
"cause",
"=",
"causes",
",",
"trace",
"=",
"trace",
")",
"raise_from_none",
"(",
"e",
")"
]
| raise an exception with a trace for the cause too
:param template: *string* human readable string with placeholders for parameters
:param default_params: *dict* parameters to fill in template
:param cause: *Exception* for chaining
:param stack_depth: *int* how many calls you want popped off the stack to report the *true* caller
:param log_context: *dict* extra key:value pairs for your convenience
:param more_params: *any* more parameters (which will overwrite default_params)
:return: | [
"raise",
"an",
"exception",
"with",
"a",
"trace",
"for",
"the",
"cause",
"too"
]
| 0971277ac9caf28a755b766b70621916957d4fea | https://github.com/klahnakoski/mo-logs/blob/0971277ac9caf28a755b766b70621916957d4fea/mo_logs/__init__.py#L305-L354 | train |
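
A hedged usage sketch; mo-logs expands double-brace placeholders from the keyword parameters (per its documented template style), and the caught exception is chained in as the cause:

    from mo_logs import Log

    try:
        1 / 0
    except Exception as cause:
        Log.error("division failed for {{name}}", name="example", cause=cause)
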
TorkamaniLab/metapipe | metapipe/models/command_template.py | _get_parts_list | def _get_parts_list(to_go, so_far=[[]], ticker=None):
""" Iterates over to_go, building the list of parts. To provide
items for the beginning, use so_far.
"""
try:
part = to_go.pop(0)
except IndexError:
return so_far, ticker
# Lists of input groups
if isinstance(part, list) and any(isinstance(e, list) for e in part):
while len(part) > 0:
so_far, ticker = _get_parts_list(part, so_far, ticker)
ticker.tick()
# Input Group
elif isinstance(part, list) and any(isinstance(e, Input) for e in part):
while len(part) > 0:
so_far, ticker = _get_parts_list(part, so_far, ticker)
# Magic Inputs
elif isinstance(part, Input) and part.is_magic:
inputs = part.eval()
while len(inputs) > 0:
so_far, ticker = _get_parts_list(inputs, so_far, ticker)
ticker.tick()
# Normal inputs
elif isinstance(part, Input) and not part.is_magic:
so_far[ticker.value].append(part)
# Everything else
else:
so_far = _append(so_far, part)
return so_far, ticker | python | def _get_parts_list(to_go, so_far=[[]], ticker=None):
""" Iterates over to_go, building the list of parts. To provide
items for the beginning, use so_far.
"""
try:
part = to_go.pop(0)
except IndexError:
return so_far, ticker
# Lists of input groups
if isinstance(part, list) and any(isinstance(e, list) for e in part):
while len(part) > 0:
so_far, ticker = _get_parts_list(part, so_far, ticker)
ticker.tick()
# Input Group
elif isinstance(part, list) and any(isinstance(e, Input) for e in part):
while len(part) > 0:
so_far, ticker = _get_parts_list(part, so_far, ticker)
# Magic Inputs
elif isinstance(part, Input) and part.is_magic:
inputs = part.eval()
while len(inputs) > 0:
so_far, ticker = _get_parts_list(inputs, so_far, ticker)
ticker.tick()
# Normal inputs
elif isinstance(part, Input) and not part.is_magic:
so_far[ticker.value].append(part)
# Everything else
else:
so_far = _append(so_far, part)
return so_far, ticker | [
"def",
"_get_parts_list",
"(",
"to_go",
",",
"so_far",
"=",
"[",
"[",
"]",
"]",
",",
"ticker",
"=",
"None",
")",
":",
"try",
":",
"part",
"=",
"to_go",
".",
"pop",
"(",
"0",
")",
"except",
"IndexError",
":",
"return",
"so_far",
",",
"ticker",
"# Lists of input groups",
"if",
"isinstance",
"(",
"part",
",",
"list",
")",
"and",
"any",
"(",
"isinstance",
"(",
"e",
",",
"list",
")",
"for",
"e",
"in",
"part",
")",
":",
"while",
"len",
"(",
"part",
")",
">",
"0",
":",
"so_far",
",",
"ticker",
"=",
"_get_parts_list",
"(",
"part",
",",
"so_far",
",",
"ticker",
")",
"ticker",
".",
"tick",
"(",
")",
"# Input Group",
"elif",
"isinstance",
"(",
"part",
",",
"list",
")",
"and",
"any",
"(",
"isinstance",
"(",
"e",
",",
"Input",
")",
"for",
"e",
"in",
"part",
")",
":",
"while",
"len",
"(",
"part",
")",
">",
"0",
":",
"so_far",
",",
"ticker",
"=",
"_get_parts_list",
"(",
"part",
",",
"so_far",
",",
"ticker",
")",
"# Magic Inputs",
"elif",
"isinstance",
"(",
"part",
",",
"Input",
")",
"and",
"part",
".",
"is_magic",
":",
"inputs",
"=",
"part",
".",
"eval",
"(",
")",
"while",
"len",
"(",
"inputs",
")",
">",
"0",
":",
"so_far",
",",
"ticker",
"=",
"_get_parts_list",
"(",
"inputs",
",",
"so_far",
",",
"ticker",
")",
"ticker",
".",
"tick",
"(",
")",
"# Normal inputs",
"elif",
"isinstance",
"(",
"part",
",",
"Input",
")",
"and",
"not",
"part",
".",
"is_magic",
":",
"so_far",
"[",
"ticker",
".",
"value",
"]",
".",
"append",
"(",
"part",
")",
"# Everything else",
"else",
":",
"so_far",
"=",
"_append",
"(",
"so_far",
",",
"part",
")",
"return",
"so_far",
",",
"ticker"
]
| Iterates over to_go, building the list of parts. To provide
items for the beginning, use so_far. | [
"Iterates",
"over",
"to_go",
"building",
"the",
"list",
"of",
"parts",
".",
"To",
"provide",
"items",
"for",
"the",
"beginning",
"use",
"so_far",
"."
]
| 15592e5b0c217afb00ac03503f8d0d7453d4baf4 | https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/command_template.py#L74-L105 | train |
TorkamaniLab/metapipe | metapipe/models/command_template.py | _get_max_size | def _get_max_size(parts, size=1):
""" Given a list of parts, find the maximum number of commands
contained in it.
"""
max_group_size = 0
for part in parts:
if isinstance(part, list):
group_size = 0
for input_group in part:
group_size += 1
if group_size > max_group_size:
max_group_size = group_size
magic_size = _get_magic_size(parts)
return max_group_size * magic_size | python | def _get_max_size(parts, size=1):
""" Given a list of parts, find the maximum number of commands
contained in it.
"""
max_group_size = 0
for part in parts:
if isinstance(part, list):
group_size = 0
for input_group in part:
group_size += 1
if group_size > max_group_size:
max_group_size = group_size
magic_size = _get_magic_size(parts)
return max_group_size * magic_size | [
"def",
"_get_max_size",
"(",
"parts",
",",
"size",
"=",
"1",
")",
":",
"max_group_size",
"=",
"0",
"for",
"part",
"in",
"parts",
":",
"if",
"isinstance",
"(",
"part",
",",
"list",
")",
":",
"group_size",
"=",
"0",
"for",
"input_group",
"in",
"part",
":",
"group_size",
"+=",
"1",
"if",
"group_size",
">",
"max_group_size",
":",
"max_group_size",
"=",
"group_size",
"magic_size",
"=",
"_get_magic_size",
"(",
"parts",
")",
"return",
"max_group_size",
"*",
"magic_size"
]
| Given a list of parts, find the maximum number of commands
contained in it. | [
"Given",
"a",
"list",
"of",
"parts",
"find",
"the",
"maximum",
"number",
"of",
"commands",
"contained",
"in",
"it",
"."
]
| 15592e5b0c217afb00ac03503f8d0d7453d4baf4 | https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/command_template.py#L108-L123 | train |
TorkamaniLab/metapipe | metapipe/models/command_template.py | _grow | def _grow(list_of_lists, num_new):
""" Given a list of lists, and a number of new lists to add, copy the
content of the first list into the new ones, and add them to the list
of lists.
"""
first = list_of_lists[0]
for i in range(num_new):
list_of_lists.append(copy.deepcopy(first))
return list_of_lists | python | def _grow(list_of_lists, num_new):
""" Given a list of lists, and a number of new lists to add, copy the
content of the first list into the new ones, and add them to the list
of lists.
"""
first = list_of_lists[0]
for i in range(num_new):
list_of_lists.append(copy.deepcopy(first))
return list_of_lists | [
"def",
"_grow",
"(",
"list_of_lists",
",",
"num_new",
")",
":",
"first",
"=",
"list_of_lists",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"num_new",
")",
":",
"list_of_lists",
".",
"append",
"(",
"copy",
".",
"deepcopy",
"(",
"first",
")",
")",
"return",
"list_of_lists"
]
| Given a list of lists, and a number of new lists to add, copy the
content of the first list into the new ones, and add them to the list
of lists. | [
"Given",
"a",
"list",
"of",
"lists",
"and",
"a",
"number",
"of",
"new",
"lists",
"to",
"add",
"copy",
"the",
"content",
"of",
"the",
"first",
"list",
"into",
"the",
"new",
"ones",
"and",
"add",
"them",
"to",
"the",
"list",
"of",
"lists",
"."
]
| 15592e5b0c217afb00ac03503f8d0d7453d4baf4 | https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/command_template.py#L144-L152 | train |
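
A quick sketch of the deepcopy behaviour; the leading underscore marks this as a private helper of metapipe.models.command_template, imported here purely for illustration:

    from metapipe.models.command_template import _grow

    grown = _grow([['a', 'b']], 2)
    grown[1].append('c')  # each appended list is an independent deep copy
    assert grown == [['a', 'b'], ['a', 'b', 'c'], ['a', 'b']]
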
TorkamaniLab/metapipe | metapipe/models/command_template.py | _search_for_files | def _search_for_files(parts):
""" Given a list of parts, return all of the nested file parts. """
file_parts = []
for part in parts:
if isinstance(part, list):
file_parts.extend(_search_for_files(part))
elif isinstance(part, FileToken):
file_parts.append(part)
return file_parts | python | def _search_for_files(parts):
""" Given a list of parts, return all of the nested file parts. """
file_parts = []
for part in parts:
if isinstance(part, list):
file_parts.extend(_search_for_files(part))
elif isinstance(part, FileToken):
file_parts.append(part)
return file_parts | [
"def",
"_search_for_files",
"(",
"parts",
")",
":",
"file_parts",
"=",
"[",
"]",
"for",
"part",
"in",
"parts",
":",
"if",
"isinstance",
"(",
"part",
",",
"list",
")",
":",
"file_parts",
".",
"extend",
"(",
"_search_for_files",
"(",
"part",
")",
")",
"elif",
"isinstance",
"(",
"part",
",",
"FileToken",
")",
":",
"file_parts",
".",
"append",
"(",
"part",
")",
"return",
"file_parts"
]
| Given a list of parts, return all of the nested file parts. | [
"Given",
"a",
"list",
"of",
"parts",
"return",
"all",
"of",
"the",
"nested",
"file",
"parts",
"."
]
| 15592e5b0c217afb00ac03503f8d0d7453d4baf4 | https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/command_template.py#L155-L163 | train |
TorkamaniLab/metapipe | metapipe/models/command_template.py | CommandTemplate.eval | def eval(self):
""" Returns a list of Command objects that can be evaluated as their
        string values. Each command will track its preliminary dependencies,
but these values should not be depended on for running commands.
"""
max_size = _get_max_size(self.parts)
parts_list = _grow([[]], max_size-1)
counter = Ticker(max_size)
parts = self.parts[:]
while len(parts) > 0:
parts_list, counter = _get_parts_list(parts,
parts_list, counter)
commands = []
for i, parts in enumerate(parts_list):
alias = self._get_alias(i+1)
new_parts = copy.deepcopy(parts)
commands.append(Command(alias=alias, parts=new_parts))
return commands | python | def eval(self):
""" Returns a list of Command objects that can be evaluated as their
        string values. Each command will track its preliminary dependencies,
but these values should not be depended on for running commands.
"""
max_size = _get_max_size(self.parts)
parts_list = _grow([[]], max_size-1)
counter = Ticker(max_size)
parts = self.parts[:]
while len(parts) > 0:
parts_list, counter = _get_parts_list(parts,
parts_list, counter)
commands = []
for i, parts in enumerate(parts_list):
alias = self._get_alias(i+1)
new_parts = copy.deepcopy(parts)
commands.append(Command(alias=alias, parts=new_parts))
return commands | [
"def",
"eval",
"(",
"self",
")",
":",
"max_size",
"=",
"_get_max_size",
"(",
"self",
".",
"parts",
")",
"parts_list",
"=",
"_grow",
"(",
"[",
"[",
"]",
"]",
",",
"max_size",
"-",
"1",
")",
"counter",
"=",
"Ticker",
"(",
"max_size",
")",
"parts",
"=",
"self",
".",
"parts",
"[",
":",
"]",
"while",
"len",
"(",
"parts",
")",
">",
"0",
":",
"parts_list",
",",
"counter",
"=",
"_get_parts_list",
"(",
"parts",
",",
"parts_list",
",",
"counter",
")",
"commands",
"=",
"[",
"]",
"for",
"i",
",",
"parts",
"in",
"enumerate",
"(",
"parts_list",
")",
":",
"alias",
"=",
"self",
".",
"_get_alias",
"(",
"i",
"+",
"1",
")",
"new_parts",
"=",
"copy",
".",
"deepcopy",
"(",
"parts",
")",
"commands",
".",
"append",
"(",
"Command",
"(",
"alias",
"=",
"alias",
",",
"parts",
"=",
"new_parts",
")",
")",
"return",
"commands"
]
| Returns a list of Command objects that can be evaluated as their
string values. Each command will track its preliminary dependencies,
but these values should not be depended on for running commands. | [
"Returns",
"a",
"list",
"of",
"Command",
"objects",
"that",
"can",
"be",
"evaluated",
"as",
"their",
"string",
"values",
".",
"Each",
"command",
"will",
"track",
"it",
"s",
"preliminary",
"dependencies",
"but",
"these",
"values",
"should",
"not",
"be",
"depended",
"on",
"for",
"running",
"commands",
"."
]
| 15592e5b0c217afb00ac03503f8d0d7453d4baf4 | https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/command_template.py#L48-L67 | train |
hkupty/asterix | asterix/core.py | start_system | def start_system(components, bind_to, hooks={}):
"""Start all components on component map."""
deps = build_deps_graph(components)
started_components = start_components(components, deps, None)
run_hooks(hooks, started_components)
if type(bind_to) is str:
master = started_components[bind_to]
else:
master = bind_to
setattr(master, '__components', started_components)
return master | python | def start_system(components, bind_to, hooks={}):
"""Start all components on component map."""
deps = build_deps_graph(components)
started_components = start_components(components, deps, None)
run_hooks(hooks, started_components)
if type(bind_to) is str:
master = started_components[bind_to]
else:
master = bind_to
setattr(master, '__components', started_components)
return master | [
"def",
"start_system",
"(",
"components",
",",
"bind_to",
",",
"hooks",
"=",
"{",
"}",
")",
":",
"deps",
"=",
"build_deps_graph",
"(",
"components",
")",
"started_components",
"=",
"start_components",
"(",
"components",
",",
"deps",
",",
"None",
")",
"run_hooks",
"(",
"hooks",
",",
"started_components",
")",
"if",
"type",
"(",
"bind_to",
")",
"is",
"str",
":",
"master",
"=",
"started_components",
"[",
"bind_to",
"]",
"else",
":",
"master",
"=",
"bind_to",
"setattr",
"(",
"master",
",",
"'__components'",
",",
"started_components",
")",
"return",
"master"
]
| Start all components on component map. | [
"Start",
"all",
"components",
"on",
"component",
"map",
"."
]
| 809ee5b02a29e38889c5bd4eb5f0859da0703d0c | https://github.com/hkupty/asterix/blob/809ee5b02a29e38889c5bd4eb5f0859da0703d0c/asterix/core.py#L48-L61 | train |
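
A heavily hedged usage sketch: the component-map format that build_deps_graph and start_components expect is defined elsewhere in asterix and is not shown here, so the map below is only a placeholder. The grounded part is the retrieval: setattr is called with the literal string '__components' (no name mangling applies), so the started components can be read back the same way:

    from asterix.core import start_system

    class App:
        pass

    components = {}  # placeholder; real maps follow asterix's component format
    master = start_system(components, bind_to=App())
    started = getattr(master, '__components')  # started components, keyed by name
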
portfors-lab/sparkle | sparkle/stim/auto_parameter_model.py | AutoParameterModel.ranges | def ranges(self):
"""The expanded lists of values generated from the parameter fields
        :returns: list<list>, outer list is for each parameter, inner lists are that
parameter's values to loop through
"""
steps = []
for p in self._parameters:
# inclusive range
if p['parameter'] == 'filename':
steps.append(p['names'])
else:
if p['step'] > 0:
start = p['start']
stop = p['stop']
if start > stop:
step = p['step']*-1
else:
step = p['step']
# nsteps = np.ceil(np.around(abs(start - stop), 4) / p['step'])
nsteps = self.nStepsForParam(p)
# print 'nsteps', np.around(abs(start - stop), 4), p['step']
# print 'start, stop, steps', start, stop, nsteps
step_tmp = np.linspace(start, start+step*(nsteps-2), nsteps-1)
# print 'step_tmp', step_tmp
# if step_tmp[-1] != stop:
step_tmp = np.append(step_tmp,stop)
# print 'step range', step_tmp
steps.append(np.around(step_tmp,4))
else:
assert p['start'] == p['stop']
steps.append([p['start']])
return steps | python | def ranges(self):
"""The expanded lists of values generated from the parameter fields
        :returns: list<list>, outer list is for each parameter, inner lists are that
parameter's values to loop through
"""
steps = []
for p in self._parameters:
# inclusive range
if p['parameter'] == 'filename':
steps.append(p['names'])
else:
if p['step'] > 0:
start = p['start']
stop = p['stop']
if start > stop:
step = p['step']*-1
else:
step = p['step']
# nsteps = np.ceil(np.around(abs(start - stop), 4) / p['step'])
nsteps = self.nStepsForParam(p)
# print 'nsteps', np.around(abs(start - stop), 4), p['step']
# print 'start, stop, steps', start, stop, nsteps
step_tmp = np.linspace(start, start+step*(nsteps-2), nsteps-1)
# print 'step_tmp', step_tmp
# if step_tmp[-1] != stop:
step_tmp = np.append(step_tmp,stop)
# print 'step range', step_tmp
steps.append(np.around(step_tmp,4))
else:
assert p['start'] == p['stop']
steps.append([p['start']])
return steps | [
"def",
"ranges",
"(",
"self",
")",
":",
"steps",
"=",
"[",
"]",
"for",
"p",
"in",
"self",
".",
"_parameters",
":",
"# inclusive range",
"if",
"p",
"[",
"'parameter'",
"]",
"==",
"'filename'",
":",
"steps",
".",
"append",
"(",
"p",
"[",
"'names'",
"]",
")",
"else",
":",
"if",
"p",
"[",
"'step'",
"]",
">",
"0",
":",
"start",
"=",
"p",
"[",
"'start'",
"]",
"stop",
"=",
"p",
"[",
"'stop'",
"]",
"if",
"start",
">",
"stop",
":",
"step",
"=",
"p",
"[",
"'step'",
"]",
"*",
"-",
"1",
"else",
":",
"step",
"=",
"p",
"[",
"'step'",
"]",
"# nsteps = np.ceil(np.around(abs(start - stop), 4) / p['step'])",
"nsteps",
"=",
"self",
".",
"nStepsForParam",
"(",
"p",
")",
"# print 'nsteps', np.around(abs(start - stop), 4), p['step']",
"# print 'start, stop, steps', start, stop, nsteps",
"step_tmp",
"=",
"np",
".",
"linspace",
"(",
"start",
",",
"start",
"+",
"step",
"*",
"(",
"nsteps",
"-",
"2",
")",
",",
"nsteps",
"-",
"1",
")",
"# print 'step_tmp', step_tmp",
"# if step_tmp[-1] != stop:",
"step_tmp",
"=",
"np",
".",
"append",
"(",
"step_tmp",
",",
"stop",
")",
"# print 'step range', step_tmp",
"steps",
".",
"append",
"(",
"np",
".",
"around",
"(",
"step_tmp",
",",
"4",
")",
")",
"else",
":",
"assert",
"p",
"[",
"'start'",
"]",
"==",
"p",
"[",
"'stop'",
"]",
"steps",
".",
"append",
"(",
"[",
"p",
"[",
"'start'",
"]",
"]",
")",
"return",
"steps"
]
| The expanded lists of values generated from the parameter fields
:returns: list<list>, outer list is for each parameter, inner lists are that
parameter's values to loop through | [
"The",
"expanded",
"lists",
"of",
"values",
"generated",
"from",
"the",
"parameter",
"fields"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/auto_parameter_model.py#L301-L335 | train |
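
A standalone sketch of the inclusive-stepping trick used above: linspace generates every step except the last, then the exact stop value is appended so floating-point rounding can never drop the endpoint. nStepsForParam is not shown in this entry, so the sketch assumes it returns the number of points including both endpoints:

    import numpy as np

    start, stop, step = 0.0, 1.0, 0.3
    nsteps = int(np.ceil(np.around(abs(start - stop), 4) / step)) + 1  # assumed endpoint count
    values = np.linspace(start, start + step * (nsteps - 2), nsteps - 1)
    values = np.append(values, stop)
    print(np.around(values, 4))  # [0.   0.3  0.6  0.9  1. ]
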
portfors-lab/sparkle | sparkle/stim/auto_parameter_model.py | AutoParameterModel._selectionParameters | def _selectionParameters(self, param):
"""see docstring for selectedParameterTypes"""
components = param['selection']
if len(components) == 0:
return []
# extract the selected component names
editable_sets = []
for comp in components:
# all the keys (component names) for the auto details for components in selection
details = comp.auto_details()
editable_sets.append(set(details.keys()))
editable_paramters = set.intersection(*editable_sets)
# do not allow selecting of filename from here
return list(editable_paramters) | python | def _selectionParameters(self, param):
"""see docstring for selectedParameterTypes"""
components = param['selection']
if len(components) == 0:
return []
# extract the selected component names
editable_sets = []
for comp in components:
# all the keys (component names) for the auto details for components in selection
details = comp.auto_details()
editable_sets.append(set(details.keys()))
editable_paramters = set.intersection(*editable_sets)
# do not allow selecting of filename from here
return list(editable_paramters) | [
"def",
"_selectionParameters",
"(",
"self",
",",
"param",
")",
":",
"components",
"=",
"param",
"[",
"'selection'",
"]",
"if",
"len",
"(",
"components",
")",
"==",
"0",
":",
"return",
"[",
"]",
"# extract the selected component names",
"editable_sets",
"=",
"[",
"]",
"for",
"comp",
"in",
"components",
":",
"# all the keys (component names) for the auto details for components in selection",
"details",
"=",
"comp",
".",
"auto_details",
"(",
")",
"editable_sets",
".",
"append",
"(",
"set",
"(",
"details",
".",
"keys",
"(",
")",
")",
")",
"editable_paramters",
"=",
"set",
".",
"intersection",
"(",
"*",
"editable_sets",
")",
"# do not allow selecting of filename from here",
"return",
"list",
"(",
"editable_paramters",
")"
]
| see docstring for selectedParameterTypes | [
"see",
"docstring",
"for",
"selectedParameterTypes"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/auto_parameter_model.py#L337-L350 | train |
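
The heart of the method is a set intersection across each component's auto_details() keys; a standalone sketch with hypothetical parameter names:

    editable_sets = [
        {'frequency', 'intensity', 'duration'},  # auto_details() keys of component 1
        {'intensity', 'duration', 'risefall'},   # auto_details() keys of component 2
    ]
    editable = set.intersection(*editable_sets)
    assert editable == {'intensity', 'duration'}
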
portfors-lab/sparkle | sparkle/stim/auto_parameter_model.py | AutoParameterModel.updateComponentStartVals | def updateComponentStartVals(self):
"""Go through selected components for each auto parameter and set the start value"""
for param in self._parameters:
for component in param['selection']:
if param['parameter'] == 'filename':
component.set(param['parameter'], param['names'][0])
else:
component.set(param['parameter'], param['start']) | python | def updateComponentStartVals(self):
"""Go through selected components for each auto parameter and set the start value"""
for param in self._parameters:
for component in param['selection']:
if param['parameter'] == 'filename':
component.set(param['parameter'], param['names'][0])
else:
component.set(param['parameter'], param['start']) | [
"def",
"updateComponentStartVals",
"(",
"self",
")",
":",
"for",
"param",
"in",
"self",
".",
"_parameters",
":",
"for",
"component",
"in",
"param",
"[",
"'selection'",
"]",
":",
"if",
"param",
"[",
"'parameter'",
"]",
"==",
"'filename'",
":",
"component",
".",
"set",
"(",
"param",
"[",
"'parameter'",
"]",
",",
"param",
"[",
"'names'",
"]",
"[",
"0",
"]",
")",
"else",
":",
"component",
".",
"set",
"(",
"param",
"[",
"'parameter'",
"]",
",",
"param",
"[",
"'start'",
"]",
")"
]
| Go through selected components for each auto parameter and set the start value | [
"Go",
"through",
"selected",
"components",
"for",
"each",
"auto",
"parameter",
"and",
"set",
"the",
"start",
"value"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/auto_parameter_model.py#L352-L359 | train |
portfors-lab/sparkle | sparkle/stim/auto_parameter_model.py | AutoParameterModel.verify | def verify(self):
"""Checks all parameters for invalidating conditions
:returns: str -- message if error, 0 otherwise
"""
for row in range(self.nrows()):
result = self.verify_row(row)
if result != 0:
return result
return 0 | python | def verify(self):
"""Checks all parameters for invalidating conditions
:returns: str -- message if error, 0 otherwise
"""
for row in range(self.nrows()):
result = self.verify_row(row)
if result != 0:
return result
return 0 | [
"def",
"verify",
"(",
"self",
")",
":",
"for",
"row",
"in",
"range",
"(",
"self",
".",
"nrows",
"(",
")",
")",
":",
"result",
"=",
"self",
".",
"verify_row",
"(",
"row",
")",
"if",
"result",
"!=",
"0",
":",
"return",
"result",
"return",
"0"
]
| Checks all parameters for invalidating conditions
:returns: str -- message if error, 0 otherwise | [
"Checks",
"all",
"parameters",
"for",
"invalidating",
"conditions"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/auto_parameter_model.py#L381-L390 | train |
yamcs/yamcs-python | yamcs-client/examples/query_mdb.py | find_parameter | def find_parameter():
"""Find one parameter."""
p1 = mdb.get_parameter('/YSS/SIMULATOR/BatteryVoltage2')
print('Via qualified name:', p1)
p2 = mdb.get_parameter('MDB:OPS Name/SIMULATOR_BatteryVoltage2')
print('Via domain-specific alias:', p2) | python | def find_parameter():
"""Find one parameter."""
p1 = mdb.get_parameter('/YSS/SIMULATOR/BatteryVoltage2')
print('Via qualified name:', p1)
p2 = mdb.get_parameter('MDB:OPS Name/SIMULATOR_BatteryVoltage2')
print('Via domain-specific alias:', p2) | [
"def",
"find_parameter",
"(",
")",
":",
"p1",
"=",
"mdb",
".",
"get_parameter",
"(",
"'/YSS/SIMULATOR/BatteryVoltage2'",
")",
"print",
"(",
"'Via qualified name:'",
",",
"p1",
")",
"p2",
"=",
"mdb",
".",
"get_parameter",
"(",
"'MDB:OPS Name/SIMULATOR_BatteryVoltage2'",
")",
"print",
"(",
"'Via domain-specific alias:'",
",",
"p2",
")"
]
| Find one parameter. | [
"Find",
"one",
"parameter",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/query_mdb.py#L24-L30 | train |
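
The mdb handle used above comes from a client connection created earlier in the example script; a typical setup, following the yamcs-client examples, with a hypothetical host and instance name:

    from yamcs.client import YamcsClient

    client = YamcsClient('localhost:8090')      # hypothetical Yamcs server address
    mdb = client.get_mdb(instance='simulator')  # hypothetical instance name
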
lowandrew/OLCTools | spadespipeline/mobrecon.py | MobRecon.summary_reporter | def summary_reporter(self):
"""
Parse individual MOB Recon reports into a summary report
"""
logging.info('Creating MOB-recon summary report')
with open(os.path.join(self.reportpath, 'mob_recon_summary.csv'), 'w') as summary:
data = 'Strain,Location,Contig,Incompatibility,IncompatibilityAccession,RelaxaseType,' \
'MashNearestNeighbor,MashNeighborDistance\n'
for sample in self.metadata:
# Initialise a dictionary to store results for the COWBAT final report
sample[self.analysistype].pipelineresults = dict()
for primarykey, results in sample[self.analysistype].report_dict.items():
# Only process results if they are not calculated to be chromosomal
if results['cluster_id'] != 'chromosome':
data += ','.join(str(result).replace(',', ';') if str(result) != 'nan' else 'ND'
for result in [
sample.name,
results['cluster_id'],
results['contig_id'].split('|')[1],
results['rep_type'],
results['rep_type_accession'],
results['relaxase_type'],
results['mash_nearest_neighbor'],
results['mash_neighbor_distance']]
)
data += '\n'
# Add the calculated incompatibility to the pipeline results for use in the final COWBAT report
sample[self.analysistype].pipelineresults[results['cluster_id']] = \
';'.join(str(result).replace(',', ';') if str(result) != 'nan' else 'ND'
for result in [
results['rep_type']]
)
summary.write(data) | python | def summary_reporter(self):
"""
Parse individual MOB Recon reports into a summary report
"""
logging.info('Creating MOB-recon summary report')
with open(os.path.join(self.reportpath, 'mob_recon_summary.csv'), 'w') as summary:
data = 'Strain,Location,Contig,Incompatibility,IncompatibilityAccession,RelaxaseType,' \
'MashNearestNeighbor,MashNeighborDistance\n'
for sample in self.metadata:
# Initialise a dictionary to store results for the COWBAT final report
sample[self.analysistype].pipelineresults = dict()
for primarykey, results in sample[self.analysistype].report_dict.items():
# Only process results if they are not calculated to be chromosomal
if results['cluster_id'] != 'chromosome':
data += ','.join(str(result).replace(',', ';') if str(result) != 'nan' else 'ND'
for result in [
sample.name,
results['cluster_id'],
results['contig_id'].split('|')[1],
results['rep_type'],
results['rep_type_accession'],
results['relaxase_type'],
results['mash_nearest_neighbor'],
results['mash_neighbor_distance']]
)
data += '\n'
# Add the calculated incompatibility to the pipeline results for use in the final COWBAT report
sample[self.analysistype].pipelineresults[results['cluster_id']] = \
';'.join(str(result).replace(',', ';') if str(result) != 'nan' else 'ND'
for result in [
results['rep_type']]
)
summary.write(data) | [
"def",
"summary_reporter",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Creating MOB-recon summary report'",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'mob_recon_summary.csv'",
")",
",",
"'w'",
")",
"as",
"summary",
":",
"data",
"=",
"'Strain,Location,Contig,Incompatibility,IncompatibilityAccession,RelaxaseType,'",
"'MashNearestNeighbor,MashNeighborDistance\\n'",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Initialise a dictionary to store results for the COWBAT final report",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"pipelineresults",
"=",
"dict",
"(",
")",
"for",
"primarykey",
",",
"results",
"in",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"report_dict",
".",
"items",
"(",
")",
":",
"# Only process results if they are not calculated to be chromosomal",
"if",
"results",
"[",
"'cluster_id'",
"]",
"!=",
"'chromosome'",
":",
"data",
"+=",
"','",
".",
"join",
"(",
"str",
"(",
"result",
")",
".",
"replace",
"(",
"','",
",",
"';'",
")",
"if",
"str",
"(",
"result",
")",
"!=",
"'nan'",
"else",
"'ND'",
"for",
"result",
"in",
"[",
"sample",
".",
"name",
",",
"results",
"[",
"'cluster_id'",
"]",
",",
"results",
"[",
"'contig_id'",
"]",
".",
"split",
"(",
"'|'",
")",
"[",
"1",
"]",
",",
"results",
"[",
"'rep_type'",
"]",
",",
"results",
"[",
"'rep_type_accession'",
"]",
",",
"results",
"[",
"'relaxase_type'",
"]",
",",
"results",
"[",
"'mash_nearest_neighbor'",
"]",
",",
"results",
"[",
"'mash_neighbor_distance'",
"]",
"]",
")",
"data",
"+=",
"'\\n'",
"# Add the calculated incompatibility to the pipeline results for use in the final COWBAT report",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"pipelineresults",
"[",
"results",
"[",
"'cluster_id'",
"]",
"]",
"=",
"';'",
".",
"join",
"(",
"str",
"(",
"result",
")",
".",
"replace",
"(",
"','",
",",
"';'",
")",
"if",
"str",
"(",
"result",
")",
"!=",
"'nan'",
"else",
"'ND'",
"for",
"result",
"in",
"[",
"results",
"[",
"'rep_type'",
"]",
"]",
")",
"summary",
".",
"write",
"(",
"data",
")"
]
| Parse individual MOB Recon reports into a summary report | [
"Parse",
"individual",
"MOB",
"Recon",
"reports",
"into",
"a",
"summary",
"report"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/mobrecon.py#L107-L139 | train |
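
The repeated `str(result) != 'nan'` guard works because the reports are parsed with pandas, whose empty cells arrive as NaN floats (as the amrsummary comments below also note); a minimal sketch of the substitution, with hypothetical field values:

    import pandas as pd

    results = {'rep_type': float('nan'), 'relaxase_type': 'MOBF,MOBP'}
    row = ','.join(str(value).replace(',', ';') if str(value) != 'nan' else 'ND'
                   for value in results.values())
    assert row == 'ND,MOBF;MOBP'
    assert pd.isna(results['rep_type'])  # an equivalent, more explicit missing-value test
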
lowandrew/OLCTools | spadespipeline/mobrecon.py | MobRecon.amrsummary | def amrsummary(self):
"""
Create a report combining results from resfinder_assembled and mob_recon_summary reports
"""
logging.info('Creating AMR summary table from ResFinder and MOB-recon outputs')
with open(os.path.join(self.reportpath, 'amr_summary.csv'), 'w') as amr:
data = 'Strain,Gene,Allele,Resistance,PercentIdentity,Contig,Location,PlasmidIncompatibilitySets\n'
for sample in self.metadata:
# Initialise a dictionary to store a set of all the incompatibility types listed for a contig.
# As the inc type will only be located on one of possibly several contigs associated with a predicted
# plasmid, it is nice to know details about the plasmid
inc_dict = dict()
for primarykey, results in sample[self.analysistype].report_dict.items():
try:
inc = results['cluster_id']
                        # Convert the rep_type field (predicted incompatibilities) into a more consistent
# format - pandas will call empty fields 'nan', which is a float
rep = str(results['rep_type']).replace(',', ';') if str(results['rep_type']) != 'nan' else 'ND'
# Add the incompatibility to the set
try:
inc_dict[inc].add(rep)
except KeyError:
inc_dict[inc] = set()
inc_dict[inc].add(rep)
except KeyError:
pass
                # Iterate over the results again to link AMR genes to plasmid contigs
for primarykey, results in sample[self.analysistype].report_dict.items():
try:
contig = results['contig_id'].split('|')[1]
# Unicycler gives contigs names such as: 3_length=187116_depth=1.60x_circular=true - test
# to see if the contig name looks unicycler-like, and set the name appropriately (in this
# case, it would be 3)
if contig.split('_')[1].startswith('length'):
contig = contig.split('_')[0]
# Use the list of results from the resfinder analyses
for amr_result in sample.resfinder_assembled.sampledata:
# Ensure that the current contig is the same as the one in the resfinder results. Ensure
# that the slice of the amr result is treated as a string. Unicycler contigs seem to be
# treated as integers
if contig == str(amr_result[-1]):
# Set up the output string
data += '{sn},'.format(sn=sample.name)
# Add the resistance and MOB recon outputs for the strain
data += '{amr},{mob}\n'\
.format(amr=','.join(str(res) if str(res) != 'nan' else 'ND' for res in
amr_result[0:4]),
mob=','.join(str(res) if str(res) != 'nan' else 'ND' for res in
[contig, results['cluster_id'],
';'.join(sorted(inc_dict[str(results['cluster_id'])]))
]
)
)
except KeyError:
pass
amr.write(data) | python | def amrsummary(self):
"""
Create a report combining results from resfinder_assembled and mob_recon_summary reports
"""
logging.info('Creating AMR summary table from ResFinder and MOB-recon outputs')
with open(os.path.join(self.reportpath, 'amr_summary.csv'), 'w') as amr:
data = 'Strain,Gene,Allele,Resistance,PercentIdentity,Contig,Location,PlasmidIncompatibilitySets\n'
for sample in self.metadata:
# Initialise a dictionary to store a set of all the incompatibility types listed for a contig.
# As the inc type will only be located on one of possibly several contigs associated with a predicted
# plasmid, it is nice to know details about the plasmid
inc_dict = dict()
for primarykey, results in sample[self.analysistype].report_dict.items():
try:
inc = results['cluster_id']
                        # Convert the rep_type field (predicted incompatibilities) into a more consistent
# format - pandas will call empty fields 'nan', which is a float
rep = str(results['rep_type']).replace(',', ';') if str(results['rep_type']) != 'nan' else 'ND'
# Add the incompatibility to the set
try:
inc_dict[inc].add(rep)
except KeyError:
inc_dict[inc] = set()
inc_dict[inc].add(rep)
except KeyError:
pass
                # Iterate over the results again to link AMR genes to plasmid contigs
for primarykey, results in sample[self.analysistype].report_dict.items():
try:
contig = results['contig_id'].split('|')[1]
# Unicycler gives contigs names such as: 3_length=187116_depth=1.60x_circular=true - test
# to see if the contig name looks unicycler-like, and set the name appropriately (in this
# case, it would be 3)
if contig.split('_')[1].startswith('length'):
contig = contig.split('_')[0]
# Use the list of results from the resfinder analyses
for amr_result in sample.resfinder_assembled.sampledata:
# Ensure that the current contig is the same as the one in the resfinder results. Ensure
# that the slice of the amr result is treated as a string. Unicycler contigs seem to be
# treated as integers
if contig == str(amr_result[-1]):
# Set up the output string
data += '{sn},'.format(sn=sample.name)
# Add the resistance and MOB recon outputs for the strain
data += '{amr},{mob}\n'\
.format(amr=','.join(str(res) if str(res) != 'nan' else 'ND' for res in
amr_result[0:4]),
mob=','.join(str(res) if str(res) != 'nan' else 'ND' for res in
[contig, results['cluster_id'],
';'.join(sorted(inc_dict[str(results['cluster_id'])]))
]
)
)
except KeyError:
pass
amr.write(data) | [
"def",
"amrsummary",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Creating AMR summary table from ResFinder and MOB-recon outputs'",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'amr_summary.csv'",
")",
",",
"'w'",
")",
"as",
"amr",
":",
"data",
"=",
"'Strain,Gene,Allele,Resistance,PercentIdentity,Contig,Location,PlasmidIncompatibilitySets\\n'",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Initialise a dictionary to store a set of all the incompatibility types listed for a contig.",
"# As the inc type will only be located on one of possibly several contigs associated with a predicted",
"# plasmid, it is nice to know details about the plasmid",
"inc_dict",
"=",
"dict",
"(",
")",
"for",
"primarykey",
",",
"results",
"in",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"report_dict",
".",
"items",
"(",
")",
":",
"try",
":",
"inc",
"=",
"results",
"[",
"'cluster_id'",
"]",
"# Convert the rep_type field (predicted incompatibilities) into a more a consistent",
"# format - pandas will call empty fields 'nan', which is a float",
"rep",
"=",
"str",
"(",
"results",
"[",
"'rep_type'",
"]",
")",
".",
"replace",
"(",
"','",
",",
"';'",
")",
"if",
"str",
"(",
"results",
"[",
"'rep_type'",
"]",
")",
"!=",
"'nan'",
"else",
"'ND'",
"# Add the incompatibility to the set",
"try",
":",
"inc_dict",
"[",
"inc",
"]",
".",
"add",
"(",
"rep",
")",
"except",
"KeyError",
":",
"inc_dict",
"[",
"inc",
"]",
"=",
"set",
"(",
")",
"inc_dict",
"[",
"inc",
"]",
".",
"add",
"(",
"rep",
")",
"except",
"KeyError",
":",
"pass",
"#",
"for",
"primarykey",
",",
"results",
"in",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"report_dict",
".",
"items",
"(",
")",
":",
"try",
":",
"contig",
"=",
"results",
"[",
"'contig_id'",
"]",
".",
"split",
"(",
"'|'",
")",
"[",
"1",
"]",
"# Unicycler gives contigs names such as: 3_length=187116_depth=1.60x_circular=true - test",
"# to see if the contig name looks unicycler-like, and set the name appropriately (in this",
"# case, it would be 3)",
"if",
"contig",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
"]",
".",
"startswith",
"(",
"'length'",
")",
":",
"contig",
"=",
"contig",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"# Use the list of results from the resfinder analyses",
"for",
"amr_result",
"in",
"sample",
".",
"resfinder_assembled",
".",
"sampledata",
":",
"# Ensure that the current contig is the same as the one in the resfinder results. Ensure",
"# that the slice of the amr result is treated as a string. Unicycler contigs seem to be",
"# treated as integers",
"if",
"contig",
"==",
"str",
"(",
"amr_result",
"[",
"-",
"1",
"]",
")",
":",
"# Set up the output string",
"data",
"+=",
"'{sn},'",
".",
"format",
"(",
"sn",
"=",
"sample",
".",
"name",
")",
"# Add the resistance and MOB recon outputs for the strain",
"data",
"+=",
"'{amr},{mob}\\n'",
".",
"format",
"(",
"amr",
"=",
"','",
".",
"join",
"(",
"str",
"(",
"res",
")",
"if",
"str",
"(",
"res",
")",
"!=",
"'nan'",
"else",
"'ND'",
"for",
"res",
"in",
"amr_result",
"[",
"0",
":",
"4",
"]",
")",
",",
"mob",
"=",
"','",
".",
"join",
"(",
"str",
"(",
"res",
")",
"if",
"str",
"(",
"res",
")",
"!=",
"'nan'",
"else",
"'ND'",
"for",
"res",
"in",
"[",
"contig",
",",
"results",
"[",
"'cluster_id'",
"]",
",",
"';'",
".",
"join",
"(",
"sorted",
"(",
"inc_dict",
"[",
"str",
"(",
"results",
"[",
"'cluster_id'",
"]",
")",
"]",
")",
")",
"]",
")",
")",
"except",
"KeyError",
":",
"pass",
"amr",
".",
"write",
"(",
"data",
")"
]
| Create a report combining results from resfinder_assembled and mob_recon_summary reports | [
"Create",
"a",
"report",
"combining",
"results",
"from",
"resfinder_assembled",
"and",
"mob_recon_summary",
"reports"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/mobrecon.py#L141-L196 | train |
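
The 'ND' substitution in amrsummary is a small idiom worth isolating: pandas renders empty report cells as the float nan, so every value is stringified and compared against 'nan' before joining. A minimal sketch with hypothetical resfinder values:

amr_result = ['blaTEM-1', '1', 'beta-lactam', float('nan')]  # made-up row
row = ','.join(str(res) if str(res) != 'nan' else 'ND' for res in amr_result)
print(row)  # blaTEM-1,1,beta-lactam,ND
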
lowandrew/OLCTools | spadespipeline/mobrecon.py | MobRecon.geneseekrsummary | def geneseekrsummary(self):
"""
Create a report combining GeneSeekr and MOB Recon outputs
"""
logging.info('Creating predicted plasmid-borne gene summary table')
with open(os.path.join(self.reportpath, 'plasmid_borne_summary.csv'), 'w') as pbs:
data = 'Strain,Gene,PercentIdentity,Contig,Location,PlasmidIncompatibilitySets\n'
for sample in self.metadata:
# Create a flag to determine whether the strain name needs to be added to the data string if there
# were no results
result_bool = False
# Initialise a dictionary to store a set of all the incompatibility types listed for a contig.
# As the inc type will only be located on one of possibly several contigs associated with a predicted
# plasmid, it is nice to know details about the plasmid
inc_dict = dict()
# Iterate through all the MOB recon outputs to populate the incompatibility set
for primarykey, results in sample[self.analysistype].report_dict.items():
try:
inc = results['cluster_id']
                        # Convert the rep_type field (predicted incompatibilities) into a more consistent
# format - pandas will call empty fields 'nan', which is a float
rep = str(results['rep_type']).replace(',', ';') if str(results['rep_type']) != 'nan' else 'ND'
# Add the incompatibility to the set
try:
inc_dict[inc].add(rep)
except KeyError:
inc_dict[inc] = set()
inc_dict[inc].add(rep)
except KeyError:
pass
for primarykey, results in sample[self.analysistype].report_dict.items():
try:
contig = results['contig_id'].split('|')[1]
                        # Unicycler gives contig names such as: 3_length=187116_depth=1.60x_circular=true - test
# to see if the contig name looks unicycler-like, and set the name appropriately (in this
# case, it would be 3)
if contig.split('_')[1].startswith('length'):
contig = contig.split('_')[0]
for gene, result_dict in sample.geneseekr_results.sampledata.items():
if contig == result_dict['query_id']:
percent_identity = result_dict['PercentIdentity']
# Set up the output string if the percent identity of the match is greater than the
# cutoff
if float(result_dict['PercentIdentity']) >= self.cutoff:
# As there was at least a single gene passing the threshold, set the boolean to True
result_bool = True
data += '{sn},'.format(sn=sample.name)
data += '{gene},{pi},{contig},{cid},{inc}\n'\
.format(gene=gene,
pi=percent_identity,
contig=contig,
cid=results['cluster_id'],
inc=';'.join(sorted(inc_dict[str(results['cluster_id'])])))
except KeyError:
pass
# If there were no results associated with the strain, make the row the strain name only
if not result_bool:
data += '{sn}\n'.format(sn=sample.name)
# Write the string to the report
pbs.write(data) | python | def geneseekrsummary(self):
"""
Create a report combining GeneSeekr and MOB Recon outputs
"""
logging.info('Creating predicted plasmid-borne gene summary table')
with open(os.path.join(self.reportpath, 'plasmid_borne_summary.csv'), 'w') as pbs:
data = 'Strain,Gene,PercentIdentity,Contig,Location,PlasmidIncompatibilitySets\n'
for sample in self.metadata:
# Create a flag to determine whether the strain name needs to be added to the data string if there
# were no results
result_bool = False
# Initialise a dictionary to store a set of all the incompatibility types listed for a contig.
# As the inc type will only be located on one of possibly several contigs associated with a predicted
# plasmid, it is nice to know details about the plasmid
inc_dict = dict()
# Iterate through all the MOB recon outputs to populate the incompatibility set
for primarykey, results in sample[self.analysistype].report_dict.items():
try:
inc = results['cluster_id']
                        # Convert the rep_type field (predicted incompatibilities) into a more consistent
# format - pandas will call empty fields 'nan', which is a float
rep = str(results['rep_type']).replace(',', ';') if str(results['rep_type']) != 'nan' else 'ND'
# Add the incompatibility to the set
try:
inc_dict[inc].add(rep)
except KeyError:
inc_dict[inc] = set()
inc_dict[inc].add(rep)
except KeyError:
pass
for primarykey, results in sample[self.analysistype].report_dict.items():
try:
contig = results['contig_id'].split('|')[1]
                        # Unicycler gives contig names such as: 3_length=187116_depth=1.60x_circular=true - test
# to see if the contig name looks unicycler-like, and set the name appropriately (in this
# case, it would be 3)
if contig.split('_')[1].startswith('length'):
contig = contig.split('_')[0]
for gene, result_dict in sample.geneseekr_results.sampledata.items():
if contig == result_dict['query_id']:
percent_identity = result_dict['PercentIdentity']
# Set up the output string if the percent identity of the match is greater than the
# cutoff
if float(result_dict['PercentIdentity']) >= self.cutoff:
# As there was at least a single gene passing the threshold, set the boolean to True
result_bool = True
data += '{sn},'.format(sn=sample.name)
data += '{gene},{pi},{contig},{cid},{inc}\n'\
.format(gene=gene,
pi=percent_identity,
contig=contig,
cid=results['cluster_id'],
inc=';'.join(sorted(inc_dict[str(results['cluster_id'])])))
except KeyError:
pass
# If there were no results associated with the strain, make the row the strain name only
if not result_bool:
data += '{sn}\n'.format(sn=sample.name)
# Write the string to the report
pbs.write(data) | [
"def",
"geneseekrsummary",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Creating predicted plasmid-borne gene summary table'",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reportpath",
",",
"'plasmid_borne_summary.csv'",
")",
",",
"'w'",
")",
"as",
"pbs",
":",
"data",
"=",
"'Strain,Gene,PercentIdentity,Contig,Location,PlasmidIncompatibilitySets\\n'",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Create a flag to determine whether the strain name needs to be added to the data string if there",
"# were no results",
"result_bool",
"=",
"False",
"# Initialise a dictionary to store a set of all the incompatibility types listed for a contig.",
"# As the inc type will only be located on one of possibly several contigs associated with a predicted",
"# plasmid, it is nice to know details about the plasmid",
"inc_dict",
"=",
"dict",
"(",
")",
"# Iterate through all the MOB recon outputs to populate the incompatibility set",
"for",
"primarykey",
",",
"results",
"in",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"report_dict",
".",
"items",
"(",
")",
":",
"try",
":",
"inc",
"=",
"results",
"[",
"'cluster_id'",
"]",
"# Convert the rep_type field (predicted incompatibilities) into a more a consistent",
"# format - pandas will call empty fields 'nan', which is a float",
"rep",
"=",
"str",
"(",
"results",
"[",
"'rep_type'",
"]",
")",
".",
"replace",
"(",
"','",
",",
"';'",
")",
"if",
"str",
"(",
"results",
"[",
"'rep_type'",
"]",
")",
"!=",
"'nan'",
"else",
"'ND'",
"# Add the incompatibility to the set",
"try",
":",
"inc_dict",
"[",
"inc",
"]",
".",
"add",
"(",
"rep",
")",
"except",
"KeyError",
":",
"inc_dict",
"[",
"inc",
"]",
"=",
"set",
"(",
")",
"inc_dict",
"[",
"inc",
"]",
".",
"add",
"(",
"rep",
")",
"except",
"KeyError",
":",
"pass",
"for",
"primarykey",
",",
"results",
"in",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"report_dict",
".",
"items",
"(",
")",
":",
"try",
":",
"contig",
"=",
"results",
"[",
"'contig_id'",
"]",
".",
"split",
"(",
"'|'",
")",
"[",
"1",
"]",
"# Unicycler gives contigs names such as: 3_length=187116_depth=1.60x_circular=true - test",
"# to see if the contig name looks unicycler-like, and set the name appropriately (in this",
"# case, it would be 3)",
"if",
"contig",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
"]",
".",
"startswith",
"(",
"'length'",
")",
":",
"contig",
"=",
"contig",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"for",
"gene",
",",
"result_dict",
"in",
"sample",
".",
"geneseekr_results",
".",
"sampledata",
".",
"items",
"(",
")",
":",
"if",
"contig",
"==",
"result_dict",
"[",
"'query_id'",
"]",
":",
"percent_identity",
"=",
"result_dict",
"[",
"'PercentIdentity'",
"]",
"# Set up the output string if the percent identity of the match is greater than the",
"# cutoff",
"if",
"float",
"(",
"result_dict",
"[",
"'PercentIdentity'",
"]",
")",
">=",
"self",
".",
"cutoff",
":",
"# As there was at least a single gene passing the threshold, set the boolean to True",
"result_bool",
"=",
"True",
"data",
"+=",
"'{sn},'",
".",
"format",
"(",
"sn",
"=",
"sample",
".",
"name",
")",
"data",
"+=",
"'{gene},{pi},{contig},{cid},{inc}\\n'",
".",
"format",
"(",
"gene",
"=",
"gene",
",",
"pi",
"=",
"percent_identity",
",",
"contig",
"=",
"contig",
",",
"cid",
"=",
"results",
"[",
"'cluster_id'",
"]",
",",
"inc",
"=",
"';'",
".",
"join",
"(",
"sorted",
"(",
"inc_dict",
"[",
"str",
"(",
"results",
"[",
"'cluster_id'",
"]",
")",
"]",
")",
")",
")",
"except",
"KeyError",
":",
"pass",
"# If there were no results associated with the strain, make the row the strain name only",
"if",
"not",
"result_bool",
":",
"data",
"+=",
"'{sn}\\n'",
".",
"format",
"(",
"sn",
"=",
"sample",
".",
"name",
")",
"# Write the string to the report",
"pbs",
".",
"write",
"(",
"data",
")"
]
| Create a report combining GeneSeekr and MOB Recon outputs | [
"Create",
"a",
"report",
"combining",
"GeneSeekr",
"and",
"MOB",
"Recon",
"outputs"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/mobrecon.py#L198-L257 | train |
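
Both report methods build inc_dict with nested try/except KeyError blocks; a collections.defaultdict(set) expresses the same accumulation more directly. A sketch with hypothetical MOB-recon rows (the cluster id and rep types are made up, and set ordering in the output may vary):

from collections import defaultdict

inc_dict = defaultdict(set)
for results in [{'cluster_id': 'AA474', 'rep_type': 'IncFIB'},
                {'cluster_id': 'AA474', 'rep_type': float('nan')}]:
    rep = str(results['rep_type']).replace(',', ';') if str(results['rep_type']) != 'nan' else 'ND'
    inc_dict[results['cluster_id']].add(rep)
print(dict(inc_dict))  # {'AA474': {'IncFIB', 'ND'}}
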
kevinconway/venvctrl | venvctrl/venv/command.py | CommandMixin._execute | def _execute(cmd):
"""Run a command in a subshell."""
cmd_parts = shlex.split(cmd)
if sys.version_info[0] < 3:
cmd_parts = shlex.split(cmd.encode('ascii'))
proc = subprocess.Popen(
cmd_parts,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = proc.communicate()
if proc.returncode != 0:
raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=cmd,
output=err,
)
return CommandResult(
code=proc.returncode,
out=out.decode('utf8'),
err=err.decode('utf8'),
) | python | def _execute(cmd):
"""Run a command in a subshell."""
cmd_parts = shlex.split(cmd)
if sys.version_info[0] < 3:
cmd_parts = shlex.split(cmd.encode('ascii'))
proc = subprocess.Popen(
cmd_parts,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = proc.communicate()
if proc.returncode != 0:
raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=cmd,
output=err,
)
return CommandResult(
code=proc.returncode,
out=out.decode('utf8'),
err=err.decode('utf8'),
) | [
"def",
"_execute",
"(",
"cmd",
")",
":",
"cmd_parts",
"=",
"shlex",
".",
"split",
"(",
"cmd",
")",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
":",
"cmd_parts",
"=",
"shlex",
".",
"split",
"(",
"cmd",
".",
"encode",
"(",
"'ascii'",
")",
")",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd_parts",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
")",
"out",
",",
"err",
"=",
"proc",
".",
"communicate",
"(",
")",
"if",
"proc",
".",
"returncode",
"!=",
"0",
":",
"raise",
"subprocess",
".",
"CalledProcessError",
"(",
"returncode",
"=",
"proc",
".",
"returncode",
",",
"cmd",
"=",
"cmd",
",",
"output",
"=",
"err",
",",
")",
"return",
"CommandResult",
"(",
"code",
"=",
"proc",
".",
"returncode",
",",
"out",
"=",
"out",
".",
"decode",
"(",
"'utf8'",
")",
",",
"err",
"=",
"err",
".",
"decode",
"(",
"'utf8'",
")",
",",
")"
]
| Run a command in a subshell. | [
"Run",
"a",
"command",
"in",
"a",
"subshell",
"."
]
| 36d4e0e4d5ebced6385a6ade1198f4769ff2df41 | https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/command.py#L22-L47 | train |
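
A usage sketch for the subprocess wrapper; it assumes _execute is reachable as a staticmethod on the mixin (no decorator is shown in this record) and that echo exists on the host:

result = CommandMixin._execute('echo hello')
print(result.code, result.out.strip())  # 0 hello
# A non-zero exit code raises subprocess.CalledProcessError with stderr attached.
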
kevinconway/venvctrl | venvctrl/venv/command.py | CommandMixin.cmd_path | def cmd_path(self, cmd):
"""Get the path of a command in the virtual if it exists.
Args:
cmd (str): The command to look for.
Returns:
str: The full path to the command.
Raises:
ValueError: If the command is not present.
"""
for binscript in self.bin.files:
if binscript.path.endswith('/{0}'.format(cmd)):
return binscript.path
raise ValueError('The command {0} was not found.'.format(cmd)) | python | def cmd_path(self, cmd):
"""Get the path of a command in the virtual if it exists.
Args:
cmd (str): The command to look for.
Returns:
str: The full path to the command.
Raises:
ValueError: If the command is not present.
"""
for binscript in self.bin.files:
if binscript.path.endswith('/{0}'.format(cmd)):
return binscript.path
raise ValueError('The command {0} was not found.'.format(cmd)) | [
"def",
"cmd_path",
"(",
"self",
",",
"cmd",
")",
":",
"for",
"binscript",
"in",
"self",
".",
"bin",
".",
"files",
":",
"if",
"binscript",
".",
"path",
".",
"endswith",
"(",
"'/{0}'",
".",
"format",
"(",
"cmd",
")",
")",
":",
"return",
"binscript",
".",
"path",
"raise",
"ValueError",
"(",
"'The command {0} was not found.'",
".",
"format",
"(",
"cmd",
")",
")"
]
| Get the path of a command in the virtual environment if it exists.
Args:
cmd (str): The command to look for.
Returns:
str: The full path to the command.
Raises:
ValueError: If the command is not present. | [
"Get",
"the",
"path",
"of",
"a",
"command",
"in",
"the",
"virtual",
"if",
"it",
"exists",
"."
]
| 36d4e0e4d5ebced6385a6ade1198f4769ff2df41 | https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/command.py#L49-L67 | train |
kevinconway/venvctrl | venvctrl/venv/command.py | CommandMixin.python | def python(self, cmd):
"""Execute a python script using the virtual environment python."""
python_bin = self.cmd_path('python')
cmd = '{0} {1}'.format(python_bin, cmd)
return self._execute(cmd) | python | def python(self, cmd):
"""Execute a python script using the virtual environment python."""
python_bin = self.cmd_path('python')
cmd = '{0} {1}'.format(python_bin, cmd)
return self._execute(cmd) | [
"def",
"python",
"(",
"self",
",",
"cmd",
")",
":",
"python_bin",
"=",
"self",
".",
"cmd_path",
"(",
"'python'",
")",
"cmd",
"=",
"'{0} {1}'",
".",
"format",
"(",
"python_bin",
",",
"cmd",
")",
"return",
"self",
".",
"_execute",
"(",
"cmd",
")"
]
| Execute a python script using the virtual environment python. | [
"Execute",
"a",
"python",
"script",
"using",
"the",
"virtual",
"environment",
"python",
"."
]
| 36d4e0e4d5ebced6385a6ade1198f4769ff2df41 | https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/command.py#L73-L77 | train |
kevinconway/venvctrl | venvctrl/venv/command.py | CommandMixin.pip | def pip(self, cmd):
"""Execute some pip function using the virtual environment pip."""
pip_bin = self.cmd_path('pip')
cmd = '{0} {1}'.format(pip_bin, cmd)
return self._execute(cmd) | python | def pip(self, cmd):
"""Execute some pip function using the virtual environment pip."""
pip_bin = self.cmd_path('pip')
cmd = '{0} {1}'.format(pip_bin, cmd)
return self._execute(cmd) | [
"def",
"pip",
"(",
"self",
",",
"cmd",
")",
":",
"pip_bin",
"=",
"self",
".",
"cmd_path",
"(",
"'pip'",
")",
"cmd",
"=",
"'{0} {1}'",
".",
"format",
"(",
"pip_bin",
",",
"cmd",
")",
"return",
"self",
".",
"_execute",
"(",
"cmd",
")"
]
| Execute some pip function using the virtual environment pip. | [
"Execute",
"some",
"pip",
"function",
"using",
"the",
"virtual",
"environment",
"pip",
"."
]
| 36d4e0e4d5ebced6385a6ade1198f4769ff2df41 | https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/command.py#L79-L83 | train |
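
Taken together, cmd_path, python and pip form a thin shell-out API over a virtual environment. A hedged wiring sketch; the VirtualEnvironment name and its constructor are assumptions for illustration, not an API confirmed by these records:

venv = VirtualEnvironment('/tmp/myvenv')   # hypothetical concrete class using CommandMixin
venv.pip('install requests')               # runs <venv>/bin/pip install requests
result = venv.python('-c "print(42)"')     # shlex.split handles the inner quoting
print(result.out.strip())                  # 42
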
lowandrew/OLCTools | spadespipeline/GeneSeekr.py | GeneSeekr.filterunique | def filterunique(self):
"""
Filters multiple BLAST hits in a common region of the genome. Leaves only the best hit
"""
for sample in self.metadata:
# Initialise variables
sample[self.analysistype].blastresults = list()
resultdict = dict()
rowdict = dict()
try:
# Iterate through all the contigs, which had BLAST hits
for contig in sample[self.analysistype].queryranges:
# Find all the locations in each contig that correspond to the BLAST hits
for location in sample[self.analysistype].queryranges[contig]:
# Extract the BLAST result dictionary for the contig
for row in sample[self.analysistype].results[contig]:
# Initialise variable to reduce the number of times row['value'] needs to be typed
contig = row['query_id']
high = row['high']
low = row['low']
percentidentity = row['percentidentity']
# Join the two ranges in the location list with a comma
locstr = ','.join([str(x) for x in location])
# Create a set of the location of all the base pairs between the low and high (-1) e.g.
# [6, 10] would give 6, 7, 8, 9, but NOT 10. This turns out to be useful, as there are
# genes located back-to-back in the genome e.g. strB and strA, with locations of 2557,3393
# and 3393,4196, respectively. By not including 3393 in the strB calculations, I don't
# have to worry about this single bp overlap
loc = set(range(low, high))
# Use a set intersection to determine whether the current result overlaps with location
# This will allow all the hits to be grouped together based on their location
if loc.intersection(set(range(location[0], location[1]))):
# Populate the grouped hits for each location
try:
resultdict[contig][locstr].append(percentidentity)
rowdict[contig][locstr].append(row)
# Initialise and populate the lists of the nested dictionary
except KeyError:
try:
resultdict[contig][locstr] = list()
resultdict[contig][locstr].append(percentidentity)
rowdict[contig][locstr] = list()
rowdict[contig][locstr].append(row)
# As this is a nested dictionary, it needs to be initialised here
except KeyError:
resultdict[contig] = dict()
resultdict[contig][locstr] = list()
resultdict[contig][locstr].append(percentidentity)
rowdict[contig] = dict()
rowdict[contig][locstr] = list()
rowdict[contig][locstr].append(row)
except KeyError:
pass
# Find the best hit for each location based on percent identity
for contig in resultdict:
# Do not allow the same gene to be added to the dictionary more than once
genes = list()
for location in resultdict[contig]:
# Initialise a variable to determine whether there is already a best hit found for the location
multiple = False
# Iterate through the BLAST results to find the best hit
for row in rowdict[contig][location]:
# Add the best hit to the .blastresults attribute of the object
if row['percentidentity'] == max(resultdict[contig][location]) and not multiple \
and row['subject_id'] not in genes:
sample[self.analysistype].blastresults.append(row)
genes.append(row['subject_id'])
multiple = True | python | def filterunique(self):
"""
Filters multiple BLAST hits in a common region of the genome. Leaves only the best hit
"""
for sample in self.metadata:
# Initialise variables
sample[self.analysistype].blastresults = list()
resultdict = dict()
rowdict = dict()
try:
# Iterate through all the contigs, which had BLAST hits
for contig in sample[self.analysistype].queryranges:
# Find all the locations in each contig that correspond to the BLAST hits
for location in sample[self.analysistype].queryranges[contig]:
# Extract the BLAST result dictionary for the contig
for row in sample[self.analysistype].results[contig]:
# Initialise variable to reduce the number of times row['value'] needs to be typed
contig = row['query_id']
high = row['high']
low = row['low']
percentidentity = row['percentidentity']
# Join the two ranges in the location list with a comma
locstr = ','.join([str(x) for x in location])
# Create a set of the location of all the base pairs between the low and high (-1) e.g.
# [6, 10] would give 6, 7, 8, 9, but NOT 10. This turns out to be useful, as there are
# genes located back-to-back in the genome e.g. strB and strA, with locations of 2557,3393
# and 3393,4196, respectively. By not including 3393 in the strB calculations, I don't
# have to worry about this single bp overlap
loc = set(range(low, high))
# Use a set intersection to determine whether the current result overlaps with location
# This will allow all the hits to be grouped together based on their location
if loc.intersection(set(range(location[0], location[1]))):
# Populate the grouped hits for each location
try:
resultdict[contig][locstr].append(percentidentity)
rowdict[contig][locstr].append(row)
# Initialise and populate the lists of the nested dictionary
except KeyError:
try:
resultdict[contig][locstr] = list()
resultdict[contig][locstr].append(percentidentity)
rowdict[contig][locstr] = list()
rowdict[contig][locstr].append(row)
# As this is a nested dictionary, it needs to be initialised here
except KeyError:
resultdict[contig] = dict()
resultdict[contig][locstr] = list()
resultdict[contig][locstr].append(percentidentity)
rowdict[contig] = dict()
rowdict[contig][locstr] = list()
rowdict[contig][locstr].append(row)
except KeyError:
pass
# Find the best hit for each location based on percent identity
for contig in resultdict:
# Do not allow the same gene to be added to the dictionary more than once
genes = list()
for location in resultdict[contig]:
# Initialise a variable to determine whether there is already a best hit found for the location
multiple = False
# Iterate through the BLAST results to find the best hit
for row in rowdict[contig][location]:
# Add the best hit to the .blastresults attribute of the object
if row['percentidentity'] == max(resultdict[contig][location]) and not multiple \
and row['subject_id'] not in genes:
sample[self.analysistype].blastresults.append(row)
genes.append(row['subject_id'])
multiple = True | [
"def",
"filterunique",
"(",
"self",
")",
":",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Initialise variables",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"blastresults",
"=",
"list",
"(",
")",
"resultdict",
"=",
"dict",
"(",
")",
"rowdict",
"=",
"dict",
"(",
")",
"try",
":",
"# Iterate through all the contigs, which had BLAST hits",
"for",
"contig",
"in",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"queryranges",
":",
"# Find all the locations in each contig that correspond to the BLAST hits",
"for",
"location",
"in",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"queryranges",
"[",
"contig",
"]",
":",
"# Extract the BLAST result dictionary for the contig",
"for",
"row",
"in",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"results",
"[",
"contig",
"]",
":",
"# Initialise variable to reduce the number of times row['value'] needs to be typed",
"contig",
"=",
"row",
"[",
"'query_id'",
"]",
"high",
"=",
"row",
"[",
"'high'",
"]",
"low",
"=",
"row",
"[",
"'low'",
"]",
"percentidentity",
"=",
"row",
"[",
"'percentidentity'",
"]",
"# Join the two ranges in the location list with a comma",
"locstr",
"=",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"location",
"]",
")",
"# Create a set of the location of all the base pairs between the low and high (-1) e.g.",
"# [6, 10] would give 6, 7, 8, 9, but NOT 10. This turns out to be useful, as there are",
"# genes located back-to-back in the genome e.g. strB and strA, with locations of 2557,3393",
"# and 3393,4196, respectively. By not including 3393 in the strB calculations, I don't",
"# have to worry about this single bp overlap",
"loc",
"=",
"set",
"(",
"range",
"(",
"low",
",",
"high",
")",
")",
"# Use a set intersection to determine whether the current result overlaps with location",
"# This will allow all the hits to be grouped together based on their location",
"if",
"loc",
".",
"intersection",
"(",
"set",
"(",
"range",
"(",
"location",
"[",
"0",
"]",
",",
"location",
"[",
"1",
"]",
")",
")",
")",
":",
"# Populate the grouped hits for each location",
"try",
":",
"resultdict",
"[",
"contig",
"]",
"[",
"locstr",
"]",
".",
"append",
"(",
"percentidentity",
")",
"rowdict",
"[",
"contig",
"]",
"[",
"locstr",
"]",
".",
"append",
"(",
"row",
")",
"# Initialise and populate the lists of the nested dictionary",
"except",
"KeyError",
":",
"try",
":",
"resultdict",
"[",
"contig",
"]",
"[",
"locstr",
"]",
"=",
"list",
"(",
")",
"resultdict",
"[",
"contig",
"]",
"[",
"locstr",
"]",
".",
"append",
"(",
"percentidentity",
")",
"rowdict",
"[",
"contig",
"]",
"[",
"locstr",
"]",
"=",
"list",
"(",
")",
"rowdict",
"[",
"contig",
"]",
"[",
"locstr",
"]",
".",
"append",
"(",
"row",
")",
"# As this is a nested dictionary, it needs to be initialised here",
"except",
"KeyError",
":",
"resultdict",
"[",
"contig",
"]",
"=",
"dict",
"(",
")",
"resultdict",
"[",
"contig",
"]",
"[",
"locstr",
"]",
"=",
"list",
"(",
")",
"resultdict",
"[",
"contig",
"]",
"[",
"locstr",
"]",
".",
"append",
"(",
"percentidentity",
")",
"rowdict",
"[",
"contig",
"]",
"=",
"dict",
"(",
")",
"rowdict",
"[",
"contig",
"]",
"[",
"locstr",
"]",
"=",
"list",
"(",
")",
"rowdict",
"[",
"contig",
"]",
"[",
"locstr",
"]",
".",
"append",
"(",
"row",
")",
"except",
"KeyError",
":",
"pass",
"# Find the best hit for each location based on percent identity",
"for",
"contig",
"in",
"resultdict",
":",
"# Do not allow the same gene to be added to the dictionary more than once",
"genes",
"=",
"list",
"(",
")",
"for",
"location",
"in",
"resultdict",
"[",
"contig",
"]",
":",
"# Initialise a variable to determine whether there is already a best hit found for the location",
"multiple",
"=",
"False",
"# Iterate through the BLAST results to find the best hit",
"for",
"row",
"in",
"rowdict",
"[",
"contig",
"]",
"[",
"location",
"]",
":",
"# Add the best hit to the .blastresults attribute of the object",
"if",
"row",
"[",
"'percentidentity'",
"]",
"==",
"max",
"(",
"resultdict",
"[",
"contig",
"]",
"[",
"location",
"]",
")",
"and",
"not",
"multiple",
"and",
"row",
"[",
"'subject_id'",
"]",
"not",
"in",
"genes",
":",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"blastresults",
".",
"append",
"(",
"row",
")",
"genes",
".",
"append",
"(",
"row",
"[",
"'subject_id'",
"]",
")",
"multiple",
"=",
"True"
]
| Filters multiple BLAST hits in a common region of the genome. Leaves only the best hit | [
"Filters",
"multiple",
"BLAST",
"hits",
"in",
"a",
"common",
"region",
"of",
"the",
"genome",
".",
"Leaves",
"only",
"the",
"best",
"hit"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/GeneSeekr.py#L51-L118 | train |
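
The overlap test in filterunique materialises base-pair sets, which makes the back-to-back-gene case easy to verify in isolation: the half-open range excludes the shared boundary coordinate, so adjacent genes do not register as overlapping.

hit = set(range(2557, 3393))       # strB-like hit covers 2557..3392, not 3393
neighbour = range(3393, 4196)      # strA-like location immediately downstream
print(bool(hit.intersection(neighbour)))          # False - no shared bases
print(bool(hit.intersection(range(3000, 3500))))  # True - genuine overlap
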
lowandrew/OLCTools | spadespipeline/GeneSeekr.py | GeneSeekr.makedbthreads | def makedbthreads(self):
"""
Setup and create threads for class
"""
# Find all the target folders in the analysis and add them to the targetfolders set
for sample in self.metadata:
if sample[self.analysistype].combinedtargets != 'NA':
self.targetfolders.add(sample[self.analysistype].targetpath)
# Create and start threads for each fasta file in the list
for i in range(len(self.targetfolders)):
# Send the threads to makeblastdb
threads = Thread(target=self.makeblastdb, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
# Make blast databases for MLST files (if necessary)
for targetdir in self.targetfolders:
            # Find all the fasta target files in the target directory
self.targetfiles = glob(os.path.join(targetdir, '*.fasta'))
try:
_ = self.targetfiles[0]
except IndexError:
self.targetfiles = glob(os.path.join(targetdir, '*.fasta'))
for targetfile in self.targetfiles:
# Read the sequences from the target file to a dictionary
self.records[targetfile] = SeqIO.to_dict(SeqIO.parse(targetfile, 'fasta'))
# Add the fasta file to the queue
self.dqueue.put(targetfile)
self.dqueue.join() | python | def makedbthreads(self):
"""
Setup and create threads for class
"""
# Find all the target folders in the analysis and add them to the targetfolders set
for sample in self.metadata:
if sample[self.analysistype].combinedtargets != 'NA':
self.targetfolders.add(sample[self.analysistype].targetpath)
# Create and start threads for each fasta file in the list
for i in range(len(self.targetfolders)):
# Send the threads to makeblastdb
threads = Thread(target=self.makeblastdb, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
# Make blast databases for MLST files (if necessary)
for targetdir in self.targetfolders:
            # Find all the fasta target files in the target directory
self.targetfiles = glob(os.path.join(targetdir, '*.fasta'))
try:
_ = self.targetfiles[0]
except IndexError:
self.targetfiles = glob(os.path.join(targetdir, '*.fasta'))
for targetfile in self.targetfiles:
# Read the sequences from the target file to a dictionary
self.records[targetfile] = SeqIO.to_dict(SeqIO.parse(targetfile, 'fasta'))
# Add the fasta file to the queue
self.dqueue.put(targetfile)
self.dqueue.join() | [
"def",
"makedbthreads",
"(",
"self",
")",
":",
"# Find all the target folders in the analysis and add them to the targetfolders set",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"if",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"combinedtargets",
"!=",
"'NA'",
":",
"self",
".",
"targetfolders",
".",
"add",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"targetpath",
")",
"# Create and start threads for each fasta file in the list",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"targetfolders",
")",
")",
":",
"# Send the threads to makeblastdb",
"threads",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"makeblastdb",
",",
"args",
"=",
"(",
")",
")",
"# Set the daemon to true - something to do with thread management",
"threads",
".",
"setDaemon",
"(",
"True",
")",
"# Start the threading",
"threads",
".",
"start",
"(",
")",
"# Make blast databases for MLST files (if necessary)",
"for",
"targetdir",
"in",
"self",
".",
"targetfolders",
":",
"# List comprehension to remove any previously created database files from list",
"self",
".",
"targetfiles",
"=",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"targetdir",
",",
"'*.fasta'",
")",
")",
"try",
":",
"_",
"=",
"self",
".",
"targetfiles",
"[",
"0",
"]",
"except",
"IndexError",
":",
"self",
".",
"targetfiles",
"=",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"targetdir",
",",
"'*.fasta'",
")",
")",
"for",
"targetfile",
"in",
"self",
".",
"targetfiles",
":",
"# Read the sequences from the target file to a dictionary",
"self",
".",
"records",
"[",
"targetfile",
"]",
"=",
"SeqIO",
".",
"to_dict",
"(",
"SeqIO",
".",
"parse",
"(",
"targetfile",
",",
"'fasta'",
")",
")",
"# Add the fasta file to the queue",
"self",
".",
"dqueue",
".",
"put",
"(",
"targetfile",
")",
"self",
".",
"dqueue",
".",
"join",
"(",
")"
]
| Setup and create threads for class | [
"Setup",
"and",
"create",
"threads",
"for",
"class"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/GeneSeekr.py#L120-L149 | train |
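
The daemon-thread/queue pattern above only unblocks dqueue.join() if the worker calls task_done() for every item (that call lives inside the real makeblastdb). A self-contained sketch with placeholder filenames and an arbitrary worker count:

import queue
import threading

dqueue = queue.Queue()

def worker():
    while True:
        targetfile = dqueue.get()
        # ... run makeblastdb against targetfile here ...
        dqueue.task_done()  # without this, dqueue.join() never returns

for _ in range(4):
    thread = threading.Thread(target=worker)
    thread.daemon = True    # daemon threads exit with the main program
    thread.start()
for targetfile in ['a.fasta', 'b.fasta']:
    dqueue.put(targetfile)
dqueue.join()               # blocks until every queued file is processed
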
grahame/dividebatur | dividebatur/senatecount.py | check_config | def check_config(config):
"basic checks that the configuration file is valid"
shortnames = [count['shortname'] for count in config['count']]
if len(shortnames) != len(set(shortnames)):
logger.error("error: duplicate `shortname' in count configuration.")
return False
return True | python | def check_config(config):
"basic checks that the configuration file is valid"
shortnames = [count['shortname'] for count in config['count']]
if len(shortnames) != len(set(shortnames)):
logger.error("error: duplicate `shortname' in count configuration.")
return False
return True | [
"def",
"check_config",
"(",
"config",
")",
":",
"shortnames",
"=",
"[",
"count",
"[",
"'shortname'",
"]",
"for",
"count",
"in",
"config",
"[",
"'count'",
"]",
"]",
"if",
"len",
"(",
"shortnames",
")",
"!=",
"len",
"(",
"set",
"(",
"shortnames",
")",
")",
":",
"logger",
".",
"error",
"(",
"\"error: duplicate `shortname' in count configuration.\"",
")",
"return",
"False",
"return",
"True"
]
| basic checks that the configuration file is valid | [
"basic",
"checks",
"that",
"the",
"configuration",
"file",
"is",
"valid"
]
| adc1f6e8013943471f1679e3c94f9448a1e4a472 | https://github.com/grahame/dividebatur/blob/adc1f6e8013943471f1679e3c94f9448a1e4a472/dividebatur/senatecount.py#L464-L470 | train |
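
A quick exercise of the duplicate-shortname guard with inline configurations; the dict shape mirrors exactly what the function reads:

assert check_config({'count': [{'shortname': 'fed2016'},
                               {'shortname': 'fed2019'}]}) is True
assert check_config({'count': [{'shortname': 'fed2016'},
                               {'shortname': 'fed2016'}]}) is False  # logs an error
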
grahame/dividebatur | dividebatur/senatecount.py | Automation._qstr | def _qstr(self, question):
"we need to cope with a list, or a list of lists"
parts = []
for entry in question:
if type(entry) is list:
parts.append(self._qstr(entry))
else:
parts.append('"%s"<%d>' % (self._count_data.get_candidate_title(entry), entry))
return ', '.join(parts) | python | def _qstr(self, question):
"we need to cope with a list, or a list of lists"
parts = []
for entry in question:
if type(entry) is list:
parts.append(self._qstr(entry))
else:
parts.append('"%s"<%d>' % (self._count_data.get_candidate_title(entry), entry))
return ', '.join(parts) | [
"def",
"_qstr",
"(",
"self",
",",
"question",
")",
":",
"parts",
"=",
"[",
"]",
"for",
"entry",
"in",
"question",
":",
"if",
"type",
"(",
"entry",
")",
"is",
"list",
":",
"parts",
".",
"append",
"(",
"self",
".",
"_qstr",
"(",
"entry",
")",
")",
"else",
":",
"parts",
".",
"append",
"(",
"'\"%s\"<%d>'",
"%",
"(",
"self",
".",
"_count_data",
".",
"get_candidate_title",
"(",
"entry",
")",
",",
"entry",
")",
")",
"return",
"', '",
".",
"join",
"(",
"parts",
")"
]
| we need to cope with a list, or a list of lists | [
"we",
"need",
"to",
"cope",
"with",
"a",
"list",
"or",
"a",
"list",
"of",
"lists"
]
| adc1f6e8013943471f1679e3c94f9448a1e4a472 | https://github.com/grahame/dividebatur/blob/adc1f6e8013943471f1679e3c94f9448a1e4a472/dividebatur/senatecount.py#L333-L341 | train |
grahame/dividebatur | dividebatur/senatecount.py | Automation.create_callback | def create_callback(self):
"""
create a callback, suitable to be passed to SenateCounter
"""
def __callback(question_posed):
logger.debug("%s: asked to choose between: %s" % (self._name, self._qstr(question_posed)))
if self._upto == len(self._data):
logger.error("%s: out of automation data, requested to pick between %s" % (self._name, self._qstr(question_posed)))
raise AutomationException("out of automation data")
question_archived, answer = self._data[self._upto]
if question_archived != question_posed:
logger.error("%s: automation data mismatch, expected question `%s', got question `%s'" % (self._name, self._qstr(question_archived), self._qstr(question_posed)))
resp = question_posed.index(answer)
self._upto += 1
return resp
return __callback | python | def create_callback(self):
"""
create a callback, suitable to be passed to SenateCounter
"""
def __callback(question_posed):
logger.debug("%s: asked to choose between: %s" % (self._name, self._qstr(question_posed)))
if self._upto == len(self._data):
logger.error("%s: out of automation data, requested to pick between %s" % (self._name, self._qstr(question_posed)))
raise AutomationException("out of automation data")
question_archived, answer = self._data[self._upto]
if question_archived != question_posed:
logger.error("%s: automation data mismatch, expected question `%s', got question `%s'" % (self._name, self._qstr(question_archived), self._qstr(question_posed)))
resp = question_posed.index(answer)
self._upto += 1
return resp
return __callback | [
"def",
"create_callback",
"(",
"self",
")",
":",
"def",
"__callback",
"(",
"question_posed",
")",
":",
"logger",
".",
"debug",
"(",
"\"%s: asked to choose between: %s\"",
"%",
"(",
"self",
".",
"_name",
",",
"self",
".",
"_qstr",
"(",
"question_posed",
")",
")",
")",
"if",
"self",
".",
"_upto",
"==",
"len",
"(",
"self",
".",
"_data",
")",
":",
"logger",
".",
"error",
"(",
"\"%s: out of automation data, requested to pick between %s\"",
"%",
"(",
"self",
".",
"_name",
",",
"self",
".",
"_qstr",
"(",
"question_posed",
")",
")",
")",
"raise",
"AutomationException",
"(",
"\"out of automation data\"",
")",
"question_archived",
",",
"answer",
"=",
"self",
".",
"_data",
"[",
"self",
".",
"_upto",
"]",
"if",
"question_archived",
"!=",
"question_posed",
":",
"logger",
".",
"error",
"(",
"\"%s: automation data mismatch, expected question `%s', got question `%s'\"",
"%",
"(",
"self",
".",
"_name",
",",
"self",
".",
"_qstr",
"(",
"question_archived",
")",
",",
"self",
".",
"_qstr",
"(",
"question_posed",
")",
")",
")",
"resp",
"=",
"question_posed",
".",
"index",
"(",
"answer",
")",
"self",
".",
"_upto",
"+=",
"1",
"return",
"resp",
"return",
"__callback"
]
| create a callback, suitable to be passed to SenateCounter | [
"create",
"a",
"callback",
"suitable",
"to",
"be",
"passed",
"to",
"SenateCounter"
]
| adc1f6e8013943471f1679e3c94f9448a1e4a472 | https://github.com/grahame/dividebatur/blob/adc1f6e8013943471f1679e3c94f9448a1e4a472/dividebatur/senatecount.py#L343-L358 | train |
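
At its core the callback is an index lookup against a recorded transcript. A stripped-down sketch of the same contract, without logging and with hypothetical candidate ids:

data = [([101, 102, 103], 102)]   # one archived (question, answer) pair
upto = 0

def callback(question_posed):
    global upto
    question_archived, answer = data[upto]
    assert question_archived == question_posed, 'automation data mismatch'
    upto += 1
    return question_posed.index(answer)

print(callback([101, 102, 103]))  # 1 - the counter picks the second candidate
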
jmbhughes/suvi-trainer | scripts/fetch_hek_labeled.py | main | def main():
"""
fetches hek data and makes thematic maps as requested
"""
args = get_args()
config = Config(args.config)
# Load dates
if os.path.isfile(args.dates):
with open(args.dates) as f:
dates = [dateparser.parse(line.split(" ")[0]) for line in f.readlines()]
else: # assume it's a date
dates = [dateparser.parse(args.dates)]
if args.verbose:
print("Dates are:")
for date in dates:
print(date)
for date in dates:
if args.verbose:
print('Processing {}'.format(date))
suvi_data = Fetcher(date, ['suvi-l2-ci195'],
suvi_composite_path=config.suvi_composite_path).fetch(multithread=False)['suvi-l2-ci195']
if suvi_data[0] is not None:
config.expert = 'HEK'
responses = query_hek(date)
thmap = make_thmap(suvi_data, responses, config)
Outgest(os.path.join(args.output, "thmap_hek_{}.fits".format(date.strftime("%Y%m%d%H%M%S"))),
thmap, {"c195": suvi_data[0], "suvi-l2-ci195": suvi_data[0]}, args.config).save() | python | def main():
"""
fetches hek data and makes thematic maps as requested
"""
args = get_args()
config = Config(args.config)
# Load dates
if os.path.isfile(args.dates):
with open(args.dates) as f:
dates = [dateparser.parse(line.split(" ")[0]) for line in f.readlines()]
else: # assume it's a date
dates = [dateparser.parse(args.dates)]
if args.verbose:
print("Dates are:")
for date in dates:
print(date)
for date in dates:
if args.verbose:
print('Processing {}'.format(date))
suvi_data = Fetcher(date, ['suvi-l2-ci195'],
suvi_composite_path=config.suvi_composite_path).fetch(multithread=False)['suvi-l2-ci195']
if suvi_data[0] is not None:
config.expert = 'HEK'
responses = query_hek(date)
thmap = make_thmap(suvi_data, responses, config)
Outgest(os.path.join(args.output, "thmap_hek_{}.fits".format(date.strftime("%Y%m%d%H%M%S"))),
thmap, {"c195": suvi_data[0], "suvi-l2-ci195": suvi_data[0]}, args.config).save() | [
"def",
"main",
"(",
")",
":",
"args",
"=",
"get_args",
"(",
")",
"config",
"=",
"Config",
"(",
"args",
".",
"config",
")",
"# Load dates",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"args",
".",
"dates",
")",
":",
"with",
"open",
"(",
"args",
".",
"dates",
")",
"as",
"f",
":",
"dates",
"=",
"[",
"dateparser",
".",
"parse",
"(",
"line",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
")",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
"]",
"else",
":",
"# assume it's a date",
"dates",
"=",
"[",
"dateparser",
".",
"parse",
"(",
"args",
".",
"dates",
")",
"]",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"Dates are:\"",
")",
"for",
"date",
"in",
"dates",
":",
"print",
"(",
"date",
")",
"for",
"date",
"in",
"dates",
":",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"'Processing {}'",
".",
"format",
"(",
"date",
")",
")",
"suvi_data",
"=",
"Fetcher",
"(",
"date",
",",
"[",
"'suvi-l2-ci195'",
"]",
",",
"suvi_composite_path",
"=",
"config",
".",
"suvi_composite_path",
")",
".",
"fetch",
"(",
"multithread",
"=",
"False",
")",
"[",
"'suvi-l2-ci195'",
"]",
"if",
"suvi_data",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"config",
".",
"expert",
"=",
"'HEK'",
"responses",
"=",
"query_hek",
"(",
"date",
")",
"thmap",
"=",
"make_thmap",
"(",
"suvi_data",
",",
"responses",
",",
"config",
")",
"Outgest",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output",
",",
"\"thmap_hek_{}.fits\"",
".",
"format",
"(",
"date",
".",
"strftime",
"(",
"\"%Y%m%d%H%M%S\"",
")",
")",
")",
",",
"thmap",
",",
"{",
"\"c195\"",
":",
"suvi_data",
"[",
"0",
"]",
",",
"\"suvi-l2-ci195\"",
":",
"suvi_data",
"[",
"0",
"]",
"}",
",",
"args",
".",
"config",
")",
".",
"save",
"(",
")"
]
| fetches hek data and makes thematic maps as requested | [
"fetches",
"hek",
"data",
"and",
"makes",
"thematic",
"maps",
"as",
"requested"
]
| 3d89894a4a037286221974c7eb5634d229b4f5d4 | https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/scripts/fetch_hek_labeled.py#L85-L114 | train |
TorkamaniLab/metapipe | metapipe/models/job_template.py | JobTemplate._get_jobs_from_template | def _get_jobs_from_template(self, template, job_class):
""" Given a template, a job class, construct jobs from
the given template.
"""
jobs = []
for command in template.eval():
alias = command.alias
depends_on = [job.alias
for job in self.queue.all_jobs
for deps in command.depends_on
if deps == job.alias]
command.update_dependent_files([job.command
for job in self.queue.all_jobs
if not isinstance(job, JobTemplate)])
job = job_class(alias, command, depends_on)
jobs.append(job)
return jobs | python | def _get_jobs_from_template(self, template, job_class):
""" Given a template, a job class, construct jobs from
the given template.
"""
jobs = []
for command in template.eval():
alias = command.alias
depends_on = [job.alias
for job in self.queue.all_jobs
for deps in command.depends_on
if deps == job.alias]
command.update_dependent_files([job.command
for job in self.queue.all_jobs
if not isinstance(job, JobTemplate)])
job = job_class(alias, command, depends_on)
jobs.append(job)
return jobs | [
"def",
"_get_jobs_from_template",
"(",
"self",
",",
"template",
",",
"job_class",
")",
":",
"jobs",
"=",
"[",
"]",
"for",
"command",
"in",
"template",
".",
"eval",
"(",
")",
":",
"alias",
"=",
"command",
".",
"alias",
"depends_on",
"=",
"[",
"job",
".",
"alias",
"for",
"job",
"in",
"self",
".",
"queue",
".",
"all_jobs",
"for",
"deps",
"in",
"command",
".",
"depends_on",
"if",
"deps",
"==",
"job",
".",
"alias",
"]",
"command",
".",
"update_dependent_files",
"(",
"[",
"job",
".",
"command",
"for",
"job",
"in",
"self",
".",
"queue",
".",
"all_jobs",
"if",
"not",
"isinstance",
"(",
"job",
",",
"JobTemplate",
")",
"]",
")",
"job",
"=",
"job_class",
"(",
"alias",
",",
"command",
",",
"depends_on",
")",
"jobs",
".",
"append",
"(",
"job",
")",
"return",
"jobs"
]
| Given a template, a job class, construct jobs from
the given template. | [
"Given",
"a",
"template",
"a",
"job",
"class",
"construct",
"jobs",
"from",
"the",
"given",
"template",
"."
]
| 15592e5b0c217afb00ac03503f8d0d7453d4baf4 | https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/job_template.py#L47-L64 | train |
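
The depends_on comprehension is a straight alias intersection between a command's declared dependencies and the jobs already queued; flattened out it reads as below (the names are illustrative stand-ins for the real job and command objects):

queued_aliases = ['align.1', 'align.2']   # stand-ins for job.alias values
declared = ['align.1', 'trim.1']          # stand-ins for command.depends_on
depends_on = [alias for alias in queued_aliases if alias in declared]
print(depends_on)  # ['align.1']
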
NoviceLive/intellicoder | intellicoder/executables/pe.py | PE.get_export_table | def get_export_table(self):
"""Get the export table."""
symbols = self.binary.DIRECTORY_ENTRY_EXPORT.symbols
names = AttrsGetter(symbols, join=False).name
return names | python | def get_export_table(self):
"""Get the export table."""
symbols = self.binary.DIRECTORY_ENTRY_EXPORT.symbols
names = AttrsGetter(symbols, join=False).name
return names | [
"def",
"get_export_table",
"(",
"self",
")",
":",
"symbols",
"=",
"self",
".",
"binary",
".",
"DIRECTORY_ENTRY_EXPORT",
".",
"symbols",
"names",
"=",
"AttrsGetter",
"(",
"symbols",
",",
"join",
"=",
"False",
")",
".",
"name",
"return",
"names"
]
| Get the export table. | [
"Get",
"the",
"export",
"table",
"."
]
| 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/executables/pe.py#L54-L58 | train |
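
For reference, the same names can be pulled with pefile directly (consistent with the DIRECTORY_ENTRY_EXPORT attribute used above); AttrsGetter simply collects .name across the symbols. The DLL path is illustrative:

import pefile

binary = pefile.PE('C:/Windows/System32/kernel32.dll')  # illustrative path
names = [symbol.name for symbol in binary.DIRECTORY_ENTRY_EXPORT.symbols]
print(names[:3])  # export names come back as bytes under Python 3
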
sholsapp/py509 | py509/x509.py | resolve_pkix_certificate | def resolve_pkix_certificate(url):
"""Resolve a certificate from a remote host.
Extensions like the authority information access extension point to
certificates hosted on remote servers. This functionc an be used to
download and load the certificate.
:param str url: The URL to resolve a certificate from.
:returns: The certificate.
:rtype: OpenSSL.crypto.X509
"""
http = urllib3.PoolManager()
rsp = http.request('GET', url, headers={'Content-Type': 'application/pkix-cert'})
if rsp.status == 200:
# if strict_compliance and 'application/x-x509-ca-cert' not in rsp.headers:
# # This web server's response isn't following the RFC, but might contain
# # data representing a DER encoded certificate.
# return
try:
return load_certificate(crypto.FILETYPE_ASN1, rsp.data)
except crypto.Error:
log.error('Failed to load DER encoded certificate from %s', url)
try:
return load_certificate(crypto.FILETYPE_PEM, rsp.data)
except crypto.Error:
log.error('Failed to load PEM encoded certificate from %s', url)
      raise RuntimeError('Failed to load any certificate from %s' % url)
else:
raise RuntimeError('Failed to fetch intermediate certificate at {0}!'.format(url)) | python | def resolve_pkix_certificate(url):
"""Resolve a certificate from a remote host.
Extensions like the authority information access extension point to
  certificates hosted on remote servers. This function can be used to
download and load the certificate.
:param str url: The URL to resolve a certificate from.
:returns: The certificate.
:rtype: OpenSSL.crypto.X509
"""
http = urllib3.PoolManager()
rsp = http.request('GET', url, headers={'Content-Type': 'application/pkix-cert'})
if rsp.status == 200:
# if strict_compliance and 'application/x-x509-ca-cert' not in rsp.headers:
# # This web server's response isn't following the RFC, but might contain
# # data representing a DER encoded certificate.
# return
try:
return load_certificate(crypto.FILETYPE_ASN1, rsp.data)
except crypto.Error:
log.error('Failed to load DER encoded certificate from %s', url)
try:
return load_certificate(crypto.FILETYPE_PEM, rsp.data)
except crypto.Error:
log.error('Failed to load PEM encoded certificate from %s', url)
      raise RuntimeError('Failed to load any certificate from %s' % url)
else:
raise RuntimeError('Failed to fetch intermediate certificate at {0}!'.format(url)) | [
"def",
"resolve_pkix_certificate",
"(",
"url",
")",
":",
"http",
"=",
"urllib3",
".",
"PoolManager",
"(",
")",
"rsp",
"=",
"http",
".",
"request",
"(",
"'GET'",
",",
"url",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/pkix-cert'",
"}",
")",
"if",
"rsp",
".",
"status",
"==",
"200",
":",
"# if strict_compliance and 'application/x-x509-ca-cert' not in rsp.headers:",
"# # This web server's response isn't following the RFC, but might contain",
"# # data representing a DER encoded certificate.",
"# return",
"try",
":",
"return",
"load_certificate",
"(",
"crypto",
".",
"FILETYPE_ASN1",
",",
"rsp",
".",
"data",
")",
"except",
"crypto",
".",
"Error",
":",
"log",
".",
"error",
"(",
"'Failed to load DER encoded certificate from %s'",
",",
"url",
")",
"try",
":",
"return",
"load_certificate",
"(",
"crypto",
".",
"FILETYPE_PEM",
",",
"rsp",
".",
"data",
")",
"except",
"crypto",
".",
"Error",
":",
"log",
".",
"error",
"(",
"'Failed to load PEM encoded certificate from %s'",
",",
"url",
")",
"raise",
"RuntimeError",
"(",
"'Failed to load any certificate from %s'",
",",
"url",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Failed to fetch intermediate certificate at {0}!'",
".",
"format",
"(",
"url",
")",
")"
]
| Resolve a certificate from a remote host.
Extensions like the authority information access extension point to
certificates hosted on remote servers. This function can be used to
download and load the certificate.
:param str url: The URL to resolve a certificate from.
:returns: The certificate.
:rtype: OpenSSL.crypto.X509 | [
"Resolve",
"a",
"certificate",
"from",
"a",
"remote",
"host",
"."
]
| 83bd6786a8ec1543b66c42ea5523e611c3e8dc5a | https://github.com/sholsapp/py509/blob/83bd6786a8ec1543b66c42ea5523e611c3e8dc5a/py509/x509.py#L14-L43 | train |
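
A usage sketch; the URL is a placeholder for whatever an AIA extension actually advertises:

url = 'http://example.com/intermediate.cer'  # placeholder AIA URL
intermediate = resolve_pkix_certificate(url)
print(intermediate.get_subject().CN)         # subject CN of the fetched certificate
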
sholsapp/py509 | py509/x509.py | make_certificate_signing_request | def make_certificate_signing_request(pkey, digest='sha512', **name):
"""Make a certificate signing request.
:param OpenSSL.crypto.PKey pkey: A private key.
:param str digest: A valid digest to use. For example, `sha512`.
  :param name: Keyword arguments containing subject name parts: C, ST, L, O,
OU, CN.
:return: A certificate signing request.
:rtype: :class:`OpenSSL.crypto.X509Request`
"""
csr = crypto.X509Req()
subj = csr.get_subject()
subj.C = name.get('C', 'US')
subj.ST = name.get('ST', 'CA')
subj.L = name.get('L', 'Home')
subj.O = name.get('O', 'Home')
subj.OU = name.get('OU', 'Unit')
subj.CN = name.get('CN', 'Common')
csr.set_pubkey(pkey)
csr.set_version(3)
csr.sign(pkey, digest)
return csr | python | def make_certificate_signing_request(pkey, digest='sha512', **name):
"""Make a certificate signing request.
:param OpenSSL.crypto.PKey pkey: A private key.
:param str digest: A valid digest to use. For example, `sha512`.
  :param name: Keyword arguments containing subject name parts: C, ST, L, O,
OU, CN.
:return: A certificate signing request.
:rtype: :class:`OpenSSL.crypto.X509Request`
"""
csr = crypto.X509Req()
subj = csr.get_subject()
subj.C = name.get('C', 'US')
subj.ST = name.get('ST', 'CA')
subj.L = name.get('L', 'Home')
subj.O = name.get('O', 'Home')
subj.OU = name.get('OU', 'Unit')
subj.CN = name.get('CN', 'Common')
csr.set_pubkey(pkey)
csr.set_version(3)
csr.sign(pkey, digest)
return csr | [
"def",
"make_certificate_signing_request",
"(",
"pkey",
",",
"digest",
"=",
"'sha512'",
",",
"*",
"*",
"name",
")",
":",
"csr",
"=",
"crypto",
".",
"X509Req",
"(",
")",
"subj",
"=",
"csr",
".",
"get_subject",
"(",
")",
"subj",
".",
"C",
"=",
"name",
".",
"get",
"(",
"'C'",
",",
"'US'",
")",
"subj",
".",
"ST",
"=",
"name",
".",
"get",
"(",
"'ST'",
",",
"'CA'",
")",
"subj",
".",
"L",
"=",
"name",
".",
"get",
"(",
"'L'",
",",
"'Home'",
")",
"subj",
".",
"O",
"=",
"name",
".",
"get",
"(",
"'O'",
",",
"'Home'",
")",
"subj",
".",
"OU",
"=",
"name",
".",
"get",
"(",
"'OU'",
",",
"'Unit'",
")",
"subj",
".",
"CN",
"=",
"name",
".",
"get",
"(",
"'CN'",
",",
"'Common'",
")",
"csr",
".",
"set_pubkey",
"(",
"pkey",
")",
"csr",
".",
"set_version",
"(",
"3",
")",
"csr",
".",
"sign",
"(",
"pkey",
",",
"digest",
")",
"return",
"csr"
]
| Make a certificate signing request.
:param OpenSSL.crypto.PKey pkey: A private key.
:param str digest: A valid digest to use. For example, `sha512`.
:param name: Key word arguments containing subject name parts: C, ST, L, O,
OU, CN.
:return: A certificate signing request.
:rtype: :class:`OpenSSL.crypto.X509Request` | [
"Make",
"a",
"certificate",
"signing",
"request",
"."
]
| 83bd6786a8ec1543b66c42ea5523e611c3e8dc5a | https://github.com/sholsapp/py509/blob/83bd6786a8ec1543b66c42ea5523e611c3e8dc5a/py509/x509.py#L71-L93 | train |
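
A usage sketch, assuming make_pkey (the helper used by make_certificate_authority below) returns a generated OpenSSL PKey:

key = make_pkey()  # assumed module helper, as used below
csr = make_certificate_signing_request(key, CN='service.example.com', O='Example')
print(csr.get_subject().CN)  # service.example.com
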
sholsapp/py509 | py509/x509.py | make_certificate | def make_certificate(csr, ca_key, ca_cert, serial, not_before, not_after, digest='sha512', version=2, exts=()):
"""Make a certificate.
The following extensions are added to all certificates in the following order
*before* additional extensions specified by `exts` kwarg:
- subjectKeyIdentifier
- authorityKeyIdentifier
:param OpenSSL.crypto.X509Request csr: A certificate signing request.
:param OpenSSL.crypto.PKey ca_key: The signing authority's key.
:param OpenSSL.crypto.X509 ca_cert: The signing authority's certificate.
:param int serial: A serial number.
:param int not_before: A number of seconds from now to wait before the certificate is valid.
:param int not_after: A number of seconds from now to wait before expiring the certificate.
:param str digest: A valid digest.
:param int version: The version of SSL to use with this certificate.
:param list[OpenSSL.crypto.X509Extension] exts: A list of extensions to add to this certificate.
:return: A X.509 certificate.
:rtype: :class:`OpenSSL.crypto.X509`
"""
crt = crypto.X509()
crt.set_serial_number(serial)
crt.gmtime_adj_notBefore(not_before)
crt.gmtime_adj_notAfter(not_after)
crt.set_issuer(ca_cert.get_subject())
crt.set_subject(csr.get_subject())
crt.set_pubkey(csr.get_pubkey())
crt.set_version(version)
crt.add_extensions([
crypto.X509Extension(b'subjectKeyIdentifier', False, b'hash', subject=crt)])
if ca_cert.get_subject() == crt.get_subject():
crt.add_extensions([
crypto.X509Extension(b'authorityKeyIdentifier', False, b'keyid:always', issuer=crt)])
else:
crt.add_extensions([
crypto.X509Extension(b'authorityKeyIdentifier', False, b'keyid:always', issuer=ca_cert)])
crt.add_extensions(exts)
crt.sign(ca_key, digest)
return crt | python | def make_certificate(csr, ca_key, ca_cert, serial, not_before, not_after, digest='sha512', version=2, exts=()):
"""Make a certificate.
The following extensions are added to all certificates in the following order
*before* additional extensions specified by `exts` kwarg:
- subjectKeyIdentifier
- authorityKeyIdentifier
:param OpenSSL.crypto.X509Request csr: A certificate signing request.
:param OpenSSL.crypto.PKey ca_key: The signing authority's key.
:param OpenSSL.crypto.X509 ca_cert: The signing authority's certificate.
:param int serial: A serial number.
:param int not_before: A number of seconds from now to wait before the certificate is valid.
:param int not_after: A number of seconds from now to wait before expiring the certificate.
:param str digest: A valid digest.
:param int version: The version of SSL to use with this certificate.
:param list[OpenSSL.crypto.X509Extension] exts: A list of extensions to add to this certificate.
  :return: An X.509 certificate.
:rtype: :class:`OpenSSL.crypto.X509`
"""
crt = crypto.X509()
crt.set_serial_number(serial)
crt.gmtime_adj_notBefore(not_before)
crt.gmtime_adj_notAfter(not_after)
crt.set_issuer(ca_cert.get_subject())
crt.set_subject(csr.get_subject())
crt.set_pubkey(csr.get_pubkey())
crt.set_version(version)
crt.add_extensions([
crypto.X509Extension(b'subjectKeyIdentifier', False, b'hash', subject=crt)])
if ca_cert.get_subject() == crt.get_subject():
crt.add_extensions([
crypto.X509Extension(b'authorityKeyIdentifier', False, b'keyid:always', issuer=crt)])
else:
crt.add_extensions([
crypto.X509Extension(b'authorityKeyIdentifier', False, b'keyid:always', issuer=ca_cert)])
crt.add_extensions(exts)
crt.sign(ca_key, digest)
return crt | [
"def",
"make_certificate",
"(",
"csr",
",",
"ca_key",
",",
"ca_cert",
",",
"serial",
",",
"not_before",
",",
"not_after",
",",
"digest",
"=",
"'sha512'",
",",
"version",
"=",
"2",
",",
"exts",
"=",
"(",
")",
")",
":",
"crt",
"=",
"crypto",
".",
"X509",
"(",
")",
"crt",
".",
"set_serial_number",
"(",
"serial",
")",
"crt",
".",
"gmtime_adj_notBefore",
"(",
"not_before",
")",
"crt",
".",
"gmtime_adj_notAfter",
"(",
"not_after",
")",
"crt",
".",
"set_issuer",
"(",
"ca_cert",
".",
"get_subject",
"(",
")",
")",
"crt",
".",
"set_subject",
"(",
"csr",
".",
"get_subject",
"(",
")",
")",
"crt",
".",
"set_pubkey",
"(",
"csr",
".",
"get_pubkey",
"(",
")",
")",
"crt",
".",
"set_version",
"(",
"version",
")",
"crt",
".",
"add_extensions",
"(",
"[",
"crypto",
".",
"X509Extension",
"(",
"b'subjectKeyIdentifier'",
",",
"False",
",",
"b'hash'",
",",
"subject",
"=",
"crt",
")",
"]",
")",
"if",
"ca_cert",
".",
"get_subject",
"(",
")",
"==",
"crt",
".",
"get_subject",
"(",
")",
":",
"crt",
".",
"add_extensions",
"(",
"[",
"crypto",
".",
"X509Extension",
"(",
"b'authorityKeyIdentifier'",
",",
"False",
",",
"b'keyid:always'",
",",
"issuer",
"=",
"crt",
")",
"]",
")",
"else",
":",
"crt",
".",
"add_extensions",
"(",
"[",
"crypto",
".",
"X509Extension",
"(",
"b'authorityKeyIdentifier'",
",",
"False",
",",
"b'keyid:always'",
",",
"issuer",
"=",
"ca_cert",
")",
"]",
")",
"crt",
".",
"add_extensions",
"(",
"exts",
")",
"crt",
".",
"sign",
"(",
"ca_key",
",",
"digest",
")",
"return",
"crt"
]
| Make a certificate.
The following extensions are added to all certificates in the following order
*before* additional extensions specified by `exts` kwarg:
- subjectKeyIdentifier
- authorityKeyIdentifier
:param OpenSSL.crypto.X509Req csr: A certificate signing request.
:param OpenSSL.crypto.PKey ca_key: The signing authority's key.
:param OpenSSL.crypto.X509 ca_cert: The signing authority's certificate.
:param int serial: A serial number.
:param int not_before: A number of seconds from now to wait before the certificate is valid.
:param int not_after: A number of seconds from now to wait before expiring the certificate.
:param str digest: A valid digest.
:param int version: The X.509 structure version to use (2 corresponds to an X.509v3 certificate).
:param list[OpenSSL.crypto.X509Extension] exts: A list of extensions to add to this certificate.
:return: An X.509 certificate.
:rtype: :class:`OpenSSL.crypto.X509` | [
"Make",
"a",
"certificate",
"."
]
| 83bd6786a8ec1543b66c42ea5523e611c3e8dc5a | https://github.com/sholsapp/py509/blob/83bd6786a8ec1543b66c42ea5523e611c3e8dc5a/py509/x509.py#L96-L139 | train |
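A hedged end-to-end sketch of issuing a leaf certificate with `make_certificate`, using `make_certificate_authority` and `make_serial` from the records that follow; the one-year lifetime and the subjectAltName extension are illustrative choices, not py509 defaults.

```python
from OpenSSL import crypto
from py509.x509 import (make_certificate, make_certificate_authority,
                        make_certificate_signing_request, make_pkey,
                        make_serial)

ca_key, ca_cert = make_certificate_authority(CN='Example Root CA')

leaf_key = make_pkey()
csr = make_certificate_signing_request(leaf_key, CN='host.example.com')

crt = make_certificate(
    csr, ca_key, ca_cert, make_serial(),
    0, 365 * 24 * 60 * 60,  # valid from now, for one year
    exts=[crypto.X509Extension(b'subjectAltName', False,
                               b'DNS:host.example.com')])
print(crt.get_subject().CN, crt.get_serial_number())
```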
sholsapp/py509 | py509/x509.py | make_certificate_authority | def make_certificate_authority(**name):
"""Make a certificate authority.
A certificate authority can sign certificates. For clients to be able to
validate certificates signed by your certificate authority, they must trust
the certificate returned by this function.
:param name: Keyword arguments containing subject name parts: C, ST, L, O,
OU, CN.
:return: A root self-signed certificate to act as an authority.
:rtype: :class:`OpenSSL.crypto.X509`
"""
key = make_pkey()
csr = make_certificate_signing_request(key, **name)
crt = make_certificate(csr, key, csr, make_serial(), 0, 10 * 365 * 24 * 60 * 60, exts=[crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE')])
return key, crt | python | def make_certificate_authority(**name):
"""Make a certificate authority.
A certificate authority can sign certificates. For clients to be able to
validate certificates signed by your certificate authority, they must trust
the certificate returned by this function.
:param name: Keyword arguments containing subject name parts: C, ST, L, O,
OU, CN.
:return: A root self-signed certificate to act as an authority.
:rtype: :class:`OpenSSL.crypto.X509`
"""
key = make_pkey()
csr = make_certificate_signing_request(key, **name)
crt = make_certificate(csr, key, csr, make_serial(), 0, 10 * 365 * 24 * 60 * 60, exts=[crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE')])
return key, crt | [
"def",
"make_certificate_authority",
"(",
"*",
"*",
"name",
")",
":",
"key",
"=",
"make_pkey",
"(",
")",
"csr",
"=",
"make_certificate_signing_request",
"(",
"key",
",",
"*",
"*",
"name",
")",
"crt",
"=",
"make_certificate",
"(",
"csr",
",",
"key",
",",
"csr",
",",
"make_serial",
"(",
")",
",",
"0",
",",
"10",
"*",
"365",
"*",
"24",
"*",
"60",
"*",
"60",
",",
"exts",
"=",
"[",
"crypto",
".",
"X509Extension",
"(",
"b'basicConstraints'",
",",
"True",
",",
"b'CA:TRUE'",
")",
"]",
")",
"return",
"key",
",",
"crt"
]
| Make a certificate authority.
A certificate authority can sign certificates. For clients to be able to
validate certificates signed by your certificate authority, they must trust
the certificate returned by this function.
:param name: Keyword arguments containing subject name parts: C, ST, L, O,
OU, CN.
:return: A root self-signed certificate to act as an authority.
:rtype: :class:`OpenSSL.crypto.X509` | [
"Make",
"a",
"certificate",
"authority",
"."
]
| 83bd6786a8ec1543b66c42ea5523e611c3e8dc5a | https://github.com/sholsapp/py509/blob/83bd6786a8ec1543b66c42ea5523e611c3e8dc5a/py509/x509.py#L142-L158 | train |
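A short sketch persisting the key/certificate pair returned above; the file names are arbitrary.

```python
from OpenSSL import crypto
from py509.x509 import make_certificate_authority

ca_key, ca_cert = make_certificate_authority(
    C='US', O='Example Org', CN='Example Root CA')

# Clients only need the certificate (not the key) to trust this CA.
with open('ca.key.pem', 'wb') as f:
    f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, ca_key))
with open('ca.crt.pem', 'wb') as f:
    f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, ca_cert))
```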
sholsapp/py509 | py509/x509.py | load_certificate | def load_certificate(filetype, buf):
"""Load a certificate and patch in incubating functionality.
Load a certificate using the same API as
:func:`OpenSSL.crypto.load_certificate` so clients can use this function as a
drop-in replacement. Doing so patches in *incubating* functionality:
functionality that is not yet (or possibly will never be) present in
pyOpenSSL.
:param int filetype: The type of data in ``buf`` -- either
:py:data:`OpenSSL.crypto.FILETYPE_PEM` or
:py:data:`OpenSSL.crypto.FILETYPE_ASN1`.
:param str buf: The buffer containing the certificate.
"""
x509cert = crypto.load_certificate(filetype, buf)
patch_certificate(x509cert)
return x509cert | python | def load_certificate(filetype, buf):
"""Load a certificate and patch in incubating functionality.
Load a certificate using the same API as
:func:`OpenSSL.crypto.load_certificate` so clients can use this function as a
drop-in replacement. Doing so patches in *incubating* functionality:
functionality that is not yet (or possibly will never be) present in
pyOpenSSL.
:param int filetype: The type of data in ``buf`` -- either
:py:data:`OpenSSL.crypto.FILETYPE_PEM` or
:py:data:`OpenSSL.crypto.FILETYPE_ASN1`.
:param str buf: The buffer containing the certificate.
"""
x509cert = crypto.load_certificate(filetype, buf)
patch_certificate(x509cert)
return x509cert | [
"def",
"load_certificate",
"(",
"filetype",
",",
"buf",
")",
":",
"x509cert",
"=",
"crypto",
".",
"load_certificate",
"(",
"filetype",
",",
"buf",
")",
"patch_certificate",
"(",
"x509cert",
")",
"return",
"x509cert"
]
| Load a certificate and patch in incubating functionality.
Load a certificate using the same API as
:func:`OpenSSL.crypto.load_certificate` so clients can use this function as a
drop-in replacement. Doing so patches in *incubating* functionality:
functionality that is not yet (or possibly will never be) present in
pyOpenSSL.
:param int filetype: The type of data in ``buf`` -- either
:py:data:`OpenSSL.crypto.FILETYPE_PEM` or
:py:data:`OpenSSL.crypto.FILETYPE_ASN1`.
:param str buf: The buffer containing the certificate. | [
"Load",
"a",
"certificate",
"and",
"patch",
"in",
"incubating",
"functionality",
"."
]
| 83bd6786a8ec1543b66c42ea5523e611c3e8dc5a | https://github.com/sholsapp/py509/blob/83bd6786a8ec1543b66c42ea5523e611c3e8dc5a/py509/x509.py#L221-L237 | train |
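Usage mirrors `OpenSSL.crypto.load_certificate`; which incubating helpers `patch_certificate` attaches is not shown in this record, so the sketch relies only on the standard pyOpenSSL API.

```python
from OpenSSL import crypto
from py509.x509 import load_certificate

with open('ca.crt.pem', 'rb') as f:
    cert = load_certificate(crypto.FILETYPE_PEM, f.read())

# Behaves like a regular pyOpenSSL X509 object, plus the patched-in extras.
print(cert.get_subject().CN)
```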
sholsapp/py509 | py509/x509.py | load_x509_certificates | def load_x509_certificates(buf):
"""Load one or multiple X.509 certificates from a buffer.
:param str buf: A buffer; must be an instance of `basestring` and may contain multiple
certificates.
:return: An iterator that iterates over certificates in a buffer.
:rtype: list[:class:`OpenSSL.crypto.X509`]
"""
if not isinstance(buf, basestring):
raise ValueError('`buf` should be an instance of `basestring` not `%s`' % type(buf))
for pem in re.findall('(-----BEGIN CERTIFICATE-----\s(\S+\n*)+\s-----END CERTIFICATE-----\s)', buf):
yield load_certificate(crypto.FILETYPE_PEM, pem[0]) | python | def load_x509_certificates(buf):
"""Load one or multiple X.509 certificates from a buffer.
:param str buf: A buffer; must be an instance of `basestring` and may contain multiple
certificates.
:return: An iterator that iterates over certificates in a buffer.
:rtype: list[:class:`OpenSSL.crypto.X509`]
"""
if not isinstance(buf, basestring):
raise ValueError('`buf` should be an instance of `basestring` not `%s`' % type(buf))
for pem in re.findall('(-----BEGIN CERTIFICATE-----\s(\S+\n*)+\s-----END CERTIFICATE-----\s)', buf):
yield load_certificate(crypto.FILETYPE_PEM, pem[0]) | [
"def",
"load_x509_certificates",
"(",
"buf",
")",
":",
"if",
"not",
"isinstance",
"(",
"buf",
",",
"basestring",
")",
":",
"raise",
"ValueError",
"(",
"'`buf` should be an instance of `basestring` not `%s`'",
"%",
"type",
"(",
"buf",
")",
")",
"for",
"pem",
"in",
"re",
".",
"findall",
"(",
"'(-----BEGIN CERTIFICATE-----\\s(\\S+\\n*)+\\s-----END CERTIFICATE-----\\s)'",
",",
"buf",
")",
":",
"yield",
"load_certificate",
"(",
"crypto",
".",
"FILETYPE_PEM",
",",
"pem",
"[",
"0",
"]",
")"
]
| Load one or multiple X.509 certificates from a buffer.
:param str buf: A buffer; must be an instance of `basestring` and may contain multiple
certificates.
:return: An iterator that iterates over certificates in a buffer.
:rtype: list[:class:`OpenSSL.crypto.X509`] | [
"Load",
"one",
"or",
"multiple",
"X",
".",
"509",
"certificates",
"from",
"a",
"buffer",
"."
]
| 83bd6786a8ec1543b66c42ea5523e611c3e8dc5a | https://github.com/sholsapp/py509/blob/83bd6786a8ec1543b66c42ea5523e611c3e8dc5a/py509/x509.py#L240-L253 | train |
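Note that the `basestring` guard makes this function Python-2-only as written; on Python 3 the equivalent check would be against `str`. A sketch iterating over a system CA bundle (the bundle path is illustrative):

```python
from py509.x509 import load_x509_certificates

with open('/etc/ssl/certs/ca-certificates.crt') as f:  # any PEM bundle
    for cert in load_x509_certificates(f.read()):
        print(cert.get_subject().CN)
```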
lowandrew/OLCTools | databasesetup/database_setup.py | DatabaseSetup.cowbat | def cowbat(self):
"""
Run all the methods
"""
logging.info('Beginning COWBAT database downloads')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'genesippr')):
self.sipprverse_targets(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'coregenome')):
self.cowbat_targets(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'ConFindr')):
self.confindr_targets()
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'mash')):
self.mash(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'MLST')):
self.mlst(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'rMLST')):
self.rmlst(databasepath=self.databasepath,
credentials=self.credentials)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'univec')):
self.univec(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'resfinder')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='resfinder',
dbname='resfinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'virulence')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='virulence',
dbname='virulencefinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'serosippr')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='serosippr',
dbname='serotypefinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'pointfinder')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='pointfinder',
dbname='pointfinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'clark')):
self.clark(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'mob_suite')):
self.mob_suite_targets() | python | def cowbat(self):
"""
Run all the methods
"""
logging.info('Beginning COWBAT database downloads')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'genesippr')):
self.sipprverse_targets(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'coregenome')):
self.cowbat_targets(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'ConFindr')):
self.confindr_targets()
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'mash')):
self.mash(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'MLST')):
self.mlst(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'rMLST')):
self.rmlst(databasepath=self.databasepath,
credentials=self.credentials)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'univec')):
self.univec(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'resfinder')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='resfinder',
dbname='resfinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'virulence')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='virulence',
dbname='virulencefinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'serosippr')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='serosippr',
dbname='serotypefinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'pointfinder')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='pointfinder',
dbname='pointfinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'clark')):
self.clark(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'mob_suite')):
self.mob_suite_targets() | [
"def",
"cowbat",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Beginning COWBAT database downloads'",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'genesippr'",
")",
")",
":",
"self",
".",
"sipprverse_targets",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'coregenome'",
")",
")",
":",
"self",
".",
"cowbat_targets",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'ConFindr'",
")",
")",
":",
"self",
".",
"confindr_targets",
"(",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'mash'",
")",
")",
":",
"self",
".",
"mash",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'MLST'",
")",
")",
":",
"self",
".",
"mlst",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'rMLST'",
")",
")",
":",
"self",
".",
"rmlst",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
",",
"credentials",
"=",
"self",
".",
"credentials",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'univec'",
")",
")",
":",
"self",
".",
"univec",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'resfinder'",
")",
")",
":",
"self",
".",
"cge_db_downloader",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
",",
"analysistype",
"=",
"'resfinder'",
",",
"dbname",
"=",
"'resfinder_db'",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'virulence'",
")",
")",
":",
"self",
".",
"cge_db_downloader",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
",",
"analysistype",
"=",
"'virulence'",
",",
"dbname",
"=",
"'virulencefinder_db'",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'serosippr'",
")",
")",
":",
"self",
".",
"cge_db_downloader",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
",",
"analysistype",
"=",
"'serosippr'",
",",
"dbname",
"=",
"'serotypefinder_db'",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'pointfinder'",
")",
")",
":",
"self",
".",
"cge_db_downloader",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
",",
"analysistype",
"=",
"'pointfinder'",
",",
"dbname",
"=",
"'pointfinder_db'",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'clark'",
")",
")",
":",
"self",
".",
"clark",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'mob_suite'",
")",
")",
":",
"self",
".",
"mob_suite_targets",
"(",
")"
]
| Run all the methods | [
"Run",
"all",
"the",
"methods"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/databasesetup/database_setup.py#L21-L60 | train |
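Every branch above repeats the same idempotent guard: download only when `overwrite` is forced or the target directory is missing. A hypothetical helper (`needs_download` is not part of OLCTools) factoring out that pattern:

```python
import os

def needs_download(databasepath, dbname, overwrite=False):
    # Mirrors cowbat()'s per-database check: fetch on demand or on force.
    return overwrite or not os.path.isdir(os.path.join(databasepath, dbname))

if needs_download('/databases', 'resfinder'):
    print('would fetch resfinder into /databases/resfinder')
```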
lowandrew/OLCTools | databasesetup/database_setup.py | DatabaseSetup.sipprverse_full | def sipprverse_full(self):
"""
Run a subset of the methods - only the targets used in the sipprverse are required here
"""
logging.info('Beginning sipprverse full database downloads')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'genesippr')):
self.sipprverse_targets(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'ConFindr')):
self.confindr_targets()
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'mash')):
self.mash(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'MLST')):
self.mlst(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'rMLST')):
self.rmlst(databasepath=self.databasepath,
credentials=self.credentials)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'resfinder')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='resfinder',
dbname='resfinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'virulence')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='virulence',
dbname='virulencefinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'serosippr')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='serosippr',
dbname='serotypefinder_db') | python | def sipprverse_full(self):
"""
Run a subset of the methods - only the targets used in the sipprverse are required here
"""
logging.info('Beginning sipprverse full database downloads')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'genesippr')):
self.sipprverse_targets(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'ConFindr')):
self.confindr_targets()
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'mash')):
self.mash(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'MLST')):
self.mlst(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'rMLST')):
self.rmlst(databasepath=self.databasepath,
credentials=self.credentials)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'resfinder')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='resfinder',
dbname='resfinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'virulence')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='virulence',
dbname='virulencefinder_db')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'serosippr')):
self.cge_db_downloader(databasepath=self.databasepath,
analysistype='serosippr',
dbname='serotypefinder_db') | [
"def",
"sipprverse_full",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Beginning sipprverse full database downloads'",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'genesippr'",
")",
")",
":",
"self",
".",
"sipprverse_targets",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'ConFindr'",
")",
")",
":",
"self",
".",
"confindr_targets",
"(",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'mash'",
")",
")",
":",
"self",
".",
"mash",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'MLST'",
")",
")",
":",
"self",
".",
"mlst",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'rMLST'",
")",
")",
":",
"self",
".",
"rmlst",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
",",
"credentials",
"=",
"self",
".",
"credentials",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'resfinder'",
")",
")",
":",
"self",
".",
"cge_db_downloader",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
",",
"analysistype",
"=",
"'resfinder'",
",",
"dbname",
"=",
"'resfinder_db'",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'virulence'",
")",
")",
":",
"self",
".",
"cge_db_downloader",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
",",
"analysistype",
"=",
"'virulence'",
",",
"dbname",
"=",
"'virulencefinder_db'",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'serosippr'",
")",
")",
":",
"self",
".",
"cge_db_downloader",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
",",
"analysistype",
"=",
"'serosippr'",
",",
"dbname",
"=",
"'serotypefinder_db'",
")"
]
| Run a subset of the methods - only the targets used in the sipprverse are required here | [
"Run",
"a",
"subset",
"of",
"the",
"methods",
"-",
"only",
"the",
"targets",
"used",
"in",
"the",
"sipprverse",
"are",
"required",
"here"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/databasesetup/database_setup.py#L62-L89 | train |
lowandrew/OLCTools | databasesetup/database_setup.py | DatabaseSetup.sipprverse_method | def sipprverse_method(self):
"""
Reduced subset again. Only sipprverse, MASH, and confindr targets are required
"""
logging.info('Beginning sipprverse method database downloads')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'genesippr')):
self.sipprverse_targets(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'ConFindr')):
self.confindr_targets()
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'mash')):
self.mash(databasepath=self.databasepath) | python | def sipprverse_method(self):
"""
Reduced subset again. Only sipprverse, MASH, and confindr targets are required
"""
logging.info('Beginning sipprverse method database downloads')
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'genesippr')):
self.sipprverse_targets(databasepath=self.databasepath)
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'ConFindr')):
self.confindr_targets()
if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, 'mash')):
self.mash(databasepath=self.databasepath) | [
"def",
"sipprverse_method",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Beginning sipprverse method database downloads'",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'genesippr'",
")",
")",
":",
"self",
".",
"sipprverse_targets",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'ConFindr'",
")",
")",
":",
"self",
".",
"confindr_targets",
"(",
")",
"if",
"self",
".",
"overwrite",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"databasepath",
",",
"'mash'",
")",
")",
":",
"self",
".",
"mash",
"(",
"databasepath",
"=",
"self",
".",
"databasepath",
")"
]
| Reduced subset again. Only sipprverse, MASH, and confindr targets are required | [
"Reduced",
"subset",
"again",
".",
"Only",
"sipprverse",
"MASH",
"and",
"confindr",
"targets",
"are",
"required"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/databasesetup/database_setup.py#L91-L101 | train |
yamcs/yamcs-python | yamcs-client/yamcs/model.py | Event.severity | def severity(self):
"""
Severity level of the event. One of ``INFO``, ``WATCH``,
``WARNING``, ``DISTRESS``, ``CRITICAL`` or ``SEVERE``.
"""
if self._proto.HasField('severity'):
return yamcs_pb2.Event.EventSeverity.Name(self._proto.severity)
return None | python | def severity(self):
"""
Severity level of the event. One of ``INFO``, ``WATCH``,
``WARNING``, ``DISTRESS``, ``CRITICAL`` or ``SEVERE``.
"""
if self._proto.HasField('severity'):
return yamcs_pb2.Event.EventSeverity.Name(self._proto.severity)
return None | [
"def",
"severity",
"(",
"self",
")",
":",
"if",
"self",
".",
"_proto",
".",
"HasField",
"(",
"'severity'",
")",
":",
"return",
"yamcs_pb2",
".",
"Event",
".",
"EventSeverity",
".",
"Name",
"(",
"self",
".",
"_proto",
".",
"severity",
")",
"return",
"None"
]
| Severity level of the event. One of ``INFO``, ``WATCH``,
``WARNING``, ``DISTRESS``, ``CRITICAL`` or ``SEVERE``. | [
"Severity",
"level",
"of",
"the",
"event",
".",
"One",
"of",
"INFO",
"WATCH",
"WARNING",
"DISTRESS",
"CRITICAL",
"or",
"SEVERE",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/model.py#L129-L136 | train |
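Because the property returns the enum name as a plain string (or `None` when the field is unset), downstream code can filter on names directly. A hypothetical consumer-side filter; `events` is assumed to be an iterable of Event objects obtained from a yamcs-client listing call:

```python
ALARM_LEVELS = {'DISTRESS', 'CRITICAL', 'SEVERE'}

def alarms(events):
    # ev.severity is a string such as 'WARNING', or None when unset.
    return [ev for ev in events if ev.severity in ALARM_LEVELS]
```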
yamcs/yamcs-python | yamcs-client/yamcs/model.py | Instance.state | def state(self):
"""
State of this instance. One of ``OFFLINE``, ``INITIALIZING``,
``INITIALIZED``, ``STARTING``, ``RUNNING``, ``STOPPING`` or
``FAILED``.
"""
if self._proto.HasField('state'):
return yamcsManagement_pb2.YamcsInstance.InstanceState.Name(self._proto.state)
return None | python | def state(self):
"""
State of this instance. One of ``OFFLINE``, ``INITIALIZING``,
``INITIALIZED``, ``STARTING``, ``RUNNING``, ``STOPPING`` or
``FAILED``.
"""
if self._proto.HasField('state'):
return yamcsManagement_pb2.YamcsInstance.InstanceState.Name(self._proto.state)
return None | [
"def",
"state",
"(",
"self",
")",
":",
"if",
"self",
".",
"_proto",
".",
"HasField",
"(",
"'state'",
")",
":",
"return",
"yamcsManagement_pb2",
".",
"YamcsInstance",
".",
"InstanceState",
".",
"Name",
"(",
"self",
".",
"_proto",
".",
"state",
")",
"return",
"None"
]
| State of this instance. One of ``OFFLINE``, ``INITIALIZING``,
``INITIALIZED``, ``STARTING``, ``RUNNING``, ``STOPPING`` or
``FAILED``. | [
"State",
"of",
"this",
"instance",
".",
"One",
"of",
"OFFLINE",
"INITIALIZING",
"INITIALIZED",
"STARTING",
"RUNNING",
"STOPPING",
"or",
"FAILED",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/model.py#L267-L275 | train |
yamcs/yamcs-python | yamcs-client/yamcs/model.py | Service.state | def state(self):
"""State of this service."""
if self._proto.HasField('state'):
return yamcsManagement_pb2.ServiceState.Name(self._proto.state)
return None | python | def state(self):
"""State of this service."""
if self._proto.HasField('state'):
return yamcsManagement_pb2.ServiceState.Name(self._proto.state)
return None | [
"def",
"state",
"(",
"self",
")",
":",
"if",
"self",
".",
"_proto",
".",
"HasField",
"(",
"'state'",
")",
":",
"return",
"yamcsManagement_pb2",
".",
"ServiceState",
".",
"Name",
"(",
"self",
".",
"_proto",
".",
"state",
")",
"return",
"None"
]
| State of this service. | [
"State",
"of",
"this",
"service",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/model.py#L345-L349 | train |
dalloriam/engel | engel/widgets/base.py | BaseContainer.add_child | def add_child(self, child):
"""
Add a new child element to this widget.
:param child: Object inheriting :class:`BaseElement`.
"""
self.children.append(child)
child.parent = self
if self.view and self.view.is_loaded:
self.view.dispatch({
'name': 'append',
'html': child.compile(),
'selector': '#' + str(self.id)
}) | python | def add_child(self, child):
"""
Add a new child element to this widget.
:param child: Object inheriting :class:`BaseElement`.
"""
self.children.append(child)
child.parent = self
if self.view and self.view.is_loaded:
self.view.dispatch({
'name': 'append',
'html': child.compile(),
'selector': '#' + str(self.id)
}) | [
"def",
"add_child",
"(",
"self",
",",
"child",
")",
":",
"self",
".",
"children",
".",
"append",
"(",
"child",
")",
"child",
".",
"parent",
"=",
"self",
"if",
"self",
".",
"view",
"and",
"self",
".",
"view",
".",
"is_loaded",
":",
"self",
".",
"view",
".",
"dispatch",
"(",
"{",
"'name'",
":",
"'append'",
",",
"'html'",
":",
"child",
".",
"compile",
"(",
")",
",",
"'selector'",
":",
"'#'",
"+",
"str",
"(",
"self",
".",
"id",
")",
"}",
")"
]
| Add a new child element to this widget.
:param child: Object inheriting :class:`BaseElement`. | [
"Add",
"a",
"new",
"child",
"element",
"to",
"this",
"widget",
"."
]
| f3477cd546e885bc53e755b3eb1452ce43ef5697 | https://github.com/dalloriam/engel/blob/f3477cd546e885bc53e755b3eb1452ce43ef5697/engel/widgets/base.py#L166-L180 | train |
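The notable contract here is the live-update path: once the view is loaded, a structural change is pushed as a small dispatch command rather than a full re-render. A stripped-down, hypothetical re-creation of that contract (these are not engel's real classes, just a sketch of the pattern):

```python
class RecordingView:
    """Stands in for a loaded engel view; records dispatched commands."""
    is_loaded = True

    def __init__(self):
        self.commands = []

    def dispatch(self, command):
        self.commands.append(command)

class MiniContainer:
    """Minimal container mirroring BaseContainer.add_child."""

    def __init__(self, id, view=None):
        self.id, self.view, self.children = id, view, []

    def compile(self):
        return '<div id="{}">{}</div>'.format(
            self.id, ''.join(c.compile() for c in self.children))

    def add_child(self, child):
        self.children.append(child)
        child.parent = self
        if self.view and self.view.is_loaded:
            self.view.dispatch({'name': 'append',
                                'html': child.compile(),
                                'selector': '#' + str(self.id)})

view = RecordingView()
root = MiniContainer('root', view)
root.add_child(MiniContainer('leaf'))
print(view.commands)
# [{'name': 'append', 'html': '<div id="leaf"></div>', 'selector': '#root'}]
```

`remove_child` and `compile` in the following records round out the same pattern: removal dispatches a `remove` command keyed by the child's id, and `compile` rebuilds the full HTML recursively when no live view is attached.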
dalloriam/engel | engel/widgets/base.py | BaseContainer.remove_child | def remove_child(self, child):
"""
Remove a child widget from this widget.
:param child: Object inheriting :class:`BaseElement`
"""
self.children.remove(child)
child.parent = None
if self.view and self.view.is_loaded:
self.view.dispatch({
'name': 'remove',
'selector': '#' + child.id
}) | python | def remove_child(self, child):
"""
Remove a child widget from this widget.
:param child: Object inheriting :class:`BaseElement`
"""
self.children.remove(child)
child.parent = None
if self.view and self.view.is_loaded:
self.view.dispatch({
'name': 'remove',
'selector': '#' + child.id
}) | [
"def",
"remove_child",
"(",
"self",
",",
"child",
")",
":",
"self",
".",
"children",
".",
"remove",
"(",
"child",
")",
"child",
".",
"parent",
"=",
"None",
"if",
"self",
".",
"view",
"and",
"self",
".",
"view",
".",
"is_loaded",
":",
"self",
".",
"view",
".",
"dispatch",
"(",
"{",
"'name'",
":",
"'remove'",
",",
"'selector'",
":",
"'#'",
"+",
"child",
".",
"id",
"}",
")"
]
| Remove a child widget from this widget.
:param child: Object inheriting :class:`BaseElement` | [
"Remove",
"a",
"child",
"widget",
"from",
"this",
"widget",
"."
]
| f3477cd546e885bc53e755b3eb1452ce43ef5697 | https://github.com/dalloriam/engel/blob/f3477cd546e885bc53e755b3eb1452ce43ef5697/engel/widgets/base.py#L182-L195 | train |
dalloriam/engel | engel/widgets/base.py | BaseContainer.compile | def compile(self):
"""
Recursively compile this widget as well as all of its children to HTML.
:returns: HTML string representation of this widget.
"""
self.content = "".join(map(lambda x: x.compile(), self.children))
return self._generate_html() | python | def compile(self):
"""
Recursively compile this widget as well as all of its children to HTML.
:returns: HTML string representation of this widget.
"""
self.content = "".join(map(lambda x: x.compile(), self.children))
return self._generate_html() | [
"def",
"compile",
"(",
"self",
")",
":",
"self",
".",
"content",
"=",
"\"\"",
".",
"join",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"compile",
"(",
")",
",",
"self",
".",
"children",
")",
")",
"return",
"self",
".",
"_generate_html",
"(",
")"
]
| Recursively compile this widget as well as all of its children to HTML.
:returns: HTML string representation of this widget. | [
"Recursively",
"compile",
"this",
"widget",
"as",
"well",
"as",
"all",
"of",
"its",
"children",
"to",
"HTML",
"."
]
| f3477cd546e885bc53e755b3eb1452ce43ef5697 | https://github.com/dalloriam/engel/blob/f3477cd546e885bc53e755b3eb1452ce43ef5697/engel/widgets/base.py#L220-L227 | train |
loganasherjones/yapconf | yapconf/handlers.py | ConfigChangeHandler.handle_config_change | def handle_config_change(self, new_config):
"""Handle the new configuration.
Args:
new_config (dict): The new configuration
"""
if self.user_handler:
self.user_handler(self.current_config, new_config)
self._call_spec_handlers(new_config)
self.current_config = copy.deepcopy(new_config) | python | def handle_config_change(self, new_config):
"""Handle the new configuration.
Args:
new_config (dict): The new configuration
"""
if self.user_handler:
self.user_handler(self.current_config, new_config)
self._call_spec_handlers(new_config)
self.current_config = copy.deepcopy(new_config) | [
"def",
"handle_config_change",
"(",
"self",
",",
"new_config",
")",
":",
"if",
"self",
".",
"user_handler",
":",
"self",
".",
"user_handler",
"(",
"self",
".",
"current_config",
",",
"new_config",
")",
"self",
".",
"_call_spec_handlers",
"(",
"new_config",
")",
"self",
".",
"current_config",
"=",
"copy",
".",
"deepcopy",
"(",
"new_config",
")"
]
| Handle the new configuration.
Args:
new_config (dict): The new configuration | [
"Handle",
"the",
"new",
"configuration",
"."
]
| d2970e6e7e3334615d4d978d8b0ca33006d79d16 | https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/handlers.py#L23-L33 | train |
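A sketch of the `user_handler` shape this class expects: a callable receiving the old and new configurations. The nested key layout is illustrative, not a yapconf requirement.

```python
import logging

def on_config_change(old, new):
    # Hypothetical handler: react only when the logging level changed.
    old_level = old.get('log', {}).get('level')
    new_level = new.get('log', {}).get('level')
    if new_level is not None and new_level != old_level:
        logging.getLogger().setLevel(new_level)
```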
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.setReferenceVoltage | def setReferenceVoltage(self, caldb, calv):
"""Sets the reference point to determine what outgoing voltage will produce what intensity,
used to calculate the proper output amplitude of components
:param caldb: calibration intensity in dB SPL
:type caldb: float
:param calv: calibration voltage that was used to record the intensity provided
:type calv: float
"""
self.caldb = caldb
self.calv = calv | python | def setReferenceVoltage(self, caldb, calv):
"""Sets the reference point to determine what outgoing voltage will produce what intensity,
used to calculate the proper output amplitude of components
:param caldb: calibration intensity in dB SPL
:type caldb: float
:param calv: calibration voltage that was used to record the intensity provided
:type calv: float
"""
self.caldb = caldb
self.calv = calv | [
"def",
"setReferenceVoltage",
"(",
"self",
",",
"caldb",
",",
"calv",
")",
":",
"self",
".",
"caldb",
"=",
"caldb",
"self",
".",
"calv",
"=",
"calv"
]
| Sets the reference point to determine what outgoing voltage will produce what intensity,
used to calculate the proper output amplitude of components
:param caldb: calibration intensity in dB SPL
:type caldb: float
:param calv: calibration voltage that was used to record the intensity provided
:type calv: float | [
"Sets",
"the",
"reference",
"point",
"to",
"determine",
"what",
"outgoing",
"voltage",
"will",
"produce",
"what",
"intensity",
"used",
"to",
"calculate",
"the",
"proper",
"output",
"amplitude",
"of",
"components"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L73-L83 | train |
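The (caldb, calv) pair implies the standard decibel relation between output voltage and intensity; whether sparkle applies it in exactly this form is not shown in this record, so treat the helper below as a sketch of the math rather than the library's code.

```python
def amplitude_for_db(db, caldb, calv):
    # 20 * log10(v / calv) = db - caldb  =>  v = calv * 10**((db - caldb) / 20)
    return calv * 10 ** ((db - caldb) / 20.0)

# With a 0.1 V tone calibrated at 100 dB SPL, 80 dB SPL needs 0.01 V.
print(amplitude_for_db(80, caldb=100, calv=0.1))
```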
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.setCalibration | def setCalibration(self, dbBoostArray, frequencies, frange):
"""Sets the calibration to use with this stimulus,
creates a filter that will be applied to output signal generated by this model.
Set arguments to `None` to clear calibration.
:param dbBoostArray: frequency response of the system (in dB)
:type dbBoostArray: numpy.ndarray
:param frequencies: corresponding frequencies for the dbBoostArray
:type frequencies: numpy.ndarray
:param frange: The desired frequency range for which to apply the calibration, in Hz
:type frange: (int, int)
"""
if dbBoostArray is not None and frequencies is not None:
logger = logging.getLogger('main')
if dbBoostArray.shape != frequencies.shape:
logger.error("ERROR: calibration array and frequency array must have same dimensions")
return
if frange is None:
# maximum possible range
frange = (frequencies[0], frequencies[-1])
logger.debug('setting calibration with samplerate {}'.format(self.samplerate()))
fs = self.samplerate()
if fs in StimulusModel.kernelCache:
logger.debug('---->using cached filter')
# makes the assumption that the cache will be cleared if the frequency response
# changes
self.impulseResponse = StimulusModel.kernelCache[fs]
else:
logger.debug('---->calculating new filter for fs {}'.format(fs))
self.impulseResponse = impulse_response(fs, dbBoostArray, frequencies, frange)
# mutable type so will affect data structure persistently
StimulusModel.kernelCache[fs] = self.impulseResponse
# store this so we can quickly check if a calibration needs to be re-done
self._calibration_fs = fs
# calculate for the default samplerate, if not already, since
# we are very likely to need it, and it's better to have this done
# up front, than cause lag in the UI later
if DEFAULT_SAMPLERATE not in StimulusModel.kernelCache:
StimulusModel.kernelCache[DEFAULT_SAMPLERATE] = impulse_response(DEFAULT_SAMPLERATE, dbBoostArray, frequencies, frange)
# hang on to these for re-calculating impulse response on samplerate change
self._attenuationVector = dbBoostArray
self._calFrequencies = frequencies
self._calFrange = frange
else:
self.impulseResponse = None | python | def setCalibration(self, dbBoostArray, frequencies, frange):
"""Sets the calibration to use with this stimulus,
creates a filter that will be applied to output signal generated by this model.
Set arguments to `None` to clear calibration.
:param dbBoostArray: frequency response of the system (in dB)
:type dbBoostArray: numpy.ndarray
:param frequencies: corresponding frequencies for the dbBoostArray
:type frequencies: numpy.ndarray
:param frange: The desired frequency range for which to apply the calibration, in Hz
:type frange: (int, int)
"""
if dbBoostArray is not None and frequencies is not None:
logger = logging.getLogger('main')
if dbBoostArray.shape != frequencies.shape:
logger.error("ERROR: calibration array and frequency array must have same dimensions")
return
if frange is None:
# maximum possible range
frange = (frequencies[0], frequencies[-1])
logger.debug('setting calibration with samplerate {}'.format(self.samplerate()))
fs = self.samplerate()
if fs in StimulusModel.kernelCache:
logger.debug('---->using cached filter')
# makes the assumption that the cache will be cleared if the frequency response
# changes
self.impulseResponse = StimulusModel.kernelCache[fs]
else:
logger.debug('---->calculating new filter for fs {}'.format(fs))
self.impulseResponse = impulse_response(fs, dbBoostArray, frequencies, frange)
# mutable type so will affect data structure persistently
StimulusModel.kernelCache[fs] = self.impulseResponse
# store this so we can quickly check if a calibration needs to be re-done
self._calibration_fs = fs
# calculate for the default samplerate, if not already, since
# we are very likely to need it, and it's better to have this done
# up front, than cause lag in the UI later
if DEFAULT_SAMPLERATE not in StimulusModel.kernelCache:
StimulusModel.kernelCache[DEFAULT_SAMPLERATE] = impulse_response(DEFAULT_SAMPLERATE, dbBoostArray, frequencies, frange)
# hang on to these for re-calculating impulse response on samplerate change
self._attenuationVector = dbBoostArray
self._calFrequencies = frequencies
self._calFrange = frange
else:
self.impulseResponse = None | [
"def",
"setCalibration",
"(",
"self",
",",
"dbBoostArray",
",",
"frequencies",
",",
"frange",
")",
":",
"if",
"dbBoostArray",
"is",
"not",
"None",
"and",
"frequencies",
"is",
"not",
"None",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'main'",
")",
"if",
"dbBoostArray",
".",
"shape",
"!=",
"frequencies",
".",
"shape",
":",
"logger",
".",
"error",
"(",
"\"ERROR: calibration array and frequency array must have same dimensions\"",
")",
"return",
"if",
"frange",
"is",
"None",
":",
"# maximum possible range",
"frange",
"=",
"(",
"frequencies",
"[",
"0",
"]",
",",
"frequencies",
"[",
"-",
"1",
"]",
")",
"logger",
".",
"debug",
"(",
"'setting calibration with samplerate {}'",
".",
"format",
"(",
"self",
".",
"samplerate",
"(",
")",
")",
")",
"fs",
"=",
"self",
".",
"samplerate",
"(",
")",
"if",
"fs",
"in",
"StimulusModel",
".",
"kernelCache",
":",
"logger",
".",
"debug",
"(",
"'---->using cached filter'",
")",
"# makes the assumption that the cache will be cleared if the frequency reponse",
"# changes",
"self",
".",
"impulseResponse",
"=",
"StimulusModel",
".",
"kernelCache",
"[",
"fs",
"]",
"else",
":",
"logger",
".",
"debug",
"(",
"'---->calculating new filter for fs {}'",
".",
"format",
"(",
"fs",
")",
")",
"self",
".",
"impulseResponse",
"=",
"impulse_response",
"(",
"fs",
",",
"dbBoostArray",
",",
"frequencies",
",",
"frange",
")",
"# mutable type so will affect data structure persistently",
"StimulusModel",
".",
"kernelCache",
"[",
"fs",
"]",
"=",
"self",
".",
"impulseResponse",
"# store this so we can quickly check if a calibration needs to be re-done ",
"self",
".",
"_calibration_fs",
"=",
"fs",
"# calculate for the default samplerate, if not already, since",
"# we are very likely to need it, and it's better to have this done",
"# up front, than cause lag in the UI later",
"if",
"DEFAULT_SAMPLERATE",
"not",
"in",
"StimulusModel",
".",
"kernelCache",
":",
"StimulusModel",
".",
"kernelCache",
"[",
"DEFAULT_SAMPLERATE",
"]",
"=",
"impulse_response",
"(",
"DEFAULT_SAMPLERATE",
",",
"dbBoostArray",
",",
"frequencies",
",",
"frange",
")",
"# hang on to these for re-calculating impulse response on samplerate change",
"self",
".",
"_attenuationVector",
"=",
"dbBoostArray",
"self",
".",
"_calFrequencies",
"=",
"frequencies",
"self",
".",
"_calFrange",
"=",
"frange",
"else",
":",
"self",
".",
"impulseResponse",
"=",
"None"
]
| Sets the calibration to use with this stimulus,
creates a filter that will be applied to output signal generated by this model.
Set arguments to `None` to clear calibration.
:param dbBoostArray: frequency response of the system (in dB)
:type dbBoostArray: numpy.ndarray
:param frequencies: corresponding frequencies for the dbBoostArray
:type frequencies: numpy.ndarray
:param frange: The desired frequency range for which to apply the calibration, in Hz
:type frange: (int, int) | [
"Sets",
"the",
"calibration",
"to",
"use",
"with",
"this",
"stimulus",
"creates",
"a",
"filter",
"that",
"will",
"be",
"applied",
"to",
"output",
"signal",
"generated",
"by",
"this",
"model",
".",
"Set",
"arguments",
"to",
"None",
"to",
"clear",
"calibration",
"."
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L85-L134 | train |
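A usage sketch, assuming a no-argument `StimulusModel` constructor; the flat frequency response and the 5-100 kHz range are illustrative.

```python
import numpy as np
from sparkle.stim.stimulus_model import StimulusModel

stim = StimulusModel()                  # assumed no-arg constructor
freqs = np.linspace(5000, 100000, 200)  # calibration frequency grid (Hz)
boosts = np.zeros_like(freqs)           # flat response: 0 dB correction

stim.setReferenceVoltage(100, 0.1)
stim.setCalibration(boosts, freqs, (5000, 100000))
stim.setCalibration(None, None, None)   # clears the calibration filter
```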
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.updateCalibration | def updateCalibration(self):
"""Updates the current calibration according to intenal values. For example, if the stimulus samplerate changes
the calibration needs to be recalculated."""
if self.samplerate() != self._calibration_fs:
self.setCalibration(self._attenuationVector, self._calFrequencies, self._calFrange) | python | def updateCalibration(self):
"""Updates the current calibration according to intenal values. For example, if the stimulus samplerate changes
the calibration needs to be recalculated."""
if self.samplerate() != self._calibration_fs:
self.setCalibration(self._attenuationVector, self._calFrequencies, self._calFrange) | [
"def",
"updateCalibration",
"(",
"self",
")",
":",
"if",
"self",
".",
"samplerate",
"(",
")",
"!=",
"self",
".",
"_calibration_fs",
":",
"self",
".",
"setCalibration",
"(",
"self",
".",
"_attenuationVector",
",",
"self",
".",
"_calFrequencies",
",",
"self",
".",
"_calFrange",
")"
]
| Updates the current calibration according to internal values. For example, if the stimulus samplerate changes
the calibration needs to be recalculated. | [
"Updates",
"the",
"current",
"calibration",
"according",
"to",
"intenal",
"values",
".",
"For",
"example",
"if",
"the",
"stimulus",
"samplerate",
"changes",
"the",
"calibration",
"needs",
"to",
"be",
"recalculated",
"."
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L136-L140 | train |
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.samplerate | def samplerate(self):
"""Returns the generation rate for this stimulus
:returns: int -- the output samplerate (Hz)
"""
rates = []
for track in self._segments:
for component in track:
# special case, where component is a wav file:
# it will set the master samplerate to match its own
if component.__class__.__name__ == 'Vocalization':
if component.samplerate() is not None:
rates.append(component.samplerate())
if len(set(rates)) > 1:
# error check
# raise Exception("Wav files with different sample rates in same stimulus")
logger = logging.getLogger('main')
logger.error("Wav files with different sample rates in same stimulus")
return None
elif len(set(rates)) == 1:
return rates[0]
else:
return DEFAULT_SAMPLERATE | python | def samplerate(self):
"""Returns the generation rate for this stimulus
:returns: int -- the output samplerate (Hz)
"""
rates = []
for track in self._segments:
for component in track:
# special case, where component is a wav file:
# it will set the master samplerate to match its own
if component.__class__.__name__ == 'Vocalization':
if component.samplerate() is not None:
rates.append(component.samplerate())
if len(set(rates)) > 1:
# error check
# raise Exception("Wav files with different sample rates in same stimulus")
logger = logging.getLogger('main')
logger.error("Wav files with different sample rates in same stimulus")
return None
elif len(set(rates)) == 1:
return rates[0]
else:
return DEFAULT_SAMPLERATE | [
"def",
"samplerate",
"(",
"self",
")",
":",
"rates",
"=",
"[",
"]",
"for",
"track",
"in",
"self",
".",
"_segments",
":",
"for",
"component",
"in",
"track",
":",
"# special case, where component is a wav file:",
"# it will set the master samplerate to match its own",
"if",
"component",
".",
"__class__",
".",
"__name__",
"==",
"'Vocalization'",
":",
"if",
"component",
".",
"samplerate",
"(",
")",
"is",
"not",
"None",
":",
"rates",
".",
"append",
"(",
"component",
".",
"samplerate",
"(",
")",
")",
"if",
"len",
"(",
"set",
"(",
"rates",
")",
")",
">",
"1",
":",
"# error check",
"# raise Exception(\"Wav files with different sample rates in same stimulus\")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'main'",
")",
"logger",
".",
"error",
"(",
"\"Wav files with different sample rates in same stimulus\"",
")",
"return",
"None",
"elif",
"len",
"(",
"set",
"(",
"rates",
")",
")",
"==",
"1",
":",
"return",
"rates",
"[",
"0",
"]",
"else",
":",
"return",
"DEFAULT_SAMPLERATE"
]
| Returns the generation rate for this stimulus
:returns: int -- the output samplerate (Hz) | [
"Returns",
"the",
"generation",
"rate",
"for",
"this",
"stimulus"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L147-L170 | train |
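Callers need to treat the `None` return as a hard error, since it signals conflicting wav rates rather than a usable value; a minimal guard:

```python
fs = stim.samplerate()  # stim as in the earlier sketch
if fs is None:
    raise ValueError('wav components disagree on sample rate; '
                     'cannot generate output')
```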
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.columnCount | def columnCount(self, row=None):
"""Returns the number of components in a track,
or the max number of components in any row, if none given
:param row: track to get count for
:type row: int
:returns: int -- number of components for *row*
"""
if row is not None:
wholerow = self._segments[row]
return len(wholerow)
else:
column_lengths = [len(x) for x in self._segments]
return max(column_lengths) | python | def columnCount(self, row=None):
"""Returns the number of components in a track,
or the max number of components in any row, if none given
:param row: track to get count for
:type row: int
:returns: int -- number of components for *row*
"""
if row is not None:
wholerow = self._segments[row]
return len(wholerow)
else:
column_lengths = [len(x) for x in self._segments]
return max(column_lengths) | [
"def",
"columnCount",
"(",
"self",
",",
"row",
"=",
"None",
")",
":",
"if",
"row",
"is",
"not",
"None",
":",
"wholerow",
"=",
"self",
".",
"_segments",
"[",
"row",
"]",
"return",
"len",
"(",
"wholerow",
")",
"else",
":",
"column_lengths",
"=",
"[",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"self",
".",
"_segments",
"]",
"return",
"max",
"(",
"column_lengths",
")"
]
| Returns the number of components in a track,
or the max number of components in any row, if none given
:param row: track to get count for
:type row: int
:returns: int -- number of components for *row* | [
"Returns",
"the",
"number",
"of",
"components",
"in",
"a",
"track",
"or",
"the",
"max",
"number",
"of",
"components",
"in",
"any",
"row",
"if",
"none",
"given"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L194-L207 | train |
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.componentCount | def componentCount(self):
"""Returns the total number of components in stimulus
:returns: number of components (not including expanded auto-params)
"""
return sum([self.columnCountForRow(x) for x in range(self.rowCount())]) | python | def componentCount(self):
"""Returns the total number of components in stimulus
:returns: number of components (not including expanded auto-params)
"""
return sum([self.columnCountForRow(x) for x in range(self.rowCount())]) | [
"def",
"componentCount",
"(",
"self",
")",
":",
"return",
"sum",
"(",
"[",
"self",
".",
"columnCountForRow",
"(",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"self",
".",
"rowCount",
"(",
")",
")",
"]",
")"
]
| Returns the total number of components in stimulus
:returns: number of components (not including expanded auto-params) | [
"Returns",
"the",
"total",
"number",
"of",
"components",
"in",
"stimulus"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L215-L220 | train |
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.component | def component(self, row, col):
"""Gets the components for the location
:param row: track the component is in
:type row: int
:param col: the ith member of the track
:type col: int
:returns: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
"""
try:
comp = self._segments[row][col]
except:
# invalid index
print 'Invalid index'
return None
return comp | python | def component(self, row, col):
"""Gets the components for the location
:param row: track the component is in
:type row: int
:param col: the ith member of the track
:type col: int
:returns: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
"""
try:
comp = self._segments[row][col]
except:
# invalid index
print 'Invalid index'
return None
return comp | [
"def",
"component",
"(",
"self",
",",
"row",
",",
"col",
")",
":",
"try",
":",
"comp",
"=",
"self",
".",
"_segments",
"[",
"row",
"]",
"[",
"col",
"]",
"except",
":",
"# invalid index",
"print",
"'Invalid index'",
"return",
"None",
"return",
"comp"
]
| Gets the component for the location
:param row: track the component is in
:type row: int
:param col: the ith member of the track
:type col: int
:returns: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` | [
"Gets",
"the",
"components",
"for",
"the",
"location"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L222-L237 | train |
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.insertComponent | def insertComponent(self, comp, row=0, col=0):
"""Inserts component into model
:param comp: Component to insert into the stimulus
:type comp: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:param row: Track number to place comp in
:type row: int
:param col: location in track to insert component to
:type col: int
"""
if row > len(self._segments) -1:
self.insertEmptyRow()
self._segments[row].insert(col, comp)
# in case of samplerate change, just always update
self.updateCalibration() | python | def insertComponent(self, comp, row=0, col=0):
"""Inserts component into model
:param comp: Component to insert into the stimulus
:type comp: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:param row: Track number to place comp in
:type row: int
:param col: location in track to insert component to
:type col: int
"""
if row > len(self._segments) -1:
self.insertEmptyRow()
self._segments[row].insert(col, comp)
# in case of samplerate change, just always update
self.updateCalibration() | [
"def",
"insertComponent",
"(",
"self",
",",
"comp",
",",
"row",
"=",
"0",
",",
"col",
"=",
"0",
")",
":",
"if",
"row",
">",
"len",
"(",
"self",
".",
"_segments",
")",
"-",
"1",
":",
"self",
".",
"insertEmptyRow",
"(",
")",
"self",
".",
"_segments",
"[",
"row",
"]",
".",
"insert",
"(",
"col",
",",
"comp",
")",
"# in case of samplerate change, just always update",
"self",
".",
"updateCalibration",
"(",
")"
]
| Inserts component into model
:param comp: Component to insert into the stimulus
:type comp: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:param row: Track number to place comp in
:type row: int
:param col: location in track to insert component to
:type col: int | [
"Inserts",
"component",
"into",
"model"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L239-L254 | train |
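A sketch of on-demand track growth, again assuming a no-argument constructor; `object()` stands in for real stimulus components, which the model simply stores.

```python
from sparkle.stim.stimulus_model import StimulusModel

tone, vocal = object(), object()    # stand-ins for real components
stim = StimulusModel()              # assumed no-arg constructor
stim.insertComponent(tone)          # lands in track 0
stim.insertComponent(vocal, row=1)  # row 1 is missing, so insertEmptyRow()
                                    # appends a new track first
```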
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.overwriteComponent | def overwriteComponent(self, comp, row, col):
"""Overwrites the component at the specficied location with a provided one.
:param comp: New component to insert
:type comp: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:param row: track location of existing component to overwrite
:type row: int
:param col: location in track of existing component to overwrite
:type col: int
"""
self._segments[row][col] = comp
# in case of samplerate change, just always update
self.updateCalibration() | python | def overwriteComponent(self, comp, row, col):
"""Overwrites the component at the specficied location with a provided one.
:param comp: New component to insert
:type comp: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:param row: track location of existing component to overwrite
:type row: int
:param col: location in track of existing component to overwrite
:type col: int
"""
self._segments[row][col] = comp
# in case of samplerate change, just always update
self.updateCalibration() | [
"def",
"overwriteComponent",
"(",
"self",
",",
"comp",
",",
"row",
",",
"col",
")",
":",
"self",
".",
"_segments",
"[",
"row",
"]",
"[",
"col",
"]",
"=",
"comp",
"# in case of samplerate change, just always update",
"self",
".",
"updateCalibration",
"(",
")"
]
| Overwrites the component at the specified location with a provided one.
:param comp: New component to insert
:type comp: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:param row: track location of existing component to overwrite
:type row: int
:param col: location in track of existing component to overwrite
:type col: int | [
"Overwrites",
"the",
"component",
"at",
"the",
"specficied",
"location",
"with",
"a",
"provided",
"one",
"."
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L256-L269 | train |
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.removeLastRow | def removeLastRow(self):
"""Removes the last track"""
lastrow = self._segments.pop(len(self._segments)-1)
if len(lastrow) > 0:
raise Exception("Attempt to remove non-empty stimulus track") | python | def removeLastRow(self):
"""Removes the last track"""
lastrow = self._segments.pop(len(self._segments)-1)
if len(lastrow) > 0:
raise Exception("Attempt to remove non-empty stimulus track") | [
"def",
"removeLastRow",
"(",
"self",
")",
":",
"lastrow",
"=",
"self",
".",
"_segments",
".",
"pop",
"(",
"len",
"(",
"self",
".",
"_segments",
")",
"-",
"1",
")",
"if",
"len",
"(",
"lastrow",
")",
">",
"0",
":",
"raise",
"Exception",
"(",
"\"Attempt to remove non-empty stimulus track\"",
")"
]
| Removes the last track | [
"Removes",
"the",
"last",
"track"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L275-L279 | train |
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.removeComponent | def removeComponent(self, row,col):
"""Removes the component at the given location
:param row: track location of existing component to remove
:type row: int
:param col: location in track of existing component to remove
:type col: int
"""
self._segments[row].pop(col)
# If this row is now empty we should remove it?
if self.columnCountForRow(-1) == 0:
self.removeRow(len(self._segments)-1)
# in case of samplerate change, just always update
self.updateCalibration() | python | def removeComponent(self, row,col):
"""Removes the component at the given location
:param row: track location of existing component to remove
:type row: int
:param col: location in track of existing component to remove
:type col: int
"""
self._segments[row].pop(col)
# If this row is now empty we should remove it?
if self.columnCountForRow(-1) == 0:
self.removeRow(len(self._segments)-1)
# in case of samplerate change, just always update
self.updateCalibration() | [
"def",
"removeComponent",
"(",
"self",
",",
"row",
",",
"col",
")",
":",
"self",
".",
"_segments",
"[",
"row",
"]",
".",
"pop",
"(",
"col",
")",
"# If this row is now empty we should remove it?",
"if",
"self",
".",
"columnCountForRow",
"(",
"-",
"1",
")",
"==",
"0",
":",
"self",
".",
"removeRow",
"(",
"len",
"(",
"self",
".",
"_segments",
")",
"-",
"1",
")",
"# in case of samplerate change, just always update",
"self",
".",
"updateCalibration",
"(",
")"
]
| Removes the component at the given location
:param row: track location of existing component to remove
:type row: int
:param col: location in track of existing component to remove
:type col: int | [
"Removes",
"the",
"component",
"at",
"the",
"given",
"location"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L285-L300 | train |
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.indexByComponent | def indexByComponent(self, component):
"""Returns a location for the given component, or None if
it is not in the model
:param component: Component to get index for
:type component: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:returns: (int, int) -- (row, column) of component
"""
for row, rowcontents in enumerate(self._segments):
if component in rowcontents:
column = rowcontents.index(component)
return (row, column) | python | def indexByComponent(self, component):
"""Returns a location for the given component, or None if
it is not in the model
:param component: Component to get index for
:type component: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:returns: (int, int) -- (row, column) of component
"""
for row, rowcontents in enumerate(self._segments):
if component in rowcontents:
column = rowcontents.index(component)
return (row, column) | [
"def",
"indexByComponent",
"(",
"self",
",",
"component",
")",
":",
"for",
"row",
",",
"rowcontents",
"in",
"enumerate",
"(",
"self",
".",
"_segments",
")",
":",
"if",
"component",
"in",
"rowcontents",
":",
"column",
"=",
"rowcontents",
".",
"index",
"(",
"component",
")",
"return",
"(",
"row",
",",
"column",
")"
]
| Returns a location for the given component, or None if
it is not in the model
:param component: Component to get index for
:type component: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:returns: (int, int) -- (row, column) of component | [
"Returns",
"a",
"location",
"for",
"the",
"given",
"component",
"or",
"None",
"if",
"it",
"is",
"not",
"in",
"the",
"model"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L309-L320 | train |
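When the component is not found, the loop above falls through without an explicit return, so callers receive None implicitly. A defensive lookup sketch (continuing the model and tone names assumed earlier):

index = model.indexByComponent(tone)
if index is None:       # component not in any track
    print('component is not in this stimulus')
else:
    row, col = index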
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.traceCount | def traceCount(self):
"""The number of unique stimului for this stimulus object
:returns: int -- The expanded trace count
"""
nsegs = sum([len(track) for track in self._segments])
if nsegs == 0:
return 0
ntraces = 1
for irow in range(self._autoParams.nrows()):
ntraces = ntraces*self._autoParams.numSteps(irow)
return ntraces | python | def traceCount(self):
"""The number of unique stimului for this stimulus object
:returns: int -- The expanded trace count
"""
nsegs = sum([len(track) for track in self._segments])
if nsegs == 0:
return 0
ntraces = 1
for irow in range(self._autoParams.nrows()):
ntraces = ntraces*self._autoParams.numSteps(irow)
return ntraces | [
"def",
"traceCount",
"(",
"self",
")",
":",
"nsegs",
"=",
"sum",
"(",
"[",
"len",
"(",
"track",
")",
"for",
"track",
"in",
"self",
".",
"_segments",
"]",
")",
"if",
"nsegs",
"==",
"0",
":",
"return",
"0",
"ntraces",
"=",
"1",
"for",
"irow",
"in",
"range",
"(",
"self",
".",
"_autoParams",
".",
"nrows",
"(",
")",
")",
":",
"ntraces",
"=",
"ntraces",
"*",
"self",
".",
"_autoParams",
".",
"numSteps",
"(",
"irow",
")",
"return",
"ntraces"
]
| The number of unique stimuli for this stimulus object
:returns: int -- The expanded trace count | [
"The",
"number",
"of",
"unique",
"stimului",
"for",
"this",
"stimulus",
"object"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L322-L333 | train |
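A worked instance of the product above: with at least one component and two auto-parameters of 5 and 3 steps, the count is 5 * 3 = 15; with no components the count is 0 regardless of parameters.

ntraces = 1
for steps in (5, 3):     # numSteps() for each auto-parameter row
    ntraces *= steps
assert ntraces == 15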
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.contains | def contains(self, stimtype):
"""Returns whether the specified stimlus type is a component in this stimulus
:param stimtype: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` subclass class name to test for membership in the components of this stimulus
:type stimtype: str
:returns: bool -- if the stimtype is in the model
"""
for track in self._segments:
for component in track:
if component.__class__.__name__ == stimtype:
return True
return False | python | def contains(self, stimtype):
"""Returns whether the specified stimlus type is a component in this stimulus
:param stimtype: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` subclass class name to test for membership in the components of this stimulus
:type stimtype: str
:returns: bool -- if the stimtype is in the model
"""
for track in self._segments:
for component in track:
if component.__class__.__name__ == stimtype:
return True
return False | [
"def",
"contains",
"(",
"self",
",",
"stimtype",
")",
":",
"for",
"track",
"in",
"self",
".",
"_segments",
":",
"for",
"component",
"in",
"track",
":",
"if",
"component",
".",
"__class__",
".",
"__name__",
"==",
"stimtype",
":",
"return",
"True",
"return",
"False"
]
| Returns whether the specified stimulus type is a component in this stimulus
:param stimtype: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` subclass class name to test for membership in the components of this stimulus
:type stimtype: str
:returns: bool -- if the stimtype is in the model | [
"Returns",
"whether",
"the",
"specified",
"stimlus",
"type",
"is",
"a",
"component",
"in",
"this",
"stimulus"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L355-L366 | train |
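The membership test above compares class name strings, not class objects. Sketch ('PureTone' is an assumed component class name):

if model.contains('PureTone'):
    print('stimulus includes a pure tone component')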
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.purgeAutoSelected | def purgeAutoSelected(self):
"""Clears out orphaned auto parameters"""
params = self._autoParams.allData()
for p in params:
comps_to_remove = []
for comp in p['selection']:
if self.indexByComponent(comp) is None:
comps_to_remove.append(comp)
for orphaned in comps_to_remove:
p['selection'].remove(orphaned) | python | def purgeAutoSelected(self):
"""Clears out orphaned auto parameters"""
params = self._autoParams.allData()
for p in params:
comps_to_remove = []
for comp in p['selection']:
if self.indexByComponent(comp) is None:
comps_to_remove.append(comp)
for orphaned in comps_to_remove:
p['selection'].remove(orphaned) | [
"def",
"purgeAutoSelected",
"(",
"self",
")",
":",
"params",
"=",
"self",
".",
"_autoParams",
".",
"allData",
"(",
")",
"for",
"p",
"in",
"params",
":",
"comps_to_remove",
"=",
"[",
"]",
"for",
"comp",
"in",
"p",
"[",
"'selection'",
"]",
":",
"if",
"self",
".",
"indexByComponent",
"(",
"comp",
")",
"is",
"None",
":",
"comps_to_remove",
".",
"append",
"(",
"comp",
")",
"for",
"orphaned",
"in",
"comps_to_remove",
":",
"p",
"[",
"'selection'",
"]",
".",
"remove",
"(",
"orphaned",
")"
]
| Clears out orphaned auto parameters | [
"Clears",
"out",
"orphaned",
"auto",
"parameters"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L368-L377 | train |
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.expandFunction | def expandFunction(self, func, args=[]):
"""applies the given function to each of this stimulus's memerships when autoparamters are applied
:param func: callable to execute for each version of the stimulus
:type instancemethod:
:param args: arguments to feed to func
:type args: list
:returns: list<results of *func*>, one for each trace
"""
# initialize array to hold all varied parameters
params = self._autoParams.allData()
steps = self.autoParamRanges()
ntraces = 1
for p in steps:
ntraces = ntraces*len(p)
varylist = [[None for x in range(len(params))] for y in range(ntraces)]
x = 1
for iset, step_set in enumerate(steps):
for itrace in range(ntraces):
idx = (itrace / x) % len(step_set)
varylist[itrace][iset] = step_set[idx]
x = x*len(step_set)
# now create the stimuli according to steps
# go through list of modifying parameters, update this stimulus,
# and then save current state to list
stim_list = []
for itrace in range(ntraces):
for ip, param in enumerate(params):
for component in param['selection']:
# print 'setting component {} parameter {} to {}'.format(component.name, param['parameter'], varylist[itrace][ip])
# so I encountered a bug when the parameters were dragged: the
# pickling/unpickling seems to either make a copy or somehow
# otherwise lose connection to the original components
# make sure to be setting the components that are in this model.
index = self.indexByComponent(component)
component = self.component(*index)
component.set(param['parameter'], varylist[itrace][ip])
# copy of current stim state, or go ahead and turn it into a signal?
# so then would I want to formulate some doc here as well?
stim_list.append(func(*args))
# now reset the components to start value
for ip, param in enumerate(params):
for component in param['selection']:
component.set(param['parameter'], varylist[0][ip])
return stim_list | python | def expandFunction(self, func, args=[]):
"""applies the given function to each of this stimulus's memerships when autoparamters are applied
:param func: callable to execute for each version of the stimulus
:type instancemethod:
:param args: arguments to feed to func
:type args: list
:returns: list<results of *func*>, one for each trace
"""
# initialize array to hold all varied parameters
params = self._autoParams.allData()
steps = self.autoParamRanges()
ntraces = 1
for p in steps:
ntraces = ntraces*len(p)
varylist = [[None for x in range(len(params))] for y in range(ntraces)]
x = 1
for iset, step_set in enumerate(steps):
for itrace in range(ntraces):
idx = (itrace / x) % len(step_set)
varylist[itrace][iset] = step_set[idx]
x = x*len(step_set)
# now create the stimuli according to steps
# go through list of modifying parameters, update this stimulus,
# and then save current state to list
stim_list = []
for itrace in range(ntraces):
for ip, param in enumerate(params):
for component in param['selection']:
# print 'setting component {} parameter {} to {}'.format(component.name, param['parameter'], varylist[itrace][ip])
# so I encountered a bug when the parameters were dragged: the
# pickling/unpickling seems to either make a copy or somehow
# otherwise lose connection to the original components
# make sure to be setting the components that are in this model.
index = self.indexByComponent(component)
component = self.component(*index)
component.set(param['parameter'], varylist[itrace][ip])
# copy of current stim state, or go ahead and turn it into a signal?
# so then would I want to formulate some doc here as well?
stim_list.append(func(*args))
# now reset the components to start value
for ip, param in enumerate(params):
for component in param['selection']:
component.set(param['parameter'], varylist[0][ip])
return stim_list | [
"def",
"expandFunction",
"(",
"self",
",",
"func",
",",
"args",
"=",
"[",
"]",
")",
":",
"# initilize array to hold all varied parameters",
"params",
"=",
"self",
".",
"_autoParams",
".",
"allData",
"(",
")",
"steps",
"=",
"self",
".",
"autoParamRanges",
"(",
")",
"ntraces",
"=",
"1",
"for",
"p",
"in",
"steps",
":",
"ntraces",
"=",
"ntraces",
"*",
"len",
"(",
"p",
")",
"varylist",
"=",
"[",
"[",
"None",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"params",
")",
")",
"]",
"for",
"y",
"in",
"range",
"(",
"ntraces",
")",
"]",
"x",
"=",
"1",
"for",
"iset",
",",
"step_set",
"in",
"enumerate",
"(",
"steps",
")",
":",
"for",
"itrace",
"in",
"range",
"(",
"ntraces",
")",
":",
"idx",
"=",
"(",
"itrace",
"/",
"x",
")",
"%",
"len",
"(",
"step_set",
")",
"varylist",
"[",
"itrace",
"]",
"[",
"iset",
"]",
"=",
"step_set",
"[",
"idx",
"]",
"x",
"=",
"x",
"*",
"len",
"(",
"step_set",
")",
"# now create the stimuli according to steps",
"# go through list of modifing parameters, update this stimulus,",
"# and then save current state to list",
"stim_list",
"=",
"[",
"]",
"for",
"itrace",
"in",
"range",
"(",
"ntraces",
")",
":",
"for",
"ip",
",",
"param",
"in",
"enumerate",
"(",
"params",
")",
":",
"for",
"component",
"in",
"param",
"[",
"'selection'",
"]",
":",
"# print 'setting component {} parameter {} to {}'.format(component.name, param['parameter'], varylist[itrace][ip])",
"# so I encountered a bug when the parameters were dragged the",
"# pickling/unpickling seems to either make a copy or somehow",
"# otherwise loose connection to the original components",
"# make sure to be setting the components that are in this model.",
"index",
"=",
"self",
".",
"indexByComponent",
"(",
"component",
")",
"component",
"=",
"self",
".",
"component",
"(",
"*",
"index",
")",
"component",
".",
"set",
"(",
"param",
"[",
"'parameter'",
"]",
",",
"varylist",
"[",
"itrace",
"]",
"[",
"ip",
"]",
")",
"# copy of current stim state, or go ahead and turn it into a signal?",
"# so then would I want to formulate some doc here as well?",
"stim_list",
".",
"append",
"(",
"func",
"(",
"*",
"args",
")",
")",
"# now reset the components to start value",
"for",
"ip",
",",
"param",
"in",
"enumerate",
"(",
"params",
")",
":",
"for",
"component",
"in",
"param",
"[",
"'selection'",
"]",
":",
"component",
".",
"set",
"(",
"param",
"[",
"'parameter'",
"]",
",",
"varylist",
"[",
"0",
"]",
"[",
"ip",
"]",
")",
"return",
"stim_list"
]
| applies the given function to each of this stimulus's memberships when autoparameters are applied
:param func: callable to execute for each version of the stimulus
:type func: instancemethod
:param args: arguments to feed to func
:type args: list
:returns: list<results of *func*>, one for each trace | [
"applies",
"the",
"given",
"function",
"to",
"each",
"of",
"this",
"stimulus",
"s",
"memerships",
"when",
"autoparamters",
"are",
"applied"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L392-L441 | train |
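The indexing above enumerates the Cartesian product of the parameter ranges with the first auto-parameter varying fastest; note that `itrace / x` relies on Python 2 integer division (use `//` under Python 3). A standalone sketch of the same ordering:

steps = [[1, 2], [10, 20, 30]]      # two auto-parameter ranges
ntraces = 2 * 3
varylist = [[None, None] for _ in range(ntraces)]
x = 1
for iset, step_set in enumerate(steps):
    for itrace in range(ntraces):
        varylist[itrace][iset] = step_set[(itrace // x) % len(step_set)]
    x *= len(step_set)
# varylist == [[1, 10], [2, 10], [1, 20], [2, 20], [1, 30], [2, 30]]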
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.setReorderFunc | def setReorderFunc(self, func, name=None):
"""Sets the function that reorders the expanded signals of this stimulus
:param func: a function which takes the template doc as an argument
:type func: callable
:param name: a name to assign the function (for documentation purposes)
:type name: str
"""
self.reorder = func
self.reorderName = name | python | def setReorderFunc(self, func, name=None):
"""Sets the function that reorders the expanded signals of this stimulus
:param func: a function which takes the template doc as an argument
:type func: callable
:param name: a name to assign the function (for documentation purposes)
:type name: str
"""
self.reorder = func
self.reorderName = name | [
"def",
"setReorderFunc",
"(",
"self",
",",
"func",
",",
"name",
"=",
"None",
")",
":",
"self",
".",
"reorder",
"=",
"func",
"self",
".",
"reorderName",
"=",
"name"
]
| Sets the function that reorders the expanded signals of this stimulus
:param func: a function which takes the template doc as an argument
:type func: callable
:param name: a name to assign the function (for documentation purposes)
:type name: str | [
"Sets",
"the",
"function",
"that",
"reorders",
"the",
"expanded",
"signals",
"of",
"this",
"stimulus"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L443-L452 | train |
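A reorder function receives the list of per-trace doc dictionaries and must return a permutation of trace indices, which is how expandedStim below applies it. A sketch that randomizes presentation order:

import random

def random_order(docs):
    order = list(range(len(docs)))
    random.shuffle(order)
    return order

model.setReorderFunc(random_order, name='random')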
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.expandedStim | def expandedStim(self):
"""
Apply the autoparameters to this stimulus and return a list of
the resulting stimuli, a complementary list of doc dictionaries, and
a complementary list of undesired attenuations.
:returns: list<numpy.ndarray>, list<dict>, list<float> -- the signals, their doc, undesired attenuations (dB)
"""
logger = logging.getLogger('main')
logger.debug("Generating Expanded Stimulus")
# 3 loops now -- could be done in one...
signals = self.expandFunction(self.signal)
docs = self.expandFunction(self.componentDoc)
overloads = []
for s, d in zip(signals, docs):
d['overloaded_attenuation'] = s[2]
overloads.append(s[2])
# remove the undesired attenuation argument
signals = [sig[0:2] for sig in signals]
if self.reorder:
order = self.reorder(docs)
signals = [signals[i] for i in order]
docs = [docs[i] for i in order]
return signals, docs, overloads | python | def expandedStim(self):
"""
Apply the autoparameters to this stimulus and return a list of
the resulting stimuli, a complementary list of doc dictionaries, and
a complementary list of undesired attenuations.
:returns: list<numpy.ndarray>, list<dict>, list<float> -- the signals, their doc, undesired attenuations (dB)
"""
logger = logging.getLogger('main')
logger.debug("Generating Expanded Stimulus")
# 3 loops now -- could be done in one...
signals = self.expandFunction(self.signal)
docs = self.expandFunction(self.componentDoc)
overloads = []
for s, d in zip(signals, docs):
d['overloaded_attenuation'] = s[2]
overloads.append(s[2])
# remove the undesired attenuation argument
signals = [sig[0:2] for sig in signals]
if self.reorder:
order = self.reorder(docs)
signals = [signals[i] for i in order]
docs = [docs[i] for i in order]
return signals, docs, overloads | [
"def",
"expandedStim",
"(",
"self",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'main'",
")",
"logger",
".",
"debug",
"(",
"\"Generating Expanded Stimulus\"",
")",
"# 3 loops now -- could be done in one...",
"signals",
"=",
"self",
".",
"expandFunction",
"(",
"self",
".",
"signal",
")",
"docs",
"=",
"self",
".",
"expandFunction",
"(",
"self",
".",
"componentDoc",
")",
"overloads",
"=",
"[",
"]",
"for",
"s",
",",
"d",
"in",
"zip",
"(",
"signals",
",",
"docs",
")",
":",
"d",
"[",
"'overloaded_attenuation'",
"]",
"=",
"s",
"[",
"2",
"]",
"overloads",
".",
"append",
"(",
"s",
"[",
"2",
"]",
")",
"# remove the undesired attenuation argument",
"signals",
"=",
"[",
"sig",
"[",
"0",
":",
"2",
"]",
"for",
"sig",
"in",
"signals",
"]",
"if",
"self",
".",
"reorder",
":",
"order",
"=",
"self",
".",
"reorder",
"(",
"docs",
")",
"signals",
"=",
"[",
"signals",
"[",
"i",
"]",
"for",
"i",
"in",
"order",
"]",
"docs",
"=",
"[",
"docs",
"[",
"i",
"]",
"for",
"i",
"in",
"order",
"]",
"return",
"signals",
",",
"docs",
",",
"overloads"
]
| Apply the autoparameters to this stimulus and return a list of
the resulting stimuli, a complementary list of doc dictionaries, and
a complementary list of undesired attenuations.
:returns: list<numpy.ndarray>, list<dict>, list<float> -- the signals, their doc, undesired attenuations (dB) | [
"Apply",
"the",
"autoparameters",
"to",
"this",
"stimulus",
"and",
"return",
"a",
"list",
"of",
"the",
"resulting",
"stimuli",
"a",
"complimentary",
"list",
"of",
"doc",
"dictionaries",
"and",
"a",
"complimentary",
"list",
"of",
"undesired",
"attenuations",
"."
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L454-L481 | train |
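Usage sketch: each entry of signals is a (samples, attenuation) pair after the third element is split off into overs, and each doc carries the componentDoc fields plus 'overloaded_attenuation':

signals, docs, overs = model.expandedStim()
for (samples, atten), doc in zip(signals, docs):
    duration_s = len(samples) / float(doc['samplerate_da'])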
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.loadFromTemplate | def loadFromTemplate(template, stim=None):
"""Loads the stimlus to the state provided by a template
:param template: dict that includes all info nesessary to recreate stim
:type template: dict
:param stim: Stimulus to apply to, creates a new model if None
:type stim: StimulusModel
"""
if stim is None:
stim = StimulusModel()
stim.setRepCount(template['reps'])
stim.setUserTag(template.get('user_tag', ''))
# don't set calibration details - this should be the same application wide
component_classes = get_stimuli_models()
for comp_doc in template['components']:
comp = get_component(comp_doc['stim_type'], component_classes)
comp.loadState(comp_doc) # ignore extra dict entries
stim.insertComponent(comp, *comp_doc['index'])
# revert from location based selection to component list
autoparams = template['autoparameters']
for p in autoparams:
selection = p['selection']
component_selection = []
for index in selection:
component = stim.component(*index)
component_selection.append(component)
p['selection'] = component_selection
stim.autoParams().setParameterList(autoparams)
stim.setReorderFunc(order_function(template['reorder']), template['reorder'])
stim.setStimType(template['testtype'])
return stim | python | def loadFromTemplate(template, stim=None):
"""Loads the stimlus to the state provided by a template
:param template: dict that includes all info nesessary to recreate stim
:type template: dict
:param stim: Stimulus to apply to, creates a new model if None
:type stim: StimulusModel
"""
if stim is None:
stim = StimulusModel()
stim.setRepCount(template['reps'])
stim.setUserTag(template.get('user_tag', ''))
# don't set calibration details - this should be the same application wide
component_classes = get_stimuli_models()
for comp_doc in template['components']:
comp = get_component(comp_doc['stim_type'], component_classes)
comp.loadState(comp_doc) # ignore extra dict entries
stim.insertComponent(comp, *comp_doc['index'])
# revert from location based selection to component list
autoparams = template['autoparameters']
for p in autoparams:
selection = p['selection']
component_selection = []
for index in selection:
component = stim.component(*index)
component_selection.append(component)
p['selection'] = component_selection
stim.autoParams().setParameterList(autoparams)
stim.setReorderFunc(order_function(template['reorder']), template['reorder'])
stim.setStimType(template['testtype'])
return stim | [
"def",
"loadFromTemplate",
"(",
"template",
",",
"stim",
"=",
"None",
")",
":",
"if",
"stim",
"is",
"None",
":",
"stim",
"=",
"StimulusModel",
"(",
")",
"stim",
".",
"setRepCount",
"(",
"template",
"[",
"'reps'",
"]",
")",
"stim",
".",
"setUserTag",
"(",
"template",
".",
"get",
"(",
"'user_tag'",
",",
"''",
")",
")",
"# don't set calibration details - this should be the same application wide",
"component_classes",
"=",
"get_stimuli_models",
"(",
")",
"for",
"comp_doc",
"in",
"template",
"[",
"'components'",
"]",
":",
"comp",
"=",
"get_component",
"(",
"comp_doc",
"[",
"'stim_type'",
"]",
",",
"component_classes",
")",
"comp",
".",
"loadState",
"(",
"comp_doc",
")",
"# ignore extra dict entries",
"stim",
".",
"insertComponent",
"(",
"comp",
",",
"*",
"comp_doc",
"[",
"'index'",
"]",
")",
"# revert from location based selection to component list",
"autoparams",
"=",
"template",
"[",
"'autoparameters'",
"]",
"for",
"p",
"in",
"autoparams",
":",
"selection",
"=",
"p",
"[",
"'selection'",
"]",
"component_selection",
"=",
"[",
"]",
"for",
"index",
"in",
"selection",
":",
"component",
"=",
"stim",
".",
"component",
"(",
"*",
"index",
")",
"component_selection",
".",
"append",
"(",
"component",
")",
"p",
"[",
"'selection'",
"]",
"=",
"component_selection",
"stim",
".",
"autoParams",
"(",
")",
".",
"setParameterList",
"(",
"autoparams",
")",
"stim",
".",
"setReorderFunc",
"(",
"order_function",
"(",
"template",
"[",
"'reorder'",
"]",
")",
",",
"template",
"[",
"'reorder'",
"]",
")",
"stim",
".",
"setStimType",
"(",
"template",
"[",
"'testtype'",
"]",
")",
"return",
"stim"
]
| Loads the stimulus to the state provided by a template
:param template: dict that includes all info necessary to recreate stim
:type template: dict
:param stim: Stimulus to apply to, creates a new model if None
:type stim: StimulusModel | [
"Loads",
"the",
"stimlus",
"to",
"the",
"state",
"provided",
"by",
"a",
"template"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L506-L538 | train |
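Minimal shape of a template accepted above (per-component fields beyond 'stim_type' and 'index' come from each component's saved state and are elided here; the 'stim_type' and 'testtype' values are illustrative assumptions):

template = {
    'reps': 5,
    'user_tag': '',
    'components': [{'stim_type': 'Pure Tone', 'index': (0, 0)}],  # plus component state
    'autoparameters': [],   # entries carry 'selection' lists of (row, col) indexes
    'reorder': None,
    'testtype': 'Tuning Curve',
}
stim = StimulusModel.loadFromTemplate(template)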
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.duration | def duration(self):
"""The duration of this stimulus
:returns: float -- duration in seconds
"""
durs = []
for track in self._segments:
durs.append(sum([comp.duration() for comp in track]))
return max(durs) | python | def duration(self):
"""The duration of this stimulus
:returns: float -- duration in seconds
"""
durs = []
for track in self._segments:
durs.append(sum([comp.duration() for comp in track]))
return max(durs) | [
"def",
"duration",
"(",
"self",
")",
":",
"durs",
"=",
"[",
"]",
"for",
"track",
"in",
"self",
".",
"_segments",
":",
"durs",
".",
"append",
"(",
"sum",
"(",
"[",
"comp",
".",
"duration",
"(",
")",
"for",
"comp",
"in",
"track",
"]",
")",
")",
"return",
"max",
"(",
"durs",
")"
]
| The duration of this stimulus
:returns: float -- duration in seconds | [
"The",
"duration",
"of",
"this",
"stimulus"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L557-L566 | train |
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.signal | def signal(self, force_fs=False):
"""The current stimulus in signal representation, this is the sum
of its components
:param force_fs: Allow to use a different samplerate than the default, should be used to recreate historical signals only
:type force_fs: int
:returns: numpy.ndarray -- voltage signal for this stimulus
"""
assert None not in self.voltage_limits, 'Max voltage level not set'
if force_fs:
samplerate = force_fs
else:
samplerate = self.samplerate()
track_signals = []
max_db = max([comp.intensity() for t in self._segments for comp in t])
# atten = self.caldb - max_db
atten = 0
# if max_db > self.caldb:
# raise Exception("Stimulus intensity over maximum")
# print 'caldb:', self.caldb, 'max db:', max_db, 'atten:', atten, 'calv', self.calv
for track in self._segments:
track_list = []
for component in track:
track_list.append(component.signal(fs=samplerate,
atten=0,
caldb=self.caldb,
calv=self.calv))
if len(track_list) > 0:
track_signals.append(np.hstack(track_list))
# track_signals = sorted(track_signals, key=len, reverse=True)
full_len = len(max(track_signals, key=len))
total_signal = np.zeros((full_len,))
for track in track_signals:
total_signal[0:len(track)] += track
# if there are only square waves in stimulus, do not apply calibration --
# it is assumed to not be a signal for the speaker
component_names = list(set([comp.name for track in self._segments for comp in track]))
if 'silence' in component_names:
component_names.remove('silence')
if len(component_names) > 1 or (len(component_names) == 1 and component_names[0] != "Square Wave"):
total_signal = convolve_filter(total_signal, self.impulseResponse)
maxv = self.voltage_limits[0]
to_speaker = True
else:
maxv = self.voltage_limits[1]
to_speaker = False
# last sample should always go to 0, so output isn't stuck on some
# other value when stim ends
total_signal[-1] = 0
undesired_attenuation = 0
# sig_max = max(abs(total_signal))
# if sig_max > self.calv:
# over_db = 20 * np.log10(sig_max/self.calv)
# allowance = float(min(over_db, atten))
# scalev = (10 ** (allowance/20)*self.calv)
# total_signal = total_signal/scalev
# print 'sigmax {}, over_db {}, allowance {}, scalev {}'.format(sig_max, over_db, allowance, scalev)
# atten -= allowance
minv = self.voltage_limits[2]
sig_max = np.max(abs(total_signal))
if sig_max > maxv:
# scale stim down to outputable max
total_signal = (total_signal/sig_max)*maxv
attenuated = 20 * np.log10(sig_max/maxv)
if attenuated <= atten:
atten = atten - attenuated
else:
undesired_attenuation = attenuated - atten
atten = 0
logger = logging.getLogger('main')
logger.warning("STIMULUS AMPLITUDE {:.2f}V EXCEEDS MAXIMUM({}V), RESCALING. \
UNDESIRED ATTENUATION {:.2f}dB".format(sig_max, maxv, undesired_attenuation))
elif sig_max < minv and sig_max !=0 and to_speaker:
before_rms = np.sqrt(np.mean(pow(total_signal,2)))
total_signal = (total_signal/sig_max)*minv
after_rms = np.sqrt(np.mean(pow(total_signal,2)))
attenuated = -20 * np.log10(before_rms/after_rms)
# print 'signal below min, adding {} attenuation'.format(attenuated)
atten += attenuated
return total_signal, atten, undesired_attenuation | python | def signal(self, force_fs=False):
"""The current stimulus in signal representation, this is the sum
of its components
:param force_fs: Allow to use a different samplerate than the default, should be used to recreate historical signals only
:type force_fs: int
:returns: numpy.ndarray -- voltage signal for this stimulus
"""
assert None not in self.voltage_limits, 'Max voltage level not set'
if force_fs:
samplerate = force_fs
else:
samplerate = self.samplerate()
track_signals = []
max_db = max([comp.intensity() for t in self._segments for comp in t])
# atten = self.caldb - max_db
atten = 0
# if max_db > self.caldb:
# raise Exception("Stimulus intensity over maximum")
# print 'caldb:', self.caldb, 'max db:', max_db, 'atten:', atten, 'calv', self.calv
for track in self._segments:
track_list = []
for component in track:
track_list.append(component.signal(fs=samplerate,
atten=0,
caldb=self.caldb,
calv=self.calv))
if len(track_list) > 0:
track_signals.append(np.hstack(track_list))
# track_signals = sorted(track_signals, key=len, reverse=True)
full_len = len(max(track_signals, key=len))
total_signal = np.zeros((full_len,))
for track in track_signals:
total_signal[0:len(track)] += track
# if there are only square waves in stimulus, do not apply calibration --
# it is assumed to not be a signal for the speaker
component_names = list(set([comp.name for track in self._segments for comp in track]))
if 'silence' in component_names:
component_names.remove('silence')
if len(component_names) > 1 or (len(component_names) == 1 and component_names[0] != "Square Wave"):
total_signal = convolve_filter(total_signal, self.impulseResponse)
maxv = self.voltage_limits[0]
to_speaker = True
else:
maxv = self.voltage_limits[1]
to_speaker = False
# last sample should always go to 0, so output isn't stuck on some
# other value when stim ends
total_signal[-1] = 0
undesired_attenuation = 0
# sig_max = max(abs(total_signal))
# if sig_max > self.calv:
# over_db = 20 * np.log10(sig_max/self.calv)
# allowance = float(min(over_db, atten))
# scalev = (10 ** (allowance/20)*self.calv)
# total_signal = total_signal/scalev
# print 'sigmax {}, over_db {}, allowance {}, scalev {}'.format(sig_max, over_db, allowance, scalev)
# atten -= allowance
minv = self.voltage_limits[2]
sig_max = np.max(abs(total_signal))
if sig_max > maxv:
# scale stim down to outputable max
total_signal = (total_signal/sig_max)*maxv
attenuated = 20 * np.log10(sig_max/maxv)
if attenuated <= atten:
atten = atten - attenuated
else:
undesired_attenuation = attenuated - atten
atten = 0
logger = logging.getLogger('main')
logger.warning("STIMULUS AMPLITUDE {:.2f}V EXCEEDS MAXIMUM({}V), RESCALING. \
UNDESIRED ATTENUATION {:.2f}dB".format(sig_max, maxv, undesired_attenuation))
elif sig_max < minv and sig_max !=0 and to_speaker:
before_rms = np.sqrt(np.mean(pow(total_signal,2)))
total_signal = (total_signal/sig_max)*minv
after_rms = np.sqrt(np.mean(pow(total_signal,2)))
attenuated = -20 * np.log10(before_rms/after_rms)
# print 'signal below min, adding {} attenuation'.format(attenuated)
atten += attenuated
return total_signal, atten, undesired_attenuation | [
"def",
"signal",
"(",
"self",
",",
"force_fs",
"=",
"False",
")",
":",
"assert",
"None",
"not",
"in",
"self",
".",
"voltage_limits",
",",
"'Max voltage level not set'",
"if",
"force_fs",
":",
"samplerate",
"=",
"force_fs",
"else",
":",
"samplerate",
"=",
"self",
".",
"samplerate",
"(",
")",
"track_signals",
"=",
"[",
"]",
"max_db",
"=",
"max",
"(",
"[",
"comp",
".",
"intensity",
"(",
")",
"for",
"t",
"in",
"self",
".",
"_segments",
"for",
"comp",
"in",
"t",
"]",
")",
"# atten = self.caldb - max_db",
"atten",
"=",
"0",
"# if max_db > self.caldb:",
"# raise Exception(\"Stimulus intensity over maxium\")",
"# print 'caldb:', self.caldb, 'max db:', max_db, 'atten:', atten, 'calv', self.calv",
"for",
"track",
"in",
"self",
".",
"_segments",
":",
"track_list",
"=",
"[",
"]",
"for",
"component",
"in",
"track",
":",
"track_list",
".",
"append",
"(",
"component",
".",
"signal",
"(",
"fs",
"=",
"samplerate",
",",
"atten",
"=",
"0",
",",
"caldb",
"=",
"self",
".",
"caldb",
",",
"calv",
"=",
"self",
".",
"calv",
")",
")",
"if",
"len",
"(",
"track_list",
")",
">",
"0",
":",
"track_signals",
".",
"append",
"(",
"np",
".",
"hstack",
"(",
"track_list",
")",
")",
"# track_signals = sorted(track_signals, key=len, reverse=True)",
"full_len",
"=",
"len",
"(",
"max",
"(",
"track_signals",
",",
"key",
"=",
"len",
")",
")",
"total_signal",
"=",
"np",
".",
"zeros",
"(",
"(",
"full_len",
",",
")",
")",
"for",
"track",
"in",
"track_signals",
":",
"total_signal",
"[",
"0",
":",
"len",
"(",
"track",
")",
"]",
"+=",
"track",
"# if there is only square waves in stimulus, do not apply calibration --",
"# it is assumed to not be a signal for the speaker",
"component_names",
"=",
"list",
"(",
"set",
"(",
"[",
"comp",
".",
"name",
"for",
"track",
"in",
"self",
".",
"_segments",
"for",
"comp",
"in",
"track",
"]",
")",
")",
"if",
"'silence'",
"in",
"component_names",
":",
"component_names",
".",
"remove",
"(",
"'silence'",
")",
"if",
"len",
"(",
"component_names",
")",
">",
"1",
"or",
"(",
"len",
"(",
"component_names",
")",
"==",
"1",
"and",
"component_names",
"[",
"0",
"]",
"!=",
"\"Square Wave\"",
")",
":",
"total_signal",
"=",
"convolve_filter",
"(",
"total_signal",
",",
"self",
".",
"impulseResponse",
")",
"maxv",
"=",
"self",
".",
"voltage_limits",
"[",
"0",
"]",
"to_speaker",
"=",
"True",
"else",
":",
"maxv",
"=",
"self",
".",
"voltage_limits",
"[",
"1",
"]",
"to_speaker",
"=",
"False",
"# last sample should always go to 0, so output isn't stuck on some",
"# other value when stim ends",
"total_signal",
"[",
"-",
"1",
"]",
"=",
"0",
"undesired_attenuation",
"=",
"0",
"# sig_max = max(abs(total_signal))",
"# if sig_max > self.calv:",
"# over_db = 20 * np.log10(sig_max/self.calv)",
"# allowance = float(min(over_db, atten))",
"# scalev = (10 ** (allowance/20)*self.calv)",
"# total_signal = total_signal/scalev",
"# print 'sigmax {}, over_db {}, allowance {}, scalev {}'.format(sig_max, over_db, allowance, scalev)",
"# atten -= allowance",
"minv",
"=",
"self",
".",
"voltage_limits",
"[",
"2",
"]",
"sig_max",
"=",
"np",
".",
"max",
"(",
"abs",
"(",
"total_signal",
")",
")",
"if",
"sig_max",
">",
"maxv",
":",
"# scale stim down to outputable max",
"total_signal",
"=",
"(",
"total_signal",
"/",
"sig_max",
")",
"*",
"maxv",
"attenuated",
"=",
"20",
"*",
"np",
".",
"log10",
"(",
"sig_max",
"/",
"maxv",
")",
"if",
"attenuated",
"<=",
"atten",
":",
"atten",
"=",
"atten",
"-",
"attenuated",
"else",
":",
"undesired_attenuation",
"=",
"attenuated",
"-",
"atten",
"atten",
"=",
"0",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'main'",
")",
"logger",
".",
"warning",
"(",
"\"STIMULUS AMPLTIUDE {:.2f}V EXCEEDS MAXIMUM({}V), RESCALING. \\\n UNDESIRED ATTENUATION {:.2f}dB\"",
".",
"format",
"(",
"sig_max",
",",
"maxv",
",",
"undesired_attenuation",
")",
")",
"elif",
"sig_max",
"<",
"minv",
"and",
"sig_max",
"!=",
"0",
"and",
"to_speaker",
":",
"before_rms",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"mean",
"(",
"pow",
"(",
"total_signal",
",",
"2",
")",
")",
")",
"total_signal",
"=",
"(",
"total_signal",
"/",
"sig_max",
")",
"*",
"minv",
"after_rms",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"mean",
"(",
"pow",
"(",
"total_signal",
",",
"2",
")",
")",
")",
"attenuated",
"=",
"-",
"20",
"*",
"np",
".",
"log10",
"(",
"before_rms",
"/",
"after_rms",
")",
"# print 'signal below min, adding {} attenuation'.format(attenuated)",
"atten",
"+=",
"attenuated",
"return",
"total_signal",
",",
"atten",
",",
"undesired_attenuation"
]
| The current stimulus in signal representation; this is the sum
of its components
:param force_fs: Allows using a different samplerate than the default; should be used only to recreate historical signals
:type force_fs: int
:returns: numpy.ndarray -- voltage signal for this stimulus | [
"The",
"current",
"stimulus",
"in",
"signal",
"representation",
"this",
"is",
"the",
"sum",
"of",
"its",
"components"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L568-L654 | train |
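A worked instance of the rescaling math above: with a 12 V peak against a 10 V limit and no attenuation headroom, the whole ~1.58 dB becomes undesired attenuation:

import numpy as np

sig_max, maxv, atten = 12.0, 10.0, 0.0
attenuated = 20 * np.log10(sig_max / maxv)   # ~1.58 dB
undesired = max(attenuated - atten, 0.0)     # all ~1.58 dB here, since atten == 0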
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.componentDoc | def componentDoc(self, starttime=True):
"""The documentation for the components, as a dict
:returns: dict -- values are the generation samplerate, and a list of
the individual component docs
"""
samplerate = self.samplerate()
doc_list = []
for row, track in enumerate(self._segments):
start_time = 0
for col, component in enumerate(track):
info = component.stateDict()
info['stim_type'] = component.name
if starttime:
info['start_s'] = start_time
info['index'] = (row, col)
start_time += info['duration']
doc_list.append(info)
return {'samplerate_da':samplerate, 'components' : doc_list} | python | def componentDoc(self, starttime=True):
"""The documentation for the components, as a dict
:returns: dict -- values are the generation samplerate, and a list of
the individual component docs
"""
samplerate = self.samplerate()
doc_list = []
for row, track in enumerate(self._segments):
start_time = 0
for col, component in enumerate(track):
info = component.stateDict()
info['stim_type'] = component.name
if starttime:
info['start_s'] = start_time
info['index'] = (row, col)
start_time += info['duration']
doc_list.append(info)
return {'samplerate_da':samplerate, 'components' : doc_list} | [
"def",
"componentDoc",
"(",
"self",
",",
"starttime",
"=",
"True",
")",
":",
"samplerate",
"=",
"self",
".",
"samplerate",
"(",
")",
"doc_list",
"=",
"[",
"]",
"for",
"row",
",",
"track",
"in",
"enumerate",
"(",
"self",
".",
"_segments",
")",
":",
"start_time",
"=",
"0",
"for",
"col",
",",
"component",
"in",
"enumerate",
"(",
"track",
")",
":",
"info",
"=",
"component",
".",
"stateDict",
"(",
")",
"info",
"[",
"'stim_type'",
"]",
"=",
"component",
".",
"name",
"if",
"starttime",
":",
"info",
"[",
"'start_s'",
"]",
"=",
"start_time",
"info",
"[",
"'index'",
"]",
"=",
"(",
"row",
",",
"col",
")",
"start_time",
"+=",
"info",
"[",
"'duration'",
"]",
"doc_list",
".",
"append",
"(",
"info",
")",
"return",
"{",
"'samplerate_da'",
":",
"samplerate",
",",
"'components'",
":",
"doc_list",
"}"
]
| The documentation for the components, as a dict
:returns: dict -- values are the generation samplerate, and a list of
the individual component docs | [
"The",
"documentation",
"for",
"the",
"components",
"as",
"a",
"dict"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L656-L675 | train |
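Sketch of the returned structure for a one-track, one-component stimulus (values illustrative; extra per-component keys come from stateDict()):

{'samplerate_da': 500000,
 'components': [{'stim_type': 'Pure Tone',     # assumed component name
                 'start_s': 0, 'index': (0, 0), 'duration': 0.02}]}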
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.warning | def warning(self):
"""Checks Stimulus for any warning conditions
:returns: str -- warning message, if any, 0 otherwise
"""
signals, docs, overs = self.expandedStim()
if np.any(np.array(overs) > 0):
msg = 'Stimuli in this test are over the maximum allowable \
voltage output. They will be rescaled with a maximum \
undesired attenuation of {:.2f}dB.'.format(np.amax(overs))
return msg
return 0 | python | def warning(self):
"""Checks Stimulus for any warning conditions
:returns: str -- warning message, if any, 0 otherwise
"""
signals, docs, overs = self.expandedStim()
if np.any(np.array(overs) > 0):
msg = 'Stimuli in this test are over the maximum allowable \
voltage output. They will be rescaled with a maximum \
undesired attenuation of {:.2f}dB.'.format(np.amax(overs))
return msg
return 0 | [
"def",
"warning",
"(",
"self",
")",
":",
"signals",
",",
"docs",
",",
"overs",
"=",
"self",
".",
"expandedStim",
"(",
")",
"if",
"np",
".",
"any",
"(",
"np",
".",
"array",
"(",
"overs",
")",
">",
"0",
")",
":",
"msg",
"=",
"'Stimuli in this test are over the maximum allowable \\\n voltage output. They will be rescaled with a maximum \\\n undesired attenuation of {:.2f}dB.'",
".",
"format",
"(",
"np",
".",
"amax",
"(",
"overs",
")",
")",
"return",
"msg",
"return",
"0"
]
| Checks Stimulus for any warning conditions
:returns: str -- warning message, if any, 0 otherwise | [
"Checks",
"Stimulus",
"for",
"any",
"warning",
"conditions"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L715-L726 | train |
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.verifyExpanded | def verifyExpanded(self, samplerate):
"""Checks the expanded parameters for invalidating conditions
:param samplerate: generation samplerate (Hz), passed on to component verification
:type samplerate: int
:returns: str -- error message, if any, 0 otherwise"""
results = self.expandFunction(self.verifyComponents, args=(samplerate,))
msg = [x for x in results if x]
if len(msg) > 0:
return msg[0]
else:
return 0 | python | def verifyExpanded(self, samplerate):
"""Checks the expanded parameters for invalidating conditions
:param samplerate: generation samplerate (Hz), passed on to component verification
:type samplerate: int
:returns: str -- error message, if any, 0 otherwise"""
results = self.expandFunction(self.verifyComponents, args=(samplerate,))
msg = [x for x in results if x]
if len(msg) > 0:
return msg[0]
else:
return 0 | [
"def",
"verifyExpanded",
"(",
"self",
",",
"samplerate",
")",
":",
"results",
"=",
"self",
".",
"expandFunction",
"(",
"self",
".",
"verifyComponents",
",",
"args",
"=",
"(",
"samplerate",
",",
")",
")",
"msg",
"=",
"[",
"x",
"for",
"x",
"in",
"results",
"if",
"x",
"]",
"if",
"len",
"(",
"msg",
")",
">",
"0",
":",
"return",
"msg",
"[",
"0",
"]",
"else",
":",
"return",
"0"
]
| Checks the expanded parameters for invalidating conditions
:param samplerate: generation samplerate (Hz), passed on to component verification
:type samplerate: int
:returns: str -- error message, if any, 0 otherwise | [
"Checks",
"the",
"expanded",
"parameters",
"for",
"invalidating",
"conditions"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L728-L739 | train |
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.verifyComponents | def verifyComponents(self, samplerate):
"""Checks the current components for invalidating conditions
:param samplerate: generation samplerate (Hz), passed on to component verification
:type samplerate: int
:returns: str -- error message, if any, 0 otherwise
"""
# flatten list of components
components = [comp for track in self._segments for comp in track]
for comp in components:
msg = comp.verify(samplerate=samplerate)
if msg:
return msg
return 0 | python | def verifyComponents(self, samplerate):
"""Checks the current components for invalidating conditions
:param samplerate: generation samplerate (Hz), passed on to component verification
:type samplerate: int
:returns: str -- error message, if any, 0 otherwise
"""
# flatten list of components
components = [comp for track in self._segments for comp in track]
for comp in components:
msg = comp.verify(samplerate=samplerate)
if msg:
return msg
return 0 | [
"def",
"verifyComponents",
"(",
"self",
",",
"samplerate",
")",
":",
"# flatten list of components",
"components",
"=",
"[",
"comp",
"for",
"track",
"in",
"self",
".",
"_segments",
"for",
"comp",
"in",
"track",
"]",
"for",
"comp",
"in",
"components",
":",
"msg",
"=",
"comp",
".",
"verify",
"(",
"samplerate",
"=",
"samplerate",
")",
"if",
"msg",
":",
"return",
"msg",
"return",
"0"
]
| Checks the current components for invalidating conditions
:param samplerate: generation samplerate (Hz), passed on to component verification
:type samplerate: int
:returns: str -- error message, if any, 0 otherwise | [
"Checks",
"the",
"current",
"components",
"for",
"invalidating",
"conditions"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L741-L754 | train |
portfors-lab/sparkle | sparkle/stim/stimulus_model.py | StimulusModel.verify | def verify(self, windowSize=None):
"""Checks the stimulus, including expanded parameters for invalidating conditions
:param windowSize: acquisition (recording) window size (seconds)
:type windowSize: float
:returns: str -- error message, if any, 0 otherwise"""
if self.samplerate() is None:
return "Multiple recording files with conflicting samplerates"
msg = self._autoParams.verify()
if msg:
return msg
if self.traceCount() == 0:
return "Test is empty"
if windowSize is not None:
durations = self.expandFunction(self.duration)
# print 'windowSize', windowSize, 'self', durations[0], durations[-1]
# ranges are linear, so we only need to test first and last
if durations[0] > windowSize or durations[-1] > windowSize:
return "Stimulus duration exceeds window duration"
msg = self.verifyExpanded(self.samplerate())
if msg:
return msg
if self.caldb is None or self.calv is None:
return "Test reference voltage not set"
if None in self.voltage_limits:
return "Device voltage limits not set"
return 0 | python | def verify(self, windowSize=None):
"""Checks the stimulus, including expanded parameters for invalidating conditions
:param windowSize: acquisition (recording) window size (seconds)
:type windowSize: float
:returns: str -- error message, if any, 0 otherwise"""
if self.samplerate() is None:
return "Multiple recording files with conflicting samplerates"
msg = self._autoParams.verify()
if msg:
return msg
if self.traceCount() == 0:
return "Test is empty"
if windowSize is not None:
durations = self.expandFunction(self.duration)
# print 'windowSize', windowSize, 'self', durations[0], durations[-1]
# ranges are linear, so we only need to test first and last
if durations[0] > windowSize or durations[-1] > windowSize:
return "Stimulus duration exceeds window duration"
msg = self.verifyExpanded(self.samplerate())
if msg:
return msg
if self.caldb is None or self.calv is None:
return "Test reference voltage not set"
if None in self.voltage_limits:
return "Device voltage limits not set"
return 0 | [
"def",
"verify",
"(",
"self",
",",
"windowSize",
"=",
"None",
")",
":",
"if",
"self",
".",
"samplerate",
"(",
")",
"is",
"None",
":",
"return",
"\"Multiple recording files with conflicting samplerates\"",
"msg",
"=",
"self",
".",
"_autoParams",
".",
"verify",
"(",
")",
"if",
"msg",
":",
"return",
"msg",
"if",
"self",
".",
"traceCount",
"(",
")",
"==",
"0",
":",
"return",
"\"Test is empty\"",
"if",
"windowSize",
"is",
"not",
"None",
":",
"durations",
"=",
"self",
".",
"expandFunction",
"(",
"self",
".",
"duration",
")",
"# print 'windowSize', windowSize, 'self', durations[0], durations[-1]",
"# ranges are linear, so we only need to test first and last",
"if",
"durations",
"[",
"0",
"]",
">",
"windowSize",
"or",
"durations",
"[",
"-",
"1",
"]",
">",
"windowSize",
":",
"return",
"\"Stimulus duration exceeds window duration\"",
"msg",
"=",
"self",
".",
"verifyExpanded",
"(",
"self",
".",
"samplerate",
"(",
")",
")",
"if",
"msg",
":",
"return",
"msg",
"if",
"self",
".",
"caldb",
"is",
"None",
"or",
"self",
".",
"calv",
"is",
"None",
":",
"return",
"\"Test reference voltage not set\"",
"if",
"None",
"in",
"self",
".",
"voltage_limits",
":",
"return",
"\"Device voltage limits not set\"",
"return",
"0"
]
| Checks the stimulus, including expanded parameters for invalidating conditions
:param windowSize: acquisition (recording) window size (seconds)
:type windowSize: float
:returns: str -- error message, if any, 0 otherwise | [
"Checks",
"the",
"stimulus",
"including",
"expanded",
"parameters",
"for",
"invalidating",
"conditions"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L756-L782 | train |
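A typical pre-acquisition gate built on the record above (sketch):

err = model.verify(windowSize=0.2)   # acquisition window in seconds
if err:
    raise RuntimeError(err)          # e.g. 'Stimulus duration exceeds window duration'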
portfors-lab/sparkle | sparkle/acq/daq_tasks.py | get_ao_chans | def get_ao_chans(dev):
"""Discover and return a list of the names of all analog output channels for the given device
:param dev: the device name
:type dev: str
"""
buf = create_string_buffer(256)
buflen = c_uint32(sizeof(buf))
DAQmxGetDevAOPhysicalChans(dev.encode(), buf, buflen)
pybuf = buf.value
chans = pybuf.decode(u'utf-8').split(u",")
return chans | python | def get_ao_chans(dev):
"""Discover and return a list of the names of all analog output channels for the given device
:param dev: the device name
:type dev: str
"""
buf = create_string_buffer(256)
buflen = c_uint32(sizeof(buf))
DAQmxGetDevAOPhysicalChans(dev.encode(), buf, buflen)
pybuf = buf.value
chans = pybuf.decode(u'utf-8').split(u",")
return chans | [
"def",
"get_ao_chans",
"(",
"dev",
")",
":",
"buf",
"=",
"create_string_buffer",
"(",
"256",
")",
"buflen",
"=",
"c_uint32",
"(",
"sizeof",
"(",
"buf",
")",
")",
"DAQmxGetDevAOPhysicalChans",
"(",
"dev",
".",
"encode",
"(",
")",
",",
"buf",
",",
"buflen",
")",
"pybuf",
"=",
"buf",
".",
"value",
"chans",
"=",
"pybuf",
".",
"decode",
"(",
"u'utf-8'",
")",
".",
"split",
"(",
"u\",\"",
")",
"return",
"chans"
]
| Discover and return a list of the names of all analog output channels for the given device
:param dev: the device name
:type dev: str | [
"Discover",
"and",
"return",
"a",
"list",
"of",
"the",
"names",
"of",
"all",
"analog",
"output",
"channels",
"for",
"the",
"given",
"device"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/daq_tasks.py#L313-L324 | train |
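Sketch pairing this helper with get_devices() from the same module; channel names follow NI's Dev<n>/ao<m> convention:

devices = get_devices()            # e.g. [u'Dev1']
chans = get_ao_chans(devices[0])   # e.g. [u'Dev1/ao0', u'Dev1/ao1']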
portfors-lab/sparkle | sparkle/acq/daq_tasks.py | get_devices | def get_devices():
"""Discover and return a list of the names of all NI devices on this system"""
buf = create_string_buffer(512)
buflen = c_uint32(sizeof(buf))
DAQmxGetSysDevNames(buf, buflen)
pybuf = buf.value
devices = pybuf.decode(u'utf-8').split(u",")
return devices | python | def get_devices():
"""Discover and return a list of the names of all NI devices on this system"""
buf = create_string_buffer(512)
buflen = c_uint32(sizeof(buf))
DAQmxGetSysDevNames(buf, buflen)
pybuf = buf.value
devices = pybuf.decode(u'utf-8').split(u",")
return devices | [
"def",
"get_devices",
"(",
")",
":",
"buf",
"=",
"create_string_buffer",
"(",
"512",
")",
"buflen",
"=",
"c_uint32",
"(",
"sizeof",
"(",
"buf",
")",
")",
"DAQmxGetSysDevNames",
"(",
"buf",
",",
"buflen",
")",
"pybuf",
"=",
"buf",
".",
"value",
"devices",
"=",
"pybuf",
".",
"decode",
"(",
"u'utf-8'",
")",
".",
"split",
"(",
"u\",\"",
")",
"return",
"devices"
]
| Discover and return a list of the names of all NI devices on this system | [
"Discover",
"and",
"return",
"a",
"list",
"of",
"the",
"names",
"of",
"all",
"NI",
"devices",
"on",
"this",
"system"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/daq_tasks.py#L339-L346 | train |
portfors-lab/sparkle | sparkle/acq/daq_tasks.py | AOTask.write | def write(self, output):
"""Writes the data to be output to the device buffer, output will be looped when the data runs out
:param output: data to output
:type output: numpy.ndarray
"""
w = c_int32()
# print "output max", max(abs(output))
self.WriteAnalogF64(self.bufsize, 0, 10.0, DAQmx_Val_GroupByChannel,
output, w, None); | python | def write(self, output):
"""Writes the data to be output to the device buffer, output will be looped when the data runs out
:param output: data to output
:type output: numpy.ndarray
"""
w = c_int32()
# print "output max", max(abs(output))
self.WriteAnalogF64(self.bufsize, 0, 10.0, DAQmx_Val_GroupByChannel,
output, w, None); | [
"def",
"write",
"(",
"self",
",",
"output",
")",
":",
"w",
"=",
"c_int32",
"(",
")",
"# print \"output max\", max(abs(output))",
"self",
".",
"WriteAnalogF64",
"(",
"self",
".",
"bufsize",
",",
"0",
",",
"10.0",
",",
"DAQmx_Val_GroupByChannel",
",",
"output",
",",
"w",
",",
"None",
")"
]
| Writes the data to be output to the device buffer, output will be looped when the data runs out
:param output: data to output
:type output: numpy.ndarray | [
"Writes",
"the",
"data",
"to",
"be",
"output",
"to",
"the",
"device",
"buffer",
"output",
"will",
"be",
"looped",
"when",
"the",
"data",
"runs",
"out"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/daq_tasks.py#L109-L118 | train |
portfors-lab/sparkle | sparkle/gui/plotting/calibration_explore_display.py | ExtendedCalibrationDisplay.setXlimits | def setXlimits(self, lims):
"""Sets the X axis limits of the signal plots
:param lims: (min, max) of x axis, in same units as data
:type lims: (float, float)
"""
self.responseSignalPlot.setXlim(lims)
self.stimSignalPlot.setXlim(lims) | python | def setXlimits(self, lims):
"""Sets the X axis limits of the signal plots
:param lims: (min, max) of x axis, in same units as data
:type lims: (float, float)
"""
self.responseSignalPlot.setXlim(lims)
self.stimSignalPlot.setXlim(lims) | [
"def",
"setXlimits",
"(",
"self",
",",
"lims",
")",
":",
"self",
".",
"responseSignalPlot",
".",
"setXlim",
"(",
"lims",
")",
"self",
".",
"stimSignalPlot",
".",
"setXlim",
"(",
"lims",
")"
]
| Sets the X axis limits of the signal plots
:param lims: (min, max) of x axis, in same units as data
:type lims: (float, float) | [
"Sets",
"the",
"X",
"axis",
"limits",
"of",
"the",
"signal",
"plots"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/plotting/calibration_explore_display.py#L136-L143 | train |
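Usage is a one-liner; a hedged sketch, with the display construction assumed:

display = ExtendedCalibrationDisplay()   # constructor arguments assumed
display.setXlimits((0.0, 0.2))   # show the first 200 ms on both signal plots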
NoviceLive/pat | pat/pat.py | Pat.from_chars | def from_chars(cls, chars='', optimal=3):
"""Construct a Pat object from the specified string
and optimal position count."""
if not chars:
chars = ''.join(ALNUM)
sets = most_even_chunk(chars, optimal)
return cls(sets) | python | def from_chars(cls, chars='', optimal=3):
"""Construct a Pat object from the specified string
and optimal position count."""
if not chars:
chars = ''.join(ALNUM)
sets = most_even_chunk(chars, optimal)
return cls(sets) | [
"def",
"from_chars",
"(",
"cls",
",",
"chars",
"=",
"''",
",",
"optimal",
"=",
"3",
")",
":",
"if",
"not",
"chars",
":",
"chars",
"=",
"''",
".",
"join",
"(",
"ALNUM",
")",
"sets",
"=",
"most_even_chunk",
"(",
"chars",
",",
"optimal",
")",
"return",
"cls",
"(",
"sets",
")"
]
| Construct a Pat object from the specified string
and optimal position count. | [
"Construct",
"a",
"Pat",
"object",
"from",
"the",
"specified",
"string",
"and",
"optimal",
"position",
"count",
"."
]
| bd223fc5e758213662befbebdf9538f3fbf58ad6 | https://github.com/NoviceLive/pat/blob/bd223fc5e758213662befbebdf9538f3fbf58ad6/pat/pat.py#L47-L53 | train |
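A sketch of constructing Pat objects. How most_even_chunk splits the pool is an assumption (near-equal chunks, inferred from its name):

pat = Pat.from_chars()                        # full alphanumeric pool, 3 sets
pat2 = Pat.from_chars('ABCabc01', optimal=2)  # presumably ['ABCa', 'bc01']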
NoviceLive/pat | pat/pat.py | Pat.create | def create(self, count):
"""Create a pattern of the specified length."""
space, self.space = tee(self.space)
limit = reduce(mul, map(len, self.sets)) * self.position
logging.debug('limit: %s', limit)
if limit >= count:
return ''.join(islice(space, count))
else:
raise IndexError('{count} Overflows {sets}!'.format(
count=count, sets=self.sets)) | python | def create(self, count):
"""Create a pattern of the specified length."""
space, self.space = tee(self.space)
limit = reduce(mul, map(len, self.sets)) * self.position
logging.debug('limit: %s', limit)
if limit >= count:
return ''.join(islice(space, count))
else:
raise IndexError('{count} Overflows {sets}!'.format(
count=count, sets=self.sets)) | [
"def",
"create",
"(",
"self",
",",
"count",
")",
":",
"space",
",",
"self",
".",
"space",
"=",
"tee",
"(",
"self",
".",
"space",
")",
"limit",
"=",
"reduce",
"(",
"mul",
",",
"map",
"(",
"len",
",",
"self",
".",
"sets",
")",
")",
"*",
"self",
".",
"position",
"logging",
".",
"debug",
"(",
"'limit: %s'",
",",
"limit",
")",
"if",
"limit",
">=",
"count",
":",
"return",
"''",
".",
"join",
"(",
"islice",
"(",
"space",
",",
"count",
")",
")",
"else",
":",
"raise",
"IndexError",
"(",
"'{count} Overflows {sets}!'",
".",
"format",
"(",
"count",
"=",
"count",
",",
"sets",
"=",
"self",
".",
"sets",
")",
")"
]
| Create a pattern of the specified length. | [
"Create",
"a",
"pattern",
"of",
"the",
"specified",
"length",
"."
]
| bd223fc5e758213662befbebdf9538f3fbf58ad6 | https://github.com/NoviceLive/pat/blob/bd223fc5e758213662befbebdf9538f3fbf58ad6/pat/pat.py#L63-L72 | train |
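A usage sketch for create. The 'Aa0Aa1...' output assumes a Metasploit-style cyclic layout over the three default sets; the overflow behavior follows directly from the limit check in the code above:

pat = Pat.from_chars()
print(pat.create(12))   # e.g. 'Aa0Aa1Aa2Aa3' under the assumed layout
pat.create(10 ** 9)     # raises IndexError: 1000000000 Overflows [...]!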
NoviceLive/pat | pat/pat.py | Pat.locate | def locate(self, pattern, big_endian=False):
"""Locate the pattern."""
space, self.space = tee(self.space)
if pattern.startswith('0x'):
target = unhexlify(
pattern[2:].encode('utf-8')).decode('utf-8')
if not big_endian:
target = target[::-1]
else:
target = pattern
for index, one in enumerate(window(space, self.position)):
if ''.join(one) == target[:self.position]:
return index
raise KeyError('{target} Not Found In {sets}!'.format(
target=pattern, sets=self.sets)) | python | def locate(self, pattern, big_endian=False):
"""Locate the pattern."""
space, self.space = tee(self.space)
if pattern.startswith('0x'):
target = unhexlify(
pattern[2:].encode('utf-8')).decode('utf-8')
if not big_endian:
target = target[::-1]
else:
target = pattern
for index, one in enumerate(window(space, self.position)):
if ''.join(one) == target[:self.position]:
return index
raise KeyError('{target} Not Found In {sets}!'.format(
target=pattern, sets=self.sets)) | [
"def",
"locate",
"(",
"self",
",",
"pattern",
",",
"big_endian",
"=",
"False",
")",
":",
"space",
",",
"self",
".",
"space",
"=",
"tee",
"(",
"self",
".",
"space",
")",
"if",
"pattern",
".",
"startswith",
"(",
"'0x'",
")",
":",
"target",
"=",
"unhexlify",
"(",
"pattern",
"[",
"2",
":",
"]",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"not",
"big_endian",
":",
"target",
"=",
"target",
"[",
":",
":",
"-",
"1",
"]",
"else",
":",
"target",
"=",
"pattern",
"for",
"index",
",",
"one",
"in",
"enumerate",
"(",
"window",
"(",
"space",
",",
"self",
".",
"position",
")",
")",
":",
"if",
"''",
".",
"join",
"(",
"one",
")",
"==",
"target",
"[",
":",
"self",
".",
"position",
"]",
":",
"return",
"index",
"raise",
"KeyError",
"(",
"'{target} Not Found In {sets}!'",
".",
"format",
"(",
"target",
"=",
"pattern",
",",
"sets",
"=",
"self",
".",
"sets",
")",
")"
]
| Locate the pattern. | [
"Locate",
"the",
"pattern",
"."
]
| bd223fc5e758213662befbebdf9538f3fbf58ad6 | https://github.com/NoviceLive/pat/blob/bd223fc5e758213662befbebdf9538f3fbf58ad6/pat/pat.py#L74-L88 | train |
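locate takes either a raw substring or a '0x'-prefixed hex value; hex input is byte-reversed by default, matching how an overwritten return address reads out of a little-endian register. A sketch (the presence of 'Aa1' in the default space is an assumption about the layout):

pat = Pat.from_chars()
pat.locate('Aa1')                        # index of the window matching 'Aa1'
pat.locate('0x316141')                   # unhexlify -> '1aA', reversed -> 'Aa1'
pat.locate('0x416131', big_endian=True)  # bytes kept in order -> 'Aa1'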
JukeboxPipeline/jukeboxmaya | src/jukeboxmaya/common.py | preserve_namespace | def preserve_namespace(newns=None):
"""Contextmanager that will restore the current namespace
:param newns: a name of namespace that should be set in the beginning. the original namespace will be restored afterwards.
If None, does not set a namespace.
:type newns: str | None
:returns: None
:rtype: None
:raises: None
"""
ns = cmds.namespaceInfo(an=True)
try:
cmds.namespace(set=newns)
yield
finally:
cmds.namespace(set=ns) | python | def preserve_namespace(newns=None):
"""Contextmanager that will restore the current namespace
:param newns: a name of namespace that should be set in the beginning. the original namespace will be restored afterwards.
If None, does not set a namespace.
:type newns: str | None
:returns: None
:rtype: None
:raises: None
"""
ns = cmds.namespaceInfo(an=True)
try:
cmds.namespace(set=newns)
yield
finally:
cmds.namespace(set=ns) | [
"def",
"preserve_namespace",
"(",
"newns",
"=",
"None",
")",
":",
"ns",
"=",
"cmds",
".",
"namespaceInfo",
"(",
"an",
"=",
"True",
")",
"try",
":",
"cmds",
".",
"namespace",
"(",
"set",
"=",
"newns",
")",
"yield",
"finally",
":",
"cmds",
".",
"namespace",
"(",
"set",
"=",
"ns",
")"
]
| Contextmanager that will restore the current namespace
:param newns: a name of namespace that should be set in the beginning. the original namespace will be restored afterwards.
If None, does not set a namespace.
:type newns: str | None
:returns: None
:rtype: None
:raises: None | [
"Contextmanager",
"that",
"will",
"restore",
"the",
"current",
"namespace"
]
| c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c | https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/common.py#L8-L23 | train |
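A minimal sketch inside Maya. One caveat is visible in the record itself: the docstring says a newns of None skips the namespace switch, but the body as recorded calls cmds.namespace(set=newns) unconditionally, so pass a real namespace:

import maya.cmds as cmds

cmds.namespace(add='rig')        # hypothetical namespace for the example
with preserve_namespace(':rig'):
    cmds.polyCube()              # created while ':rig' is current
# the previously current namespace is restored here, even after an error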
JukeboxPipeline/jukeboxmaya | src/jukeboxmaya/common.py | preserve_selection | def preserve_selection():
"""Contextmanager that will restore the current selection
:returns: None
:rtype: None
:raises: None
"""
sl = cmds.ls(sl=True)
try:
yield
finally:
cmds.select(sl, replace=True) | python | def preserve_selection():
"""Contextmanager that will restore the current selection
:returns: None
:rtype: None
:raises: None
"""
sl = cmds.ls(sl=True)
try:
yield
finally:
cmds.select(sl, replace=True) | [
"def",
"preserve_selection",
"(",
")",
":",
"sl",
"=",
"cmds",
".",
"ls",
"(",
"sl",
"=",
"True",
")",
"try",
":",
"yield",
"finally",
":",
"cmds",
".",
"select",
"(",
"sl",
",",
"replace",
"=",
"True",
")"
]
| Contextmanager that will restore the current selection
:returns: None
:rtype: None
:raises: None | [
"Contextmanager",
"that",
"will",
"restore",
"the",
"current",
"selection"
]
| c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c | https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/common.py#L27-L38 | train |
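And a matching sketch for preserve_selection (object names are placeholders):

with preserve_selection():
    cmds.select('pSphere1', replace=True)
    cmds.xform('pSphere1', translation=(0, 1, 0))  # work on a side selection
# the original selection is restored here via cmds.select(sl, replace=True)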