body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring |
---|---|---|---|---|---|---|---|
def test_remove(cache):
'Remove item from cache.'
cache.set('key', 'value')
cache.remove('key')
with pytest.raises(KeyError):
cache.get('key') | -3,648,456,079,809,969,700 | Remove item from cache. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_remove | Mikfr83/OpenPype | python | def test_remove(cache):
cache.set('key', 'value')
cache.remove('key')
with pytest.raises(KeyError):
cache.get('key') |
def test_remove_missing_key(cache):
'Fail to remove missing key.'
with pytest.raises(KeyError):
cache.remove('key') | -1,180,368,428,535,808,000 | Fail to remove missing key. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_remove_missing_key | Mikfr83/OpenPype | python | def test_remove_missing_key(cache):
with pytest.raises(KeyError):
cache.remove('key') |
def test_keys(cache):
'Retrieve keys of items in cache.'
assert (cache.keys() == [])
cache.set('a', 'a_value')
cache.set('b', 'b_value')
cache.set('c', 'c_value')
assert (sorted(cache.keys()) == sorted(['a', 'b', 'c'])) | 2,380,956,238,466,858,000 | Retrieve keys of items in cache. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_keys | Mikfr83/OpenPype | python | def test_keys(cache):
assert (cache.keys() == [])
cache.set('a', 'a_value')
cache.set('b', 'b_value')
cache.set('c', 'c_value')
assert (sorted(cache.keys()) == sorted(['a', 'b', 'c'])) |
def test_clear(cache):
'Remove items from cache.'
cache.set('a', 'a_value')
cache.set('b', 'b_value')
cache.set('c', 'c_value')
assert cache.keys()
cache.clear()
assert (not cache.keys()) | 8,938,928,090,908,785,000 | Remove items from cache. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_clear | Mikfr83/OpenPype | python | def test_clear(cache):
cache.set('a', 'a_value')
cache.set('b', 'b_value')
cache.set('c', 'c_value')
assert cache.keys()
cache.clear()
assert (not cache.keys()) |
def test_clear_using_pattern(cache):
'Remove items that match pattern from cache.'
cache.set('matching_key', 'value')
cache.set('another_matching_key', 'value')
cache.set('key_not_matching', 'value')
assert cache.keys()
cache.clear(pattern='.*matching_key$')
assert (cache.keys() == ['key_not_matching']) | -3,497,932,755,989,748,000 | Remove items that match pattern from cache. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_clear_using_pattern | Mikfr83/OpenPype | python | def test_clear_using_pattern(cache):
cache.set('matching_key', 'value')
cache.set('another_matching_key', 'value')
cache.set('key_not_matching', 'value')
assert cache.keys()
cache.clear(pattern='.*matching_key$')
assert (cache.keys() == ['key_not_matching']) |
def test_clear_encountering_missing_key(cache, mocker):
'Clear missing key.'
mocker.patch.object(cache, 'keys', (lambda : ['missing']))
assert (cache.keys() == ['missing'])
cache.clear()
assert (cache.keys() == ['missing']) | 706,498,093,905,534,000 | Clear missing key. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_clear_encountering_missing_key | Mikfr83/OpenPype | python | def test_clear_encountering_missing_key(cache, mocker):
mocker.patch.object(cache, 'keys', (lambda : ['missing']))
assert (cache.keys() == ['missing'])
cache.clear()
assert (cache.keys() == ['missing']) |
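
The six tests above pin down a small cache contract: set, get, remove, keys, and a pattern-aware clear that tolerates keys vanishing mid-iteration. As a reading aid, here is a minimal hypothetical implementation that would satisfy them; this is a sketch of the assumed contract, not the actual ftrack_api.cache.MemoryCache.

```python
import re


class SketchMemoryCache(object):
    """A minimal sketch of the cache interface the tests above assume."""

    def __init__(self):
        self._items = {}

    def set(self, key, value):
        self._items[key] = value

    def get(self, key):
        # Missing keys raise KeyError, as test_remove expects after removal.
        return self._items[key]

    def remove(self, key):
        # Missing keys raise KeyError, as test_remove_missing_key expects.
        del self._items[key]

    def keys(self):
        return list(self._items.keys())

    def clear(self, pattern=None):
        for key in self.keys():
            if pattern is not None and not re.search(pattern, key):
                continue
            # Tolerate keys removed concurrently, as
            # test_clear_encountering_missing_key expects.
            try:
                self.remove(key)
            except KeyError:
                pass
```
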
def test_layered_cache_propagates_value_on_get():
'Layered cache propagates value on get.'
caches = [ftrack_api.cache.MemoryCache(), ftrack_api.cache.MemoryCache(), ftrack_api.cache.MemoryCache()]
cache = ftrack_api.cache.LayeredCache(caches)
caches[1].set('key', 'value')
assert (cache.get('key') == 'value')
assert (caches[0].get('key') == 'value')
with pytest.raises(KeyError):
caches[2].get('key') | 6,359,358,823,997,194,000 | Layered cache propagates value on get. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_layered_cache_propagates_value_on_get | Mikfr83/OpenPype | python | def test_layered_cache_propagates_value_on_get():
caches = [ftrack_api.cache.MemoryCache(), ftrack_api.cache.MemoryCache(), ftrack_api.cache.MemoryCache()]
cache = ftrack_api.cache.LayeredCache(caches)
caches[1].set('key', 'value')
assert (cache.get('key') == 'value')
assert (caches[0].get('key') == 'value')
with pytest.raises(KeyError):
caches[2].get('key') |
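
test_layered_cache_propagates_value_on_get depends on read-through behaviour: a hit in a deeper layer is copied back into the shallower layers that missed, but never pushed down. A hypothetical sketch of that lookup, assuming each layer exposes the get/set interface above:

```python
class SketchLayeredCache(object):
    """A sketch of read-through layering: a hit in a deeper cache is copied
    into the shallower caches above it, never into the deeper ones below."""

    def __init__(self, caches):
        self.caches = caches

    def get(self, key):
        missed = []
        for cache in self.caches:
            try:
                value = cache.get(key)
            except KeyError:
                missed.append(cache)
            else:
                # Back-fill only the layers that missed above the hit; this
                # is why caches[2] in the test still raises KeyError.
                for shallower in missed:
                    shallower.set(key, value)
                return value
        raise KeyError(key)
```
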
def test_layered_cache_remove_at_depth():
'Remove key that only exists at depth in LayeredCache.'
caches = [ftrack_api.cache.MemoryCache(), ftrack_api.cache.MemoryCache()]
cache = ftrack_api.cache.LayeredCache(caches)
caches[1].set('key', 'value')
cache.remove('key')
assert (not cache.keys()) | -5,683,228,833,643,329,000 | Remove key that only exists at depth in LayeredCache. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_layered_cache_remove_at_depth | Mikfr83/OpenPype | python | def test_layered_cache_remove_at_depth():
caches = [ftrack_api.cache.MemoryCache(), ftrack_api.cache.MemoryCache()]
cache = ftrack_api.cache.LayeredCache(caches)
caches[1].set('key', 'value')
cache.remove('key')
assert (not cache.keys()) |
def test_expand_references():
'Test that references are expanded from serialized cache.'
cache_path = os.path.join(tempfile.gettempdir(), '{0}.dbm'.format(uuid.uuid4().hex))
def make_cache(session, cache_path):
'Create a serialised file cache.'
serialized_file_cache = ftrack_api.cache.SerialisedCache(ftrack_api.cache.FileCache(cache_path), encode=session.encode, decode=session.decode)
return serialized_file_cache
session = ftrack_api.Session(cache=(lambda session, cache_path=cache_path: make_cache(session, cache_path)))
expanded_results = dict()
query_string = 'select asset.parent from AssetVersion where asset is_not None limit 10'
for sequence in session.query(query_string):
asset = sequence.get('asset')
expanded_results.setdefault(asset.get('id'), asset.get('parent'))
new_session = ftrack_api.Session(cache=(lambda session, cache_path=cache_path: make_cache(session, cache_path)))
new_session_two = ftrack_api.Session(cache=(lambda session, cache_path=cache_path: make_cache(session, cache_path)))
for sequence in new_session.query(query_string):
asset = sequence.get('asset')
assert (asset.get('parent') == expanded_results[asset.get('id')])
assert (new_session_two.get(asset.entity_type, asset.get('id')).get('parent') == expanded_results[asset.get('id')]) | -3,992,479,736,581,640,700 | Test that references are expanded from serialized cache. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_expand_references | Mikfr83/OpenPype | python | def test_expand_references():
cache_path = os.path.join(tempfile.gettempdir(), '{0}.dbm'.format(uuid.uuid4().hex))
def make_cache(session, cache_path):
'Create a serialised file cache.'
serialized_file_cache = ftrack_api.cache.SerialisedCache(ftrack_api.cache.FileCache(cache_path), encode=session.encode, decode=session.decode)
return serialized_file_cache
session = ftrack_api.Session(cache=(lambda session, cache_path=cache_path: make_cache(session, cache_path)))
expanded_results = dict()
query_string = 'select asset.parent from AssetVersion where asset is_not None limit 10'
for sequence in session.query(query_string):
asset = sequence.get('asset')
expanded_results.setdefault(asset.get('id'), asset.get('parent'))
new_session = ftrack_api.Session(cache=(lambda session, cache_path=cache_path: make_cache(session, cache_path)))
new_session_two = ftrack_api.Session(cache=(lambda session, cache_path=cache_path: make_cache(session, cache_path)))
for sequence in new_session.query(query_string):
asset = sequence.get('asset')
assert (asset.get('parent') == expanded_results[asset.get('id')])
assert (new_session_two.get(asset.entity_type, asset.get('id')).get('parent') == expanded_results[asset.get('id')]) |
@pytest.mark.parametrize('items, key', [(({},), '{}'), (({}, {}), '{}{}')], ids=['single object', 'multiple objects'])
def test_string_key_maker_key(items, key):
'Generate key using string key maker.'
key_maker = ftrack_api.cache.StringKeyMaker()
assert (key_maker.key(*items) == key) | 7,473,182,888,146,984,000 | Generate key using string key maker. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_string_key_maker_key | Mikfr83/OpenPype | python | @pytest.mark.parametrize('items, key', [(({},), '{}'), (({}, {}), '{}{}')], ids=['single object', 'multiple objects'])
def test_string_key_maker_key(items, key):
key_maker = ftrack_api.cache.StringKeyMaker()
assert (key_maker.key(*items) == key) |
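
The two parametrised cases above are consistent with a key maker that simply concatenates a string form of each item. A hypothetical sketch (the real StringKeyMaker may differ in detail):

```python
class SketchStringKeyMaker(object):
    """A sketch consistent with the two parametrised cases above."""

    def key(self, *items):
        # str({}) == '{}', so key({}) -> '{}' and key({}, {}) -> '{}{}'.
        return ''.join(str(item) for item in items)
```
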
@pytest.mark.parametrize('items, key', [(({},), '\x01\x01'), (({'a': 'b'}, [1, 2]), '\x01\x80\x02U\x01a.\x02\x80\x02U\x01b.\x01\x00\x03\x80\x02K\x01.\x00\x80\x02K\x02.\x03'), ((function,), '\x04function\x00unit.test_cache'), ((Class,), '\x04Class\x00unit.test_cache'), ((Class.method,), '\x04method\x00Class\x00unit.test_cache'), ((callable,), '\x04callable')], ids=['single mapping', 'multiple objects', 'function', 'class', 'method', 'builtin'])
def test_object_key_maker_key(items, key):
'Generate key using object key maker.'
key_maker = ftrack_api.cache.ObjectKeyMaker()
assert (key_maker.key(*items) == key) | 5,483,889,124,009,491,000 | Generate key using object key maker. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_object_key_maker_key | Mikfr83/OpenPype | python | @pytest.mark.parametrize('items, key', [(({},), '\x01\x01'), (({'a': 'b'}, [1, 2]), '\x01\x80\x02U\x01a.\x02\x80\x02U\x01b.\x01\x00\x03\x80\x02K\x01.\x00\x80\x02K\x02.\x03'), ((function,), '\x04function\x00unit.test_cache'), ((Class,), '\x04Class\x00unit.test_cache'), ((Class.method,), '\x04method\x00Class\x00unit.test_cache'), ((callable,), '\x04callable')], ids=['single mapping', 'multiple objects', 'function', 'class', 'method', 'builtin'])
def test_object_key_maker_key(items, key):
key_maker = ftrack_api.cache.ObjectKeyMaker()
assert (key_maker.key(*items) == key) |
def test_memoised_call():
'Call memoised function.'
memoiser = ftrack_api.cache.Memoiser()
assert_memoised_call(memoiser, function, args=(1,), expected={'result': 3}, memoised=False)
assert_memoised_call(memoiser, function, args=(1,), expected={'result': 3}, memoised=True)
assert_memoised_call(memoiser, function, args=(3,), expected={'result': 5}, memoised=False) | 8,366,717,468,665,115,000 | Call memoised function. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_memoised_call | Mikfr83/OpenPype | python | def test_memoised_call():
memoiser = ftrack_api.cache.Memoiser()
assert_memoised_call(memoiser, function, args=(1,), expected={'result': 3}, memoised=False)
assert_memoised_call(memoiser, function, args=(1,), expected={'result': 3}, memoised=True)
assert_memoised_call(memoiser, function, args=(3,), expected={'result': 5}, memoised=False) |
def test_memoised_call_variations():
'Call memoised function with identical arguments using variable format.'
memoiser = ftrack_api.cache.Memoiser()
expected = {'result': 3}
assert_memoised_call(memoiser, function, args=(1,), expected=expected, memoised=False)
for (args, kw) in [((), {'x': 1}), ((), {'x': 1, 'y': 2}), ((1,), {'y': 2}), ((1,), {})]:
assert_memoised_call(memoiser, function, args=args, kw=kw, expected=expected)
assert_memoised_call(memoiser, function, kw={'x': 2}, expected={'result': 4}, memoised=False)
assert_memoised_call(memoiser, function, kw={'x': 3, 'y': 2}, expected={'result': 5}, memoised=False)
assert_memoised_call(memoiser, function, args=(4,), kw={'y': 2}, expected={'result': 6}, memoised=False)
assert_memoised_call(memoiser, function, args=(5,), expected={'result': 7}, memoised=False) | -2,489,926,817,176,224,300 | Call memoised function with identical arguments using variable format. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_memoised_call_variations | Mikfr83/OpenPype | python | def test_memoised_call_variations():
memoiser = ftrack_api.cache.Memoiser()
expected = {'result': 3}
assert_memoised_call(memoiser, function, args=(1,), expected=expected, memoised=False)
for (args, kw) in [((), {'x': 1}), ((), {'x': 1, 'y': 2}), ((1,), {'y': 2}), ((1,), {})]:
assert_memoised_call(memoiser, function, args=args, kw=kw, expected=expected)
assert_memoised_call(memoiser, function, kw={'x': 2}, expected={'result': 4}, memoised=False)
assert_memoised_call(memoiser, function, kw={'x': 3, 'y': 2}, expected={'result': 5}, memoised=False)
assert_memoised_call(memoiser, function, args=(4,), kw={'y': 2}, expected={'result': 6}, memoised=False)
assert_memoised_call(memoiser, function, args=(5,), expected={'result': 7}, memoised=False) |
def test_memoised_mutable_return_value():
'Avoid side effects for returned mutable arguments when memoising.'
memoiser = ftrack_api.cache.Memoiser()
arguments = ({'called': False}, 1)
result_a = memoiser.call(function, arguments)
assert (result_a == {'result': 3})
assert arguments[0]['called']
del result_a['result']
arguments[0]['called'] = False
result_b = memoiser.call(function, arguments)
assert (result_b == {'result': 3})
assert (not arguments[0]['called']) | 1,472,722,377,510,152,000 | Avoid side effects for returned mutable arguments when memoising. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | test_memoised_mutable_return_value | Mikfr83/OpenPype | python | def test_memoised_mutable_return_value():
memoiser = ftrack_api.cache.Memoiser()
arguments = ({'called': False}, 1)
result_a = memoiser.call(function, arguments)
assert (result_a == {'result': 3})
assert arguments[0]['called']
del result_a['result']
arguments[0]['called'] = False
result_b = memoiser.call(function, arguments)
assert (result_b == {'result': 3})
assert (not arguments[0]['called']) |
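
This last test implies the memoiser returns copies of cached results rather than the cached objects themselves. A hypothetical sketch using copy.deepcopy; note the simplified repr-based key here does not normalise positional versus keyword arguments the way test_memoised_call_variations requires, where the real implementation relies on an ObjectKeyMaker:

```python
import copy


class SketchMemoiser(object):
    """A sketch of a memoiser that deep-copies cached results, so mutating
    a returned value cannot corrupt the cache."""

    def __init__(self):
        self._cache = {}

    def call(self, callable_, args=None, kw=None):
        args = args if args is not None else ()
        kw = kw if kw is not None else {}
        # Simplified repr-based key; the real Memoiser uses an ObjectKeyMaker
        # (see test_object_key_maker_key above).
        key = (repr(callable_), repr(args), repr(sorted(kw.items())))
        if key not in self._cache:
            self._cache[key] = callable_(*args, **kw)
        # Hand back a deep copy so `del result['result']` in the test cannot
        # corrupt the cached value.
        return copy.deepcopy(self._cache[key])
```
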
def method(self, key):
'Method for testing.' | -4,187,073,399,752,700,000 | Method for testing. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | method | Mikfr83/OpenPype | python | def method(self, key):
|
def make_cache(session, cache_path):
'Create a serialised file cache.'
serialized_file_cache = ftrack_api.cache.SerialisedCache(ftrack_api.cache.FileCache(cache_path), encode=session.encode, decode=session.decode)
return serialized_file_cache | 4,168,924,182,699,053,000 | Create a serialised file cache. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | make_cache | Mikfr83/OpenPype | python | def make_cache(session, cache_path):
serialized_file_cache = ftrack_api.cache.SerialisedCache(ftrack_api.cache.FileCache(cache_path), encode=session.encode, decode=session.decode)
return serialized_file_cache |
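
A hypothetical usage of make_cache, mirroring test_expand_references above; it assumes a configured ftrack server environment for ftrack_api.Session:

```python
import os
import tempfile
import uuid

import ftrack_api

# Hypothetical usage: two sessions could share this serialised file cache
# on disk by passing the same cache_path.
cache_path = os.path.join(
    tempfile.gettempdir(), '{0}.dbm'.format(uuid.uuid4().hex)
)
session = ftrack_api.Session(
    cache=lambda session: make_cache(session, cache_path)
)
```
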
def cleanup():
'Cleanup.'
try:
os.remove(cache_path)
except OSError:
os.remove((cache_path + '.db')) | 5,222,959,378,955,900,000 | Cleanup. | openpype/modules/ftrack/python2_vendor/ftrack-python-api/test/unit/test_cache.py | cleanup | Mikfr83/OpenPype | python | def cleanup():
try:
os.remove(cache_path)
except OSError:
os.remove((cache_path + '.db')) |
def initializer_event(event):
'Initializer that sets a global test event for test synchronization'
global _test_event
_test_event = event | 677,735,602,909,768,400 | Initializer that sets a global test event for test synchronization | tests/_executor_mixin.py | initializer_event | pombredanne/loky | python | def initializer_event(event):
global _test_event
_test_event = event |
def _direct_children_with_cmdline(p):
'Helper to fetch cmdline from children process list'
children_with_cmdline = []
for c in p.children():
try:
cmdline = ' '.join(c.cmdline())
if ((not c.is_running()) or (not cmdline)):
continue
children_with_cmdline.append((c, cmdline))
except (OSError, psutil.NoSuchProcess, psutil.AccessDenied):
pass
return children_with_cmdline | 7,898,568,119,053,496,000 | Helper to fetch cmdline from children process list | tests/_executor_mixin.py | _direct_children_with_cmdline | pombredanne/loky | python | def _direct_children_with_cmdline(p):
children_with_cmdline = []
for c in p.children():
try:
cmdline = ' '.join(c.cmdline())
if ((not c.is_running()) or (not cmdline)):
continue
children_with_cmdline.append((c, cmdline))
except (OSError, psutil.NoSuchProcess, psutil.AccessDenied):
pass
return children_with_cmdline |
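
A hypothetical usage of the helper, listing the pid and command line of each live direct child of the current process:

```python
import psutil

# Print pid and command line of each live direct child of this process.
for child, cmdline in _direct_children_with_cmdline(psutil.Process()):
    print(child.pid, cmdline)
```
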
def teardown_method(self, method):
'Make sure the executor can be recovered after the tests'
executor = get_reusable_executor(max_workers=2)
assert (executor.submit(math.sqrt, 1).result() == 1)
_check_subprocesses_number(executor, expected_max_process_number=2) | 2,701,411,222,473,137,000 | Make sure the executor can be recovered after the tests | tests/_executor_mixin.py | teardown_method | pombredanne/loky | python | def teardown_method(self, method):
executor = get_reusable_executor(max_workers=2)
assert (executor.submit(math.sqrt, 1).result() == 1)
_check_subprocesses_number(executor, expected_max_process_number=2) |
def __init__(self, program, parent):
'\n Args:\n parent is responsible for the order in which this window is updated,\n relative to its siblings.\n '
if app.config.strict_debug:
assert issubclass(self.__class__, ViewWindow), self
assert issubclass(program.__class__, app.ci_program.CiProgram), self
if (parent is not None):
assert issubclass(parent.__class__, ViewWindow), parent
self.program = program
self.parent = parent
self.isFocusable = False
self.top = 0
self.left = 0
self.rows = 1
self.cols = 1
self.scrollRow = 0
self.scrollCol = 0
self.showCursor = True
self.writeLineRow = 0
self.zOrder = [] | -7,143,664,489,285,558,000 | Args:
parent is responsible for the order in which this window is updated,
relative to its siblings. | app/window.py | __init__ | fsx950223/ci_edit | python | def __init__(self, program, parent):
'\n Args:\n parent is responsible for the order in which this window is updated,\n relative to its siblings.\n '
if app.config.strict_debug:
assert issubclass(self.__class__, ViewWindow), self
assert issubclass(program.__class__, app.ci_program.CiProgram), self
if (parent is not None):
assert issubclass(parent.__class__, ViewWindow), parent
self.program = program
self.parent = parent
self.isFocusable = False
self.top = 0
self.left = 0
self.rows = 1
self.cols = 1
self.scrollRow = 0
self.scrollCol = 0
self.showCursor = True
self.writeLineRow = 0
self.zOrder = [] |
def addStr(self, row, col, text, colorPair):
'Overwrite text at row, column with text.\n\n The caller is responsible for avoiding overdraw.\n '
if app.config.strict_debug:
app.log.check_le(row, self.rows)
app.log.check_le(col, self.cols)
self.program.backgroundFrame.addStr((self.top + row), (self.left + col), text.encode('utf-8'), colorPair) | 2,855,406,542,445,639,700 | Overwrite text at row, column with text.
The caller is responsible for avoiding overdraw. | app/window.py | addStr | fsx950223/ci_edit | python | def addStr(self, row, col, text, colorPair):
'Overwrite text at row, column with text.\n\n The caller is responsible for avoiding overdraw.\n '
if app.config.strict_debug:
app.log.check_le(row, self.rows)
app.log.check_le(col, self.cols)
self.program.backgroundFrame.addStr((self.top + row), (self.left + col), text.encode('utf-8'), colorPair) |
def blank(self, colorPair):
'Clear the window.'
for i in range(self.rows):
self.addStr(i, 0, (' ' * self.cols), colorPair) | -2,598,299,285,167,540,000 | Clear the window. | app/window.py | blank | fsx950223/ci_edit | python | def blank(self, colorPair):
for i in range(self.rows):
self.addStr(i, 0, (' ' * self.cols), colorPair) |
def bringChildToFront(self, child):
'Bring it to the top layer.'
try:
self.zOrder.remove(child)
except ValueError:
pass
self.zOrder.append(child) | 1,967,091,157,131,979,800 | Bring it to the top layer. | app/window.py | bringChildToFront | fsx950223/ci_edit | python | def bringChildToFront(self, child):
try:
self.zOrder.remove(child)
except ValueError:
pass
self.zOrder.append(child) |
def bringToFront(self):
'Bring it to the top layer.'
self.parent.bringChildToFront(self) | -6,172,657,140,682,936,000 | Bring it to the top layer. | app/window.py | bringToFront | fsx950223/ci_edit | python | def bringToFront(self):
self.parent.bringChildToFront(self) |
def contains(self, row, col):
'Determine whether the position at row, col lay within this window.'
for i in self.zOrder:
if i.contains(row, col):
return i
return ((self.top <= row < (self.top + self.rows)) and (self.left <= col < (self.left + self.cols)) and self) | 3,824,916,222,610,197,000 | Determine whether the position at row, col lay within this window. | app/window.py | contains | fsx950223/ci_edit | python | def contains(self, row, col):
for i in self.zOrder:
if i.contains(row, col):
return i
return ((self.top <= row < (self.top + self.rows)) and (self.left <= col < (self.left + self.cols)) and self) |
def detach(self):
"Hide the window by removing self from parents' children, but keep\n same parent to be reattached later."
try:
self.parent.zOrder.remove(self)
except ValueError:
pass | -1,614,139,279,661,907,000 | Hide the window by removing self from parents' children, but keep
same parent to be reattached later. | app/window.py | detach | fsx950223/ci_edit | python | def detach(self):
"Hide the window by removing self from parents' children, but keep\n same parent to be reattached later."
try:
self.parent.zOrder.remove(self)
except ValueError:
pass |
def nextFocusableWindow(self, start, reverse=False):
'Windows without |isFocusable| are skipped. Ignore (skip) |start| when\n searching.\n\n Args:\n start (window): the child window to start from. If |start| is not\n found, start from the first child window.\n reverse (bool): if True, find the prior focusable window.\n\n Returns:\n A window that should be focused.\n\n See also: showFullWindowHierarchy() which can help in debugging.\n '
windows = self.parent.zOrder[:]
if reverse:
windows.reverse()
try:
found = windows.index(start)
except ValueError:
found = (- 1)
windows = windows[(found + 1):]
for i in windows:
if i.isFocusable:
return i
else:
r = i._childFocusableWindow(reverse)
if (r is not None):
return r
r = self.parent.nextFocusableWindow(self.parent, reverse)
if (r is not None):
return r
return self._childFocusableWindow(reverse) | 8,010,079,175,092,657,000 | Windows without |isFocusable| are skipped. Ignore (skip) |start| when
searching.
Args:
start (window): the child window to start from. If |start| is not
found, start from the first child window.
reverse (bool): if True, find the prior focusable window.
Returns:
A window that should be focused.
See also: showFullWindowHierarchy() which can help in debugging. | app/window.py | nextFocusableWindow | fsx950223/ci_edit | python | def nextFocusableWindow(self, start, reverse=False):
'Windows without |isFocusable| are skipped. Ignore (skip) |start| when\n searching.\n\n Args:\n start (window): the child window to start from. If |start| is not\n found, start from the first child window.\n reverse (bool): if True, find the prior focusable window.\n\n Returns:\n A window that should be focused.\n\n See also: showFullWindowHierarchy() which can help in debugging.\n '
windows = self.parent.zOrder[:]
if reverse:
windows.reverse()
try:
found = windows.index(start)
except ValueError:
found = (- 1)
windows = windows[(found + 1):]
for i in windows:
if i.isFocusable:
return i
else:
r = i._childFocusableWindow(reverse)
if (r is not None):
return r
r = self.parent.nextFocusableWindow(self.parent, reverse)
if (r is not None):
return r
return self._childFocusableWindow(reverse) |
def paint(self, row, col, count, colorPair):
"Paint text a row, column with colorPair.\n\n fyi, I thought this may be faster than using addStr to paint over the\n text with a different colorPair. It looks like there isn't a significant\n performance difference between chgat and addstr.\n "
mainCursesWindow.chgat((self.top + row), (self.left + col), count, colorPair) | -8,640,233,109,933,758,000 | Paint text a row, column with colorPair.
fyi, I thought this may be faster than using addStr to paint over the
text with a different colorPair. It looks like there isn't a significant
performance difference between chgat and addstr. | app/window.py | paint | fsx950223/ci_edit | python | def paint(self, row, col, count, colorPair):
"Paint text a row, column with colorPair.\n\n fyi, I thought this may be faster than using addStr to paint over the\n text with a different colorPair. It looks like there isn't a significant\n performance difference between chgat and addstr.\n "
mainCursesWindow.chgat((self.top + row), (self.left + col), count, colorPair) |
def render(self):
'Redraw window.'
for child in self.zOrder:
child.render() | -7,921,663,333,998,936,000 | Redraw window. | app/window.py | render | fsx950223/ci_edit | python | def render(self):
for child in self.zOrder:
child.render() |
def showWindowHierarchy(self, indent=' '):
'For debugging.'
focus = (u'[f]' if self.isFocusable else u'[ ]')
extra = u''
if hasattr(self, 'label'):
extra += ((u' "' + self.label) + u'"')
app.log.info(('%s%s%s%s' % (indent, focus, self, extra)))
for child in self.zOrder:
child.showWindowHierarchy((indent + u' ')) | 1,114,192,789,658,705,400 | For debugging. | app/window.py | showWindowHierarchy | fsx950223/ci_edit | python | def showWindowHierarchy(self, indent=' '):
focus = (u'[f]' if self.isFocusable else u'[ ]')
extra = u''
if hasattr(self, 'label'):
extra += ((u' "' + self.label) + u'"')
app.log.info(('%s%s%s%s' % (indent, focus, self, extra)))
for child in self.zOrder:
child.showWindowHierarchy((indent + u' ')) |
def showFullWindowHierarchy(self, indent=u' '):
'For debugging.'
f = self
while (f.parent is not None):
f = f.parent
assert f
f.showWindowHierarchy() | -1,577,906,576,277,089,500 | For debugging. | app/window.py | showFullWindowHierarchy | fsx950223/ci_edit | python | def showFullWindowHierarchy(self, indent=u' '):
f = self
while (f.parent is not None):
f = f.parent
assert f
f.showWindowHierarchy() |
def longTimeSlice(self):
'returns whether work is finished (no need to call again).'
return True | 9,024,708,500,950,472,000 | returns whether work is finished (no need to call again). | app/window.py | longTimeSlice | fsx950223/ci_edit | python | def longTimeSlice(self):
return True |
def shortTimeSlice(self):
'returns whether work is finished (no need to call again).'
return True | 1,597,862,405,930,659,800 | returns whether work is finished (no need to call again). | app/window.py | shortTimeSlice | fsx950223/ci_edit | python | def shortTimeSlice(self):
return True |
def setParent(self, parent, layerIndex=sys.maxsize):
'Setting the parent will cause the the window to refresh (i.e. if self\n was hidden with detach() it will no longer be hidden).'
if app.config.strict_debug:
assert issubclass(self.__class__, ViewWindow), self
assert issubclass(parent.__class__, ViewWindow), parent
if self.parent:
try:
self.parent.zOrder.remove(self)
except ValueError:
pass
self.parent = parent
if parent:
self.parent.zOrder.insert(layerIndex, self) | -3,288,386,049,949,082,000 | Setting the parent will cause the the window to refresh (i.e. if self
was hidden with detach() it will no longer be hidden). | app/window.py | setParent | fsx950223/ci_edit | python | def setParent(self, parent, layerIndex=sys.maxsize):
'Setting the parent will cause the the window to refresh (i.e. if self\n was hidden with detach() it will no longer be hidden).'
if app.config.strict_debug:
assert issubclass(self.__class__, ViewWindow), self
assert issubclass(parent.__class__, ViewWindow), parent
if self.parent:
try:
self.parent.zOrder.remove(self)
except ValueError:
pass
self.parent = parent
if parent:
self.parent.zOrder.insert(layerIndex, self) |
def writeLine(self, text, color):
'Simple line writer for static windows.'
if app.config.strict_debug:
assert isinstance(text, unicode)
text = text[:self.cols]
text = (text + (u' ' * max(0, (self.cols - len(text)))))
self.program.backgroundFrame.addStr((self.top + self.writeLineRow), self.left, text.encode(u'utf-8'), color)
self.writeLineRow += 1 | 1,040,613,029,633,996,800 | Simple line writer for static windows. | app/window.py | writeLine | fsx950223/ci_edit | python | def writeLine(self, text, color):
if app.config.strict_debug:
assert isinstance(text, unicode)
text = text[:self.cols]
text = (text + (u' ' * max(0, (self.cols - len(text)))))
self.program.backgroundFrame.addStr((self.top + self.writeLineRow), self.left, text.encode(u'utf-8'), color)
self.writeLineRow += 1 |
def focus(self):
'\n Note: to focus a view it must have a controller. Focusing a view without\n a controller would make the program appear to freeze since nothing\n would be responding to user input.\n '
self.hasFocus = True
self.controller.focus() | 3,927,385,452,390,750,000 | Note: to focus a view it must have a controller. Focusing a view without
a controller would make the program appear to freeze since nothing
would be responding to user input. | app/window.py | focus | fsx950223/ci_edit | python | def focus(self):
'\n Note: to focus a view it must have a controller. Focusing a view without\n a controller would make the program appear to freeze since nothing\n would be responding to user input.\n '
self.hasFocus = True
self.controller.focus() |
def longTimeSlice(self):
'returns whether work is finished (no need to call again).'
finished = True
tb = self.textBuffer
if ((tb is not None) and (tb.parser.resumeAtRow < len(tb.lines))):
tb.parseDocument()
finished = (tb.parser.resumeAtRow >= len(tb.lines))
for child in self.zOrder:
finished = (finished and child.longTimeSlice())
return finished | -1,970,306,431,148,542,500 | returns whether work is finished (no need to call again). | app/window.py | longTimeSlice | fsx950223/ci_edit | python | def longTimeSlice(self):
finished = True
tb = self.textBuffer
if ((tb is not None) and (tb.parser.resumeAtRow < len(tb.lines))):
tb.parseDocument()
finished = (tb.parser.resumeAtRow >= len(tb.lines))
for child in self.zOrder:
finished = (finished and child.longTimeSlice())
return finished |
def shortTimeSlice(self):
'returns whether work is finished (no need to call again).'
tb = self.textBuffer
if (tb is not None):
tb.parseScreenMaybe()
return (tb.parser.resumeAtRow >= len(tb.lines))
return True | -7,193,189,450,740,495,000 | returns whether work is finished (no need to call again). | app/window.py | shortTimeSlice | fsx950223/ci_edit | python | def shortTimeSlice(self):
tb = self.textBuffer
if (tb is not None):
tb.parseScreenMaybe()
return (tb.parser.resumeAtRow >= len(tb.lines))
return True |
def getVisibleBookmarks(self, beginRow, endRow):
'\n Args:\n beginRow (int): the index of the line number that you want the list of\n bookmarks to start from.\n endRow (int): the index of the line number that you want the list of\n bookmarks to end at (exclusive).\n\n Returns:\n A list containing the bookmarks that are displayed on the screen. If\n there are no bookmarks, returns an empty list.\n '
bookmarkList = self.host.textBuffer.bookmarks
beginIndex = endIndex = 0
if len(bookmarkList):
needle = app.bookmark.Bookmark(beginRow, beginRow, {})
beginIndex = bisect.bisect_left(bookmarkList, needle)
if ((beginIndex > 0) and (bookmarkList[(beginIndex - 1)].end >= beginRow)):
beginIndex -= 1
needle.range = (endRow, endRow)
endIndex = bisect.bisect_left(bookmarkList, needle)
return bookmarkList[beginIndex:endIndex] | 5,004,336,973,067,513,000 | Args:
beginRow (int): the index of the line number that you want the list of
bookmarks to start from.
endRow (int): the index of the line number that you want the list of
bookmarks to end at (exclusive).
Returns:
A list containing the bookmarks that are displayed on the screen. If
there are no bookmarks, returns an empty list. | app/window.py | getVisibleBookmarks | fsx950223/ci_edit | python | def getVisibleBookmarks(self, beginRow, endRow):
'\n Args:\n beginRow (int): the index of the line number that you want the list of\n bookmarks to start from.\n endRow (int): the index of the line number that you want the list of\n bookmarks to end at (exclusive).\n\n Returns:\n A list containing the bookmarks that are displayed on the screen. If\n there are no bookmarks, returns an empty list.\n '
bookmarkList = self.host.textBuffer.bookmarks
beginIndex = endIndex = 0
if len(bookmarkList):
needle = app.bookmark.Bookmark(beginRow, beginRow, {})
beginIndex = bisect.bisect_left(bookmarkList, needle)
if ((beginIndex > 0) and (bookmarkList[(beginIndex - 1)].end >= beginRow)):
beginIndex -= 1
needle.range = (endRow, endRow)
endIndex = bisect.bisect_left(bookmarkList, needle)
return bookmarkList[beginIndex:endIndex] |
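
getVisibleBookmarks uses two bisect_left probes to slice a sorted range list, stepping back one slot when the preceding range overlaps the window. The same technique in a standalone, stdlib-only sketch with hypothetical (begin, end) tuples standing in for Bookmark objects:

```python
import bisect

# Hypothetical data: sorted (begin, end) ranges; window is rows 5..9.
ranges = [(1, 2), (4, 6), (9, 9)]
begin_row, end_row = 5, 10

begin_index = bisect.bisect_left(ranges, (begin_row, begin_row))
# Step back one slot if the previous range extends into the window.
if begin_index > 0 and ranges[begin_index - 1][1] >= begin_row:
    begin_index -= 1
end_index = bisect.bisect_left(ranges, (end_row, end_row))

print(ranges[begin_index:end_index])  # [(4, 6), (9, 9)]
```
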
def addSelectOptionsRow(self, label, optionsList):
'Such as a radio group.'
optionsRow = OptionsRow(self.program, self)
optionsRow.color = self.program.color.get(u'keyword')
optionsRow.addLabel(label)
optionsDict = {}
optionsRow.beginGroup()
for key in optionsList:
optionsDict[key] = False
optionsRow.addSelection(key, optionsDict)
optionsRow.endGroup()
optionsDict[optionsList[0]] = True
optionsRow.setParent(self)
return (optionsDict, optionsRow) | 2,664,489,969,004,587,500 | Such as a radio group. | app/window.py | addSelectOptionsRow | fsx950223/ci_edit | python | def addSelectOptionsRow(self, label, optionsList):
optionsRow = OptionsRow(self.program, self)
optionsRow.color = self.program.color.get(u'keyword')
optionsRow.addLabel(label)
optionsDict = {}
optionsRow.beginGroup()
for key in optionsList:
optionsDict[key] = False
optionsRow.addSelection(key, optionsDict)
optionsRow.endGroup()
optionsDict[optionsList[0]] = True
optionsRow.setParent(self)
return (optionsDict, optionsRow) |
def render(self):
'Render the context information at the top of the window.'
lines = self.lines[(- self.mode):]
lines.reverse()
color = self.program.color.get('top_info')
for (i, line) in enumerate(lines):
self.addStr(i, 0, (line + (u' ' * (self.cols - len(line))))[:self.cols], color)
for i in range(len(lines), self.rows):
self.addStr(i, 0, (u' ' * self.cols), color) | -5,203,097,784,340,251,000 | Render the context information at the top of the window. | app/window.py | render | fsx950223/ci_edit | python | def render(self):
lines = self.lines[(- self.mode):]
lines.reverse()
color = self.program.color.get('top_info')
for (i, line) in enumerate(lines):
self.addStr(i, 0, (line + (u' ' * (self.cols - len(line))))[:self.cols], color)
for i in range(len(lines), self.rows):
self.addStr(i, 0, (u' ' * self.cols), color) |
def layout(self):
'Change self and sub-windows to fit within the given rectangle.'
(top, left, rows, cols) = self.outerShape
lineNumbersCols = 7
topRows = self.topRows
bottomRows = max(1, self.interactiveFind.preferredSize(rows, cols)[0])
self.logoCorner.reshape(top, left, 2, lineNumbersCols)
if (self.showTopInfo and (rows > topRows) and (cols > lineNumbersCols)):
self.topInfo.reshape(top, (left + lineNumbersCols), topRows, (cols - lineNumbersCols))
top += topRows
rows -= topRows
rows -= bottomRows
bottomFirstRow = (top + rows)
self.confirmClose.reshape(bottomFirstRow, left, bottomRows, cols)
self.confirmOverwrite.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactivePrediction.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactivePrompt.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactiveQuit.reshape(bottomFirstRow, left, bottomRows, cols)
if self.showMessageLine:
self.messageLine.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactiveFind.reshape(bottomFirstRow, left, bottomRows, cols)
if 1:
self.interactiveGoto.reshape(bottomFirstRow, left, bottomRows, cols)
if (self.showFooter and (rows > 0)):
self.statusLine.reshape((bottomFirstRow - self.statusLineCount), left, self.statusLineCount, cols)
rows -= self.statusLineCount
if (self.showLineNumbers and (cols > lineNumbersCols)):
self.lineNumberColumn.reshape(top, left, rows, lineNumbersCols)
cols -= lineNumbersCols
left += lineNumbersCols
if (self.showRightColumn and (cols > 0)):
self.rightColumn.reshape(top, ((left + cols) - 1), rows, 1)
cols -= 1
Window.reshape(self, top, left, rows, cols) | -6,147,367,660,641,850,000 | Change self and sub-windows to fit within the given rectangle. | app/window.py | layout | fsx950223/ci_edit | python | def layout(self):
(top, left, rows, cols) = self.outerShape
lineNumbersCols = 7
topRows = self.topRows
bottomRows = max(1, self.interactiveFind.preferredSize(rows, cols)[0])
self.logoCorner.reshape(top, left, 2, lineNumbersCols)
if (self.showTopInfo and (rows > topRows) and (cols > lineNumbersCols)):
self.topInfo.reshape(top, (left + lineNumbersCols), topRows, (cols - lineNumbersCols))
top += topRows
rows -= topRows
rows -= bottomRows
bottomFirstRow = (top + rows)
self.confirmClose.reshape(bottomFirstRow, left, bottomRows, cols)
self.confirmOverwrite.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactivePrediction.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactivePrompt.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactiveQuit.reshape(bottomFirstRow, left, bottomRows, cols)
if self.showMessageLine:
self.messageLine.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactiveFind.reshape(bottomFirstRow, left, bottomRows, cols)
if 1:
self.interactiveGoto.reshape(bottomFirstRow, left, bottomRows, cols)
if (self.showFooter and (rows > 0)):
self.statusLine.reshape((bottomFirstRow - self.statusLineCount), left, self.statusLineCount, cols)
rows -= self.statusLineCount
if (self.showLineNumbers and (cols > lineNumbersCols)):
self.lineNumberColumn.reshape(top, left, rows, lineNumbersCols)
cols -= lineNumbersCols
left += lineNumbersCols
if (self.showRightColumn and (cols > 0)):
self.rightColumn.reshape(top, ((left + cols) - 1), rows, 1)
cols -= 1
Window.reshape(self, top, left, rows, cols) |
def drawLogoCorner(self):
'.'
logo = self.logoCorner
if ((logo.rows <= 0) or (logo.cols <= 0)):
return
color = self.program.color.get('logo')
for i in range(logo.rows):
logo.addStr(i, 0, (u' ' * logo.cols), color)
logo.addStr(0, 1, u'ci'[:self.cols], color)
logo.render() | 5,193,410,886,103,535,000 | . | app/window.py | drawLogoCorner | fsx950223/ci_edit | python | def drawLogoCorner(self):
logo = self.logoCorner
if ((logo.rows <= 0) or (logo.cols <= 0)):
return
color = self.program.color.get('logo')
for i in range(logo.rows):
logo.addStr(i, 0, (u' ' * logo.cols), color)
logo.addStr(0, 1, u'ci'[:self.cols], color)
logo.render()
def drawRightEdge(self):
'Draw markers to indicate text extending past the right edge of the\n window.'
(maxRow, maxCol) = (self.rows, self.cols)
limit = min(maxRow, (len(self.textBuffer.lines) - self.scrollRow))
colorPrefs = self.program.color
for i in range(limit):
color = colorPrefs.get('right_column')
if ((len(self.textBuffer.lines[(i + self.scrollRow)]) - self.scrollCol) > maxCol):
color = colorPrefs.get('line_overflow')
self.rightColumn.addStr(i, 0, u' ', color)
color = colorPrefs.get('outside_document')
for i in range(limit, maxRow):
self.rightColumn.addStr(i, 0, u' ', color) | -2,545,021,662,017,133,000 | Draw markers to indicate text extending past the right edge of the
window. | app/window.py | drawRightEdge | fsx950223/ci_edit | python | def drawRightEdge(self):
'Draw makers to indicate text extending past the right edge of the\n window.'
(maxRow, maxCol) = (self.rows, self.cols)
limit = min(maxRow, (len(self.textBuffer.lines) - self.scrollRow))
colorPrefs = self.program.color
for i in range(limit):
color = colorPrefs.get('right_column')
if ((len(self.textBuffer.lines[(i + self.scrollRow)]) - self.scrollCol) > maxCol):
color = colorPrefs.get('line_overflow')
self.rightColumn.addStr(i, 0, u' ', color)
color = colorPrefs.get('outside_document')
for i in range(limit, maxRow):
self.rightColumn.addStr(i, 0, u' ', color) |
def reshape(self, top, left, rows, cols):
'Change self and sub-windows to fit within the given rectangle.'
app.log.detail(top, left, rows, cols)
Window.reshape(self, top, left, rows, cols)
self.outerShape = (top, left, rows, cols)
self.layout() | -6,415,804,925,731,782,000 | Change self and sub-windows to fit within the given rectangle. | app/window.py | reshape | fsx950223/ci_edit | python | def reshape(self, top, left, rows, cols):
app.log.detail(top, left, rows, cols)
Window.reshape(self, top, left, rows, cols)
self.outerShape = (top, left, rows, cols)
self.layout() |
def beginGroup(self):
'Like a radio group, or column sort headers.'
self.group = [] | 601,685,624,623,827,700 | Like a radio group, or column sort headers. | app/window.py | beginGroup | fsx950223/ci_edit | python | def beginGroup(self):
self.group = [] |
def endGroup(self):
'Like a radio group, or column sort headers.'
pass | -6,653,601,723,547,911,000 | Like a radio group, or column sort headers. | app/window.py | endGroup | fsx950223/ci_edit | python | def endGroup(self):
pass |
def render(self):
'Display a box of text in the center of the window.'
(maxRows, maxCols) = (self.host.rows, self.host.cols)
cols = min((self.longestLineLength + 6), maxCols)
rows = min((len(self.__message) + 4), maxRows)
self.resizeTo(rows, cols)
self.moveTo(((maxRows // 2) - (rows // 2)), ((maxCols // 2) - (cols // 2)))
color = self.program.color.get('popup_window')
for row in range(rows):
if ((row == (rows - 2)) and self.showOptions):
message = '/'.join(self.options)
elif ((row == 0) or (row >= (rows - 3))):
self.addStr(row, 0, (' ' * cols), color)
continue
else:
message = self.__message[(row - 1)]
lineLength = len(message)
spacing1 = ((cols - lineLength) // 2)
spacing2 = ((cols - lineLength) - spacing1)
self.addStr(row, 0, (((' ' * spacing1) + message) + (' ' * spacing2)), color) | 39,768,860,031,338,680 | Display a box of text in the center of the window. | app/window.py | render | fsx950223/ci_edit | python | def render(self):
(maxRows, maxCols) = (self.host.rows, self.host.cols)
cols = min((self.longestLineLength + 6), maxCols)
rows = min((len(self.__message) + 4), maxRows)
self.resizeTo(rows, cols)
self.moveTo(((maxRows // 2) - (rows // 2)), ((maxCols // 2) - (cols // 2)))
color = self.program.color.get('popup_window')
for row in range(rows):
if ((row == (rows - 2)) and self.showOptions):
message = '/'.join(self.options)
elif ((row == 0) or (row >= (rows - 3))):
self.addStr(row, 0, (' ' * cols), color)
continue
else:
message = self.__message[(row - 1)]
lineLength = len(message)
spacing1 = ((cols - lineLength) // 2)
spacing2 = ((cols - lineLength) - spacing1)
self.addStr(row, 0, (((' ' * spacing1) + message) + (' ' * spacing2)), color) |
def setMessage(self, message):
"Sets the Popup window's message to the given message.\n\n message (str): A string that you want to display.\n\n Returns:\n None.\n "
self.__message = message.split('\n')
self.longestLineLength = max([len(line) for line in self.__message]) | 8,066,038,472,976,924,000 | Sets the Popup window's message to the given message.
message (str): A string that you want to display.
Returns:
None. | app/window.py | setMessage | fsx950223/ci_edit | python | def setMessage(self, message):
"Sets the Popup window's message to the given message.\n\n message (str): A string that you want to display.\n\n Returns:\n None.\n "
self.__message = message.split('\n')
self.longestLineLength = max([len(line) for line in self.__message]) |
def setOptionsToDisplay(self, options):
"\n This function is used to change the options that are displayed in the\n popup window. They will be separated by a '/' character when displayed.\n\n Args:\n options (list): A list of possible keys which the user can press and\n should be responded to by the controller.\n "
self.options = options | 3,100,408,263,422,973,400 | This function is used to change the options that are displayed in the
popup window. They will be separated by a '/' character when displayed.
Args:
options (list): A list of possible keys which the user can press and
should be responded to by the controller. | app/window.py | setOptionsToDisplay | fsx950223/ci_edit | python | def setOptionsToDisplay(self, options):
"\n This function is used to change the options that are displayed in the\n popup window. They will be separated by a '/' character when displayed.\n\n Args:\n options (list): A list of possible keys which the user can press and\n should be responded to by the controller.\n "
self.options = options |
def splitWindow(self):
'Experimental.'
app.log.info()
other = InputWindow(self.prg, self)
other.setTextBuffer(self.textBuffer)
app.log.info()
self.prg.zOrder.append(other)
self.prg.layout()
app.log.info() | -6,914,465,428,376,622,000 | Experimental. | app/window.py | splitWindow | fsx950223/ci_edit | python | def splitWindow(self):
app.log.info()
other = InputWindow(self.prg, self)
other.setTextBuffer(self.textBuffer)
app.log.info()
self.prg.zOrder.append(other)
self.prg.layout()
app.log.info() |
def __init__(self, browser='ff', browser_version=None, os_name=None):
'Constructor for the Driver factory'
self.browser = browser
self.browser_version = browser_version
self.os_name = os_name | -3,671,027,513,323,766,300 | Constructor for the Driver factory | QA/page_objects/DriverFactory.py | __init__ | akkuldn/interview-scheduler | python | def __init__(self, browser='ff', browser_version=None, os_name=None):
self.browser = browser
self.browser_version = browser_version
self.os_name = os_name |
def get_web_driver(self, remote_flag, os_name, os_version, browser, browser_version):
'Return the appropriate driver'
if (remote_flag.lower() == 'n'):
web_driver = self.run_local(os_name, os_version, browser, browser_version)
else:
print('DriverFactory does not know the browser: ', browser)
web_driver = None
return web_driver | 4,267,462,555,282,191,000 | Return the appropriate driver | QA/page_objects/DriverFactory.py | get_web_driver | akkuldn/interview-scheduler | python | def get_web_driver(self, remote_flag, os_name, os_version, browser, browser_version):
if (remote_flag.lower() == 'n'):
web_driver = self.run_local(os_name, os_version, browser, browser_version)
else:
print('DriverFactory does not know the browser: ', browser)
web_driver = None
return web_driver |
def run_local(self, os_name, os_version, browser, browser_version):
'Return the local driver'
local_driver = None
if ((browser.lower() == 'ff') or (browser.lower() == 'firefox')):
local_driver = webdriver.Firefox()
elif (browser.lower() == 'ie'):
local_driver = webdriver.Ie()
elif (browser.lower() == 'chrome'):
local_driver = webdriver.Chrome()
elif (browser.lower() == 'opera'):
opera_options = None
try:
opera_browser_location = opera_browser_conf.location
options = webdriver.ChromeOptions()
options.binary_location = opera_browser_location
local_driver = webdriver.Opera(options=options)
except Exception as e:
print(('\nException when trying to get remote webdriver:%s' % sys.modules[__name__]))
print(('Python says:%s' % str(e)))
if ('no Opera binary' in str(e)):
print('SOLUTION: It looks like you are trying to use Opera Browser. Please update Opera Browser location under conf/opera_browser_conf.\n')
elif (browser.lower() == 'safari'):
local_driver = webdriver.Safari()
return local_driver | 2,636,067,257,050,410,000 | Return the local driver | QA/page_objects/DriverFactory.py | run_local | akkuldn/interview-scheduler | python | def run_local(self, os_name, os_version, browser, browser_version):
local_driver = None
if ((browser.lower() == 'ff') or (browser.lower() == 'firefox')):
local_driver = webdriver.Firefox()
elif (browser.lower() == 'ie'):
local_driver = webdriver.Ie()
elif (browser.lower() == 'chrome'):
local_driver = webdriver.Chrome()
elif (browser.lower() == 'opera'):
opera_options = None
try:
opera_browser_location = opera_browser_conf.location
options = webdriver.ChromeOptions()
options.binary_location = opera_browser_location
local_driver = webdriver.Opera(options=options)
except Exception as e:
print(('\nException when trying to get remote webdriver:%s' % sys.modules[__name__]))
print(('Python says:%s' % str(e)))
if ('no Opera binary' in str(e)):
print('SOLUTION: It looks like you are trying to use Opera Browser. Please update Opera Browser location under conf/opera_browser_conf.\n')
elif (browser.lower() == 'safari'):
local_driver = webdriver.Safari()
return local_driver |
def get_firefox_driver(self):
'Return the Firefox driver'
driver = webdriver.Firefox(firefox_profile=self.get_firefox_profile())
return driver | 8,915,438,688,894,075,000 | Return the Firefox driver | QA/page_objects/DriverFactory.py | get_firefox_driver | akkuldn/interview-scheduler | python | def get_firefox_driver(self):
driver = webdriver.Firefox(firefox_profile=self.get_firefox_profile())
return driver |
def get_firefox_profile(self):
'Return a firefox profile'
return self.set_firefox_profile() | 831,864,429,673,104,300 | Return a firefox profile | QA/page_objects/DriverFactory.py | get_firefox_profile | akkuldn/interview-scheduler | python | def get_firefox_profile(self):
return self.set_firefox_profile() |
def set_firefox_profile(self):
'Setup firefox with the right preferences and return a profile'
try:
self.download_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'downloads'))
if (not os.path.exists(self.download_dir)):
os.makedirs(self.download_dir)
except Exception as e:
print('Exception when trying to set directory structure')
print(str(e))
profile = webdriver.firefox.firefox_profile.FirefoxProfile()
set_pref = profile.set_preference
set_pref('browser.download.folderList', 2)
set_pref('browser.download.dir', self.download_dir)
set_pref('browser.download.useDownloadDir', True)
set_pref('browser.helperApps.alwaysAsk.force', False)
set_pref('browser.helperApps.neverAsk.openFile', 'text/csv,application/octet-stream,application/pdf')
set_pref('browser.helperApps.neverAsk.saveToDisk', 'text/csv,application/vnd.ms-excel,application/pdf,application/csv,application/octet-stream')
set_pref('plugin.disable_full_page_plugin_for_types', 'application/pdf')
set_pref('pdfjs.disabled', True)
return profile | -5,341,116,849,534,817,000 | Setup firefox with the right preferences and return a profile | QA/page_objects/DriverFactory.py | set_firefox_profile | akkuldn/interview-scheduler | python | def set_firefox_profile(self):
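
A hypothetical usage mirroring get_firefox_driver above, using the selenium 3 style firefox_profile argument that this module already relies on:

```python
from selenium import webdriver

# Start Firefox with the download-friendly profile built above.
factory = DriverFactory(browser='ff')
driver = webdriver.Firefox(firefox_profile=factory.get_firefox_profile())
driver.get('https://example.com')
driver.quit()
```
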
try:
self.download_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'downloads'))
if (not os.path.exists(self.download_dir)):
os.makedirs(self.download_dir)
except Exception as e:
print('Exception when trying to set directory structure')
print(str(e))
profile = webdriver.firefox.firefox_profile.FirefoxProfile()
set_pref = profile.set_preference
set_pref('browser.download.folderList', 2)
set_pref('browser.download.dir', self.download_dir)
set_pref('browser.download.useDownloadDir', True)
set_pref('browser.helperApps.alwaysAsk.force', False)
set_pref('browser.helperApps.neverAsk.openFile', 'text/csv,application/octet-stream,application/pdf')
set_pref('browser.helperApps.neverAsk.saveToDisk', 'text/csv,application/vnd.ms-excel,application/pdf,application/csv,application/octet-stream')
set_pref('plugin.disable_full_page_plugin_for_types', 'application/pdf')
set_pref('pdfjs.disabled', True)
return profile |
def __init__(self, jailer_id, exec_file, numa_node=0, uid=1234, gid=1234, chroot_base=JAILER_DEFAULT_CHROOT, netns=None, daemonize=True, seccomp_level=2):
"Set up jailer fields.\n\n This plays the role of a default constructor as it populates\n the jailer's fields with some default values. Each field can be\n further adjusted by each test even with None values.\n "
self.jailer_id = jailer_id
self.exec_file = exec_file
self.numa_node = numa_node
self.uid = uid
self.gid = gid
self.chroot_base = chroot_base
self.netns = (netns if (netns is not None) else jailer_id)
self.daemonize = daemonize
self.seccomp_level = seccomp_level | -1,773,748,112,532,319,200 | Set up jailer fields.
This plays the role of a default constructor as it populates
the jailer's fields with some default values. Each field can be
further adjusted by each test even with None values. | tests/framework/jailer.py | __init__ | Pennyzct/firecracker | python | def __init__(self, jailer_id, exec_file, numa_node=0, uid=1234, gid=1234, chroot_base=JAILER_DEFAULT_CHROOT, netns=None, daemonize=True, seccomp_level=2):
"Set up jailer fields.\n\n This plays the role of a default constructor as it populates\n the jailer's fields with some default values. Each field can be\n further adjusted by each test even with None values.\n "
self.jailer_id = jailer_id
self.exec_file = exec_file
self.numa_node = numa_node
self.uid = uid
self.gid = gid
self.chroot_base = chroot_base
self.netns = (netns if (netns is not None) else jailer_id)
self.daemonize = daemonize
self.seccomp_level = seccomp_level |
def __del__(self):
'Cleanup this jailer context.'
self.cleanup() | -6,640,352,949,861,939,000 | Cleanup this jailer context. | tests/framework/jailer.py | __del__ | Pennyzct/firecracker | python | def __del__(self):
self.cleanup() |
def construct_param_list(self):
'Create the list of parameters we want the jailer to start with.\n\n We want to be able to vary any parameter even the required ones as we\n might want to add integration tests that validate the enforcement of\n mandatory arguments.\n '
jailer_param_list = []
if (self.jailer_id is not None):
jailer_param_list.extend(['--id', str(self.jailer_id)])
if (self.exec_file is not None):
jailer_param_list.extend(['--exec-file', str(self.exec_file)])
if (self.numa_node is not None):
jailer_param_list.extend(['--node', str(self.numa_node)])
if (self.uid is not None):
jailer_param_list.extend(['--uid', str(self.uid)])
if (self.gid is not None):
jailer_param_list.extend(['--gid', str(self.gid)])
if (self.chroot_base is not None):
jailer_param_list.extend(['--chroot-base-dir', str(self.chroot_base)])
if (self.netns is not None):
jailer_param_list.extend(['--netns', str(self.netns_file_path())])
if self.daemonize:
jailer_param_list.append('--daemonize')
if (self.seccomp_level is not None):
jailer_param_list.extend(['--seccomp-level', str(self.seccomp_level)])
return jailer_param_list | -228,787,732,456,637,760 | Create the list of parameters we want the jailer to start with.
We want to be able to vary any parameter even the required ones as we
might want to add integration tests that validate the enforcement of
mandatory arguments. | tests/framework/jailer.py | construct_param_list | Pennyzct/firecracker | python | def construct_param_list(self):
'Create the list of parameters we want the jailer to start with.\n\n We want to be able to vary any parameter even the required ones as we\n might want to add integration tests that validate the enforcement of\n mandatory arguments.\n '
jailer_param_list = []
if (self.jailer_id is not None):
jailer_param_list.extend(['--id', str(self.jailer_id)])
if (self.exec_file is not None):
jailer_param_list.extend(['--exec-file', str(self.exec_file)])
if (self.numa_node is not None):
jailer_param_list.extend(['--node', str(self.numa_node)])
if (self.uid is not None):
jailer_param_list.extend(['--uid', str(self.uid)])
if (self.gid is not None):
jailer_param_list.extend(['--gid', str(self.gid)])
if (self.chroot_base is not None):
jailer_param_list.extend(['--chroot-base-dir', str(self.chroot_base)])
if (self.netns is not None):
jailer_param_list.extend(['--netns', str(self.netns_file_path())])
if self.daemonize:
jailer_param_list.append('--daemonize')
if (self.seccomp_level is not None):
jailer_param_list.extend(['--seccomp-level', str(self.seccomp_level)])
return jailer_param_list |
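
A hypothetical usage, assuming the enclosing class is named JailerContext (the class name is not shown in this record); the chroot value in the sample output stands in for whatever JAILER_DEFAULT_CHROOT is:

```python
# Build a default context and inspect the jailer command-line parameters.
context = JailerContext(jailer_id='test-vm', exec_file='/usr/bin/firecracker')
print(' '.join(context.construct_param_list()))
# e.g. --id test-vm --exec-file /usr/bin/firecracker --node 0 --uid 1234
#      --gid 1234 --chroot-base-dir <JAILER_DEFAULT_CHROOT>
#      --netns /var/run/netns/test-vm --daemonize --seccomp-level 2
```
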
def chroot_base_with_id(self):
'Return the MicroVM chroot base + MicroVM ID.'
return os.path.join((self.chroot_base if (self.chroot_base is not None) else JAILER_DEFAULT_CHROOT), FC_BINARY_NAME, self.jailer_id) | -5,389,557,589,996,205,000 | Return the MicroVM chroot base + MicroVM ID. | tests/framework/jailer.py | chroot_base_with_id | Pennyzct/firecracker | python | def chroot_base_with_id(self):
return os.path.join((self.chroot_base if (self.chroot_base is not None) else JAILER_DEFAULT_CHROOT), FC_BINARY_NAME, self.jailer_id) |
def api_socket_path(self):
'Return the MicroVM API socket path.'
return os.path.join(self.chroot_path(), API_USOCKET_NAME) | -4,672,210,081,637,536,000 | Return the MicroVM API socket path. | tests/framework/jailer.py | api_socket_path | Pennyzct/firecracker | python | def api_socket_path(self):
return os.path.join(self.chroot_path(), API_USOCKET_NAME) |
def chroot_path(self):
'Return the MicroVM chroot path.'
return os.path.join(self.chroot_base_with_id(), 'root') | -2,333,839,329,058,452,000 | Return the MicroVM chroot path. | tests/framework/jailer.py | chroot_path | Pennyzct/firecracker | python | def chroot_path(self):
return os.path.join(self.chroot_base_with_id(), 'root') |
def jailed_path(self, file_path, create=False):
'Create a hard link owned by uid:gid.\n\n Create a hard link to the specified file, change the owner to\n uid:gid, and return a path to the link which is valid within the jail.\n '
file_name = os.path.basename(file_path)
global_p = os.path.join(self.chroot_path(), file_name)
jailed_p = os.path.join('/', file_name)
if create:
cmd = 'ln -f {} {}'.format(file_path, global_p)
run(cmd, shell=True, check=True)
cmd = 'chown {}:{} {}'.format(self.uid, self.gid, global_p)
run(cmd, shell=True, check=True)
return jailed_p | 9,211,794,167,984,370,000 | Create a hard link owned by uid:gid.
Create a hard link to the specified file, change the owner to
uid:gid, and return a path to the link which is valid within the jail. | tests/framework/jailer.py | jailed_path | Pennyzct/firecracker | python | def jailed_path(self, file_path, create=False):
'Create a hard link owned by uid:gid.\n\n Create a hard link to the specified file, change the owner to\n uid:gid, and return a path to the link which is valid within the jail.\n '
file_name = os.path.basename(file_path)
global_p = os.path.join(self.chroot_path(), file_name)
jailed_p = os.path.join('/', file_name)
if create:
cmd = 'ln -f {} {}'.format(file_path, global_p)
run(cmd, shell=True, check=True)
cmd = 'chown {}:{} {}'.format(self.uid, self.gid, global_p)
run(cmd, shell=True, check=True)
return jailed_p |
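The hard-link trick in jailed_path can be reproduced outside the test framework with os.link instead of shelling out to ln; this standalone sketch is an illustration, not the framework's API.
import os

def link_into_chroot(src_path, chroot_root):
    # A hard link makes the same inode visible under the chroot without copying data.
    name = os.path.basename(src_path)
    os.link(src_path, os.path.join(chroot_root, name))
    # The jailed process resolves the file relative to '/', not to chroot_root.
    return os.path.join('/', name)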
def netns_file_path(self):
'Get the host netns file path for a jailer context.\n\n Returns the path on the host to the file which represents the netns,\n and which must be passed to the jailer as the value of the --netns\n parameter, when in use.\n '
if self.netns:
return '/var/run/netns/{}'.format(self.netns)
return None | 2,430,046,924,320,250,400 | Get the host netns file path for a jailer context.
Returns the path on the host to the file which represents the netns,
and which must be passed to the jailer as the value of the --netns
parameter, when in use. | tests/framework/jailer.py | netns_file_path | Pennyzct/firecracker | python | def netns_file_path(self):
'Get the host netns file path for a jailer context.\n\n Returns the path on the host to the file which represents the netns,\n and which must be passed to the jailer as the value of the --netns\n parameter, when in use.\n '
if self.netns:
return '/var/run/netns/{}'.format(self.netns)
return None |
def netns_cmd_prefix(self):
'Return the jailer context netns file prefix.'
if self.netns:
return 'ip netns exec {} '.format(self.netns)
return '' | -5,883,324,889,070,482,000 | Return the jailer context netns file prefix. | tests/framework/jailer.py | netns_cmd_prefix | Pennyzct/firecracker | python | def netns_cmd_prefix(self):
if self.netns:
return 'ip netns exec {} '.format(self.netns)
return ''
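A quick sanity check of the prefix logic; the SimpleNamespace stand-in below is an assumption made purely for the demonstration.
from types import SimpleNamespace

ctx = SimpleNamespace(netns='fc-test-ns')
prefix = 'ip netns exec {} '.format(ctx.netns) if ctx.netns else ''
assert prefix + 'ping -c 1 10.0.0.2' == 'ip netns exec fc-test-ns ping -c 1 10.0.0.2'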
def setup(self):
'Set up this jailer context.'
os.makedirs((self.chroot_base if (self.chroot_base is not None) else JAILER_DEFAULT_CHROOT), exist_ok=True)
if self.netns:
run('ip netns add {}'.format(self.netns), shell=True, check=True) | -4,179,125,551,643,208,700 | Set up this jailer context. | tests/framework/jailer.py | setup | Pennyzct/firecracker | python | def setup(self):
os.makedirs((self.chroot_base if (self.chroot_base is not None) else JAILER_DEFAULT_CHROOT), exist_ok=True)
if self.netns:
run('ip netns add {}'.format(self.netns), shell=True, check=True) |
def cleanup(self):
'Clean up this jailer context.'
shutil.rmtree(self.chroot_base_with_id(), ignore_errors=True)
if self.netns:
_ = run('ip netns del {}'.format(self.netns), shell=True, stderr=PIPE)
controllers = ('cpu', 'cpuset', 'pids')
for controller in controllers:
try:
retry_call(f=self._kill_crgoup_tasks, fargs=[controller], exceptions=TimeoutError, max_delay=5)
except TimeoutError:
pass
back_cmd = '-depth -type d -exec rmdir {} \\;'
cmd = 'find /sys/fs/cgroup/{}/{}/{} {}'.format(controller, FC_BINARY_NAME, self.jailer_id, back_cmd)
_ = run(cmd, shell=True, stderr=PIPE) | 5,550,420,028,613,648,000 | Clean up this jailer context. | tests/framework/jailer.py | cleanup | Pennyzct/firecracker | python | def cleanup(self):
shutil.rmtree(self.chroot_base_with_id(), ignore_errors=True)
if self.netns:
_ = run('ip netns del {}'.format(self.netns), shell=True, stderr=PIPE)
controllers = ('cpu', 'cpuset', 'pids')
for controller in controllers:
try:
retry_call(f=self._kill_crgoup_tasks, fargs=[controller], exceptions=TimeoutError, max_delay=5)
except TimeoutError:
pass
back_cmd = '-depth -type d -exec rmdir {} \\;'
cmd = 'find /sys/fs/cgroup/{}/{}/{} {}'.format(controller, FC_BINARY_NAME, self.jailer_id, back_cmd)
_ = run(cmd, shell=True, stderr=PIPE) |
def _kill_crgoup_tasks(self, controller):
'Simulate wait on pid.\n\n Read the tasks file and stay there until /proc/{pid}\n disappears. The retry function that calls this code makes\n sure we do not timeout.\n '
tasks_file = '/sys/fs/cgroup/{}/{}/{}/tasks'.format(controller, FC_BINARY_NAME, self.jailer_id)
if (not os.path.exists(tasks_file)):
return True
cmd = 'cat {}'.format(tasks_file)
tasks = run(cmd, shell=True, stdout=PIPE).stdout.decode('utf-8')
tasks_split = tasks.splitlines()
for task in tasks_split:
if os.path.exists('/proc/{}'.format(task)):
raise TimeoutError
return True | -8,661,605,086,853,101,000 | Simulate wait on pid.
Read the tasks file and stay there until /proc/{pid}
disappears. The retry function that calls this code makes
sure we do not timeout. | tests/framework/jailer.py | _kill_crgoup_tasks | Pennyzct/firecracker | python | def _kill_crgoup_tasks(self, controller):
'Simulate wait on pid.\n\n Read the tasks file and stay there until /proc/{pid}\n disappears. The retry function that calls this code makes\n sure we do not timeout.\n '
tasks_file = '/sys/fs/cgroup/{}/{}/{}/tasks'.format(controller, FC_BINARY_NAME, self.jailer_id)
if (not os.path.exists(tasks_file)):
return True
cmd = 'cat {}'.format(tasks_file)
tasks = run(cmd, shell=True, stdout=PIPE).stdout.decode('utf-8')
tasks_split = tasks.splitlines()
for task in tasks_split:
if os.path.exists('/proc/{}'.format(task)):
raise TimeoutError
return True |
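The retry loop that cleanup() wraps around this method can be reduced to a dependency-free sketch; the helper below only approximates the retry_call semantics used above.
import time

def wait_until(probe, attempts=50, delay=0.1):
    # Re-invoke probe until it stops raising TimeoutError, then return its result.
    for _ in range(attempts):
        try:
            return probe()
        except TimeoutError:
            time.sleep(delay)
    raise TimeoutError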
def __init__(self, *args, **kwargs):
'\n user object is passed to the form in kwargs in the view\n the user object is removed from kwargs and then the\n super class form object is instantiated. This is because\n our form needs the user object not its super class.\n '
self.user = kwargs.pop('user', None)
super(ReviewForm, self).__init__(*args, **kwargs) | 83,406,404,375,282,610 | user object is passed to the form in kwargs in the view
the user object is removed from kwargs and then the
super class form object is instantiated. This is because
our form needs the user object not its super class. | reviews/forms.py | __init__ | mohammadasim/online-bookstore | python | def __init__(self, *args, **kwargs):
'\n user object is passed to the form in kwargs in the view\n the user object is removed from kwargs and then the\n super class form object is instantiated. This is because\n our form needs the user object not its super class.\n '
self.user = kwargs.pop('user', None)
super(ReviewForm, self).__init__(*args, **kwargs) |
def clean_book(self, *args, **kwargs):
'\n This method checks if a user has already reviewed\n the selected book. As per django docs exists() is\n an efficient way of checking this.\n '
book = self.cleaned_data.get('book')
if Review.objects.filter(book=book, author=self.user).exists():
raise forms.ValidationError('Book already reviewed by user {}'.format(self.user))
else:
return book | -1,303,249,135,532,956,400 | This method checks if a user has already reviewed
the selected book. As per django docs exists() is
an efficient way of checking this. | reviews/forms.py | clean_book | mohammadasim/online-bookstore | python | def clean_book(self, *args, **kwargs):
'\n This method checks if a user has already reviewed\n the selected book. As per django docs exists() is\n an efficient way of checking this.\n '
book = self.cleaned_data.get('book')
if Review.objects.filter(book=book, author=self.user).exists():
raise forms.ValidationError('Book already reviewed by user {}'.format(self.user))
else:
return book |
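On the consuming side, a Django view would pass the request user through the kwarg popped in __init__; the view body and the author field below are assumptions, only the user= convention comes from the form above.
def create_review(request):
    form = ReviewForm(request.POST or None, user=request.user)
    if form.is_valid():
        review = form.save(commit=False)
        review.author = request.user  # assumed field name, matching the filter in clean_book
        review.save()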
def __init__(self, taxonomy: Union[(pd.DataFrame, pd.Series, str)], taxonomy_columns: Union[(str, int, Sequence[Union[(int, str)]])]=None, **kwargs: Any) -> None:
'Constructor for :class:`.RepTaxonomy`\n\n Parameters\n ----------\n taxonomy\n Data containing feature taxonomy\n taxonomy_columns\n Column(s) containing taxonomy data\n kwargs\n Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.\n '
tmp_metadata = kwargs.pop('metadata', {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if (taxonomy.shape[0] > 0):
if (taxonomy.shape[1] > 1):
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError('Provided `taxonomy` DataFrame has invalid ranks.')
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError('Provided `taxonomy` DataFrame is invalid.')
elif isinstance(taxonomy, pd.Series):
if (taxonomy.shape[0] > 0):
tmp_taxonomy = taxonomy
else:
raise ValueError('Provided `taxonomy` Series is invalid.')
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[(- 1)].lower()
if (file_extension in ['.csv', '.tsv']):
if (taxonomy_columns is None):
tmp_taxonomy = pd.read_csv(taxonomy, sep=kwargs.pop('sep', ','), header=kwargs.pop('header', 'infer'), index_col=kwargs.pop('index_col', None))
elif isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(taxonomy, sep=kwargs.pop('sep', ','), header=kwargs.pop('header', 'infer'), index_col=kwargs.pop('index_col', None)).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(taxonomy, sep=kwargs.pop('sep', ','), header=kwargs.pop('header', 'infer'), index_col=kwargs.pop('index_col', None)).loc[:, taxonomy_columns]
elif (file_extension in ['.biom', '.biome']):
(tmp_taxonomy, new_metadata) = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({'biom': new_metadata})
else:
raise NotImplementedError('File type is not supported.')
else:
raise FileNotFoundError('Provided `taxonomy` file path is invalid.')
else:
raise TypeError('Provided `taxonomy` has invalid type.')
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs) | -7,528,386,294,425,855 | Constructor for :class:`.RepTaxonomy`
Parameters
----------
taxonomy
Data containing feature taxonomy
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to :func:`~pandas.read_csv` or :mod:`biome` loader. | pmaf/biome/essentials/_taxonomy.py | __init__ | mmtechslv/PhyloMAF | python | def __init__(self, taxonomy: Union[(pd.DataFrame, pd.Series, str)], taxonomy_columns: Union[(str, int, Sequence[Union[(int, str)]])]=None, **kwargs: Any) -> None:
'Constructor for :class:`.RepTaxonomy`\n\n Parameters\n ----------\n taxonomy\n Data containing feature taxonomy\n taxonomy_columns\n Column(s) containing taxonomy data\n kwargs\n Passed to :func:`~pandas.read_csv` or :mod:`biome` loader.\n '
tmp_metadata = kwargs.pop('metadata', {})
self.__avail_ranks = []
self.__internal_taxonomy = None
if isinstance(taxonomy, pd.DataFrame):
if (taxonomy.shape[0] > 0):
if (taxonomy.shape[1] > 1):
if validate_ranks(list(taxonomy.columns.values), VALID_RANKS):
tmp_taxonomy = taxonomy
else:
raise ValueError('Provided `taxonomy` DataFrame has invalid ranks.')
else:
tmp_taxonomy = taxonomy.iloc[:, 0]
else:
raise ValueError('Provided `taxonomy` DataFrame is invalid.')
elif isinstance(taxonomy, pd.Series):
if (taxonomy.shape[0] > 0):
tmp_taxonomy = taxonomy
else:
raise ValueError('Provided `taxonomy` Series is invalid.')
elif isinstance(taxonomy, str):
if path.isfile(taxonomy):
file_extension = path.splitext(taxonomy)[(- 1)].lower()
if (file_extension in ['.csv', '.tsv']):
if (taxonomy_columns is None):
tmp_taxonomy = pd.read_csv(taxonomy, sep=kwargs.pop('sep', ','), header=kwargs.pop('header', 'infer'), index_col=kwargs.pop('index_col', None))
elif isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(taxonomy, sep=kwargs.pop('sep', ','), header=kwargs.pop('header', 'infer'), index_col=kwargs.pop('index_col', None)).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(taxonomy, sep=kwargs.pop('sep', ','), header=kwargs.pop('header', 'infer'), index_col=kwargs.pop('index_col', None)).loc[:, taxonomy_columns]
elif (file_extension in ['.biom', '.biome']):
(tmp_taxonomy, new_metadata) = self.__load_biom(taxonomy, **kwargs)
tmp_metadata.update({'biom': new_metadata})
else:
raise NotImplementedError('File type is not supported.')
else:
raise FileNotFoundError('Provided `taxonomy` file path is invalid.')
else:
raise TypeError('Provided `taxonomy` has invalid type.')
self.__init_internal_taxonomy(tmp_taxonomy, **kwargs)
super().__init__(metadata=tmp_metadata, **kwargs) |
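A minimal construction sketch, assuming greengenes-style lineage strings; the feature IDs are invented and the parsed rank labels depend on pmaf's VALID_RANKS constant.
import pandas as pd

lineages = pd.Series({
    'otu1': 'k__Bacteria; p__Firmicutes; c__Bacilli',
    'otu2': 'k__Bacteria; p__Proteobacteria',
})
tax = RepTaxonomy(lineages)  # notation is auto-detected; greengenes-style here
print(tax.avail_ranks)       # ranks that actually carry taxa for these features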
@classmethod
def from_csv(cls, filepath: str, taxonomy_columns: Union[(str, int, Sequence[Union[(int, str)]])]=None, **kwargs: Any) -> 'RepTaxonomy':
'Factory method to construct a :class:`.RepTaxonomy` from CSV file.\n\n Parameters\n ----------\n filepath\n Path to .csv File\n taxonomy_columns\n Column(s) containing taxonomy data\n kwargs\n Passed to the constructor.\n\n Returns\n -------\n Instance of\n class:`.RepTaxonomy`\n '
if (taxonomy_columns is None):
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
elif isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop('metadata', {})
tmp_metadata.update({'filepath': path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs) | 7,537,830,283,664,341,000 | Factory method to construct a :class:`.RepTaxonomy` from CSV file.
Parameters
----------
filepath
Path to .csv File
taxonomy_columns
Column(s) containing taxonomy data
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy` | pmaf/biome/essentials/_taxonomy.py | from_csv | mmtechslv/PhyloMAF | python | @classmethod
def from_csv(cls, filepath: str, taxonomy_columns: Union[(str, int, Sequence[Union[(int, str)]])]=None, **kwargs: Any) -> 'RepTaxonomy':
'Factory method to construct a :class:`.RepTaxonomy` from CSV file.\n\n Parameters\n ----------\n filepath\n Path to .csv File\n taxonomy_columns\n Column(s) containing taxonomy data\n kwargs\n Passed to the constructor.\n\n Returns\n -------\n Instance of\n class:`.RepTaxonomy`\n '
if (taxonomy_columns is None):
tmp_taxonomy = pd.read_csv(filepath, **kwargs)
elif isinstance(taxonomy_columns, int):
tmp_taxonomy = pd.read_csv(filepath, **kwargs).iloc[:, taxonomy_columns]
else:
tmp_taxonomy = pd.read_csv(filepath, **kwargs).loc[:, taxonomy_columns]
tmp_metadata = kwargs.pop('metadata', {})
tmp_metadata.update({'filepath': path.abspath(filepath)})
return cls(taxonomy=tmp_taxonomy, metadata=tmp_metadata, **kwargs) |
@classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> 'RepTaxonomy':
'Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`\n file.\n\n Parameters\n ----------\n filepath\n :mod:`biom` file path.\n kwargs\n Passed to the constructor.\n\n Returns\n -------\n Instance of\n class:`.RepTaxonomy`\n '
(taxonomy_frame, new_metadata) = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop('metadata', {})
tmp_metadata.update({'biom': new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs) | 7,499,621,541,870,932,000 | Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`
file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Passed to the constructor.
Returns
-------
Instance of
class:`.RepTaxonomy` | pmaf/biome/essentials/_taxonomy.py | from_biom | mmtechslv/PhyloMAF | python | @classmethod
def from_biom(cls, filepath: str, **kwargs: Any) -> 'RepTaxonomy':
'Factory method to construct a :class:`.RepTaxonomy` from :mod:`biom`\n file.\n\n Parameters\n ----------\n filepath\n :mod:`biom` file path.\n kwargs\n Passed to the constructor.\n\n Returns\n -------\n Instance of\n class:`.RepTaxonomy`\n '
(taxonomy_frame, new_metadata) = cls.__load_biom(filepath, **kwargs)
tmp_metadata = kwargs.pop('metadata', {})
tmp_metadata.update({'biom': new_metadata})
return cls(taxonomy=taxonomy_frame, metadata=tmp_metadata, **kwargs) |
@classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[(pd.DataFrame, dict)]:
'Actual private method to process :mod:`biom` file.\n\n Parameters\n ----------\n filepath\n :mod:`biom` file path.\n kwargs\n Compatibility\n '
biom_file = biom.load_table(filepath)
if (biom_file.metadata(axis='observation') is not None):
obs_data = biom_file.metadata_to_dataframe('observation')
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [colname for tax_name in BIOM_TAXONOMY_NAMES for colname in col_names_low if ((colname[::(- 1)].find(tax_name[::(- 1)]) < 3) and (colname[::(- 1)].find(tax_name[::(- 1)]) > (- 1)))]
metadata_cols = [col for col in col_names if (col.lower() not in avail_col_names)]
if (len(avail_col_names) == 1):
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return (taxonomy_frame, tmp_metadata)
else:
raise ValueError('Biom file does not contain observation metadata.') | -3,765,458,142,094,429,000 | Actual private method to process :mod:`biom` file.
Parameters
----------
filepath
:mod:`biom` file path.
kwargs
Compatibility | pmaf/biome/essentials/_taxonomy.py | __load_biom | mmtechslv/PhyloMAF | python | @classmethod
def __load_biom(cls, filepath: str, **kwargs: Any) -> Tuple[(pd.DataFrame, dict)]:
'Actual private method to process :mod:`biom` file.\n\n Parameters\n ----------\n filepath\n :mod:`biom` file path.\n kwargs\n Compatibility\n '
biom_file = biom.load_table(filepath)
if (biom_file.metadata(axis='observation') is not None):
obs_data = biom_file.metadata_to_dataframe('observation')
col_names = list(obs_data.columns.values)
col_names_low = [col.lower() for col in col_names]
avail_col_names = [colname for tax_name in BIOM_TAXONOMY_NAMES for colname in col_names_low if ((colname[::(- 1)].find(tax_name[::(- 1)]) < 3) and (colname[::(- 1)].find(tax_name[::(- 1)]) > (- 1)))]
metadata_cols = [col for col in col_names if (col.lower() not in avail_col_names)]
if (len(avail_col_names) == 1):
tmp_col_index = col_names_low.index(avail_col_names[0])
taxonomy_frame = obs_data[col_names[tmp_col_index]]
else:
taxonomy_frame = obs_data
tmp_metadata = obs_data.loc[:, metadata_cols].to_dict()
return (taxonomy_frame, tmp_metadata)
else:
raise ValueError('Biom file does not contain observation metadata.') |
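Reading observation metadata out of a BIOM table, reduced to its core; the file path is hypothetical and the column scan is simplified relative to the BIOM_TAXONOMY_NAMES matching above.
import biom

table = biom.load_table('feature-table.biom')  # hypothetical path
if table.metadata(axis='observation') is not None:
    obs = table.metadata_to_dataframe('observation')
    tax_cols = [c for c in obs.columns if 'taxonomy' in c.lower()]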
def _remove_features_by_id(self, ids: AnyGenericIdentifier, **kwargs: Any) -> Optional[AnyGenericIdentifier]:
'Remove features by features ids and ratify action.\n\n Parameters\n ----------\n ids\n Feature identifiers\n kwargs\n Compatibility\n '
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if (len(tmp_ids) > 0):
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action('_remove_features_by_id', ids, **kwargs) | 8,831,458,497,025,449,000 | Remove features by features ids and ratify action.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility | pmaf/biome/essentials/_taxonomy.py | _remove_features_by_id | mmtechslv/PhyloMAF | python | def _remove_features_by_id(self, ids: AnyGenericIdentifier, **kwargs: Any) -> Optional[AnyGenericIdentifier]:
'Remove features by features ids and ratify action.\n\n Parameters\n ----------\n ids\n Feature identifiers\n kwargs\n Compatibility\n '
tmp_ids = np.asarray(ids, dtype=self.__internal_taxonomy.index.dtype)
if (len(tmp_ids) > 0):
self.__internal_taxonomy.drop(tmp_ids, inplace=True)
return self._ratify_action('_remove_features_by_id', ids, **kwargs) |
def _merge_features_by_map(self, map_dict: Mapper, done: bool=False, **kwargs: Any) -> Optional[Mapper]:
'Merge features and ratify action.\n\n Parameters\n ----------\n map_dict\n Map to use for merging\n done\n Whether merging was completed or not. Compatibility.\n kwargs\n Compatibility\n '
if (not done):
raise NotImplementedError
if map_dict:
return self._ratify_action('_merge_features_by_map', map_dict, _annotations=self.__internal_taxonomy.loc[:, 'lineage'].to_dict(), **kwargs) | -9,112,999,547,379,325,000 | Merge features and ratify action.
Parameters
----------
map_dict
Map to use for merging
done
Whether merging was completed or not. Compatibility.
kwargs
Compatibility | pmaf/biome/essentials/_taxonomy.py | _merge_features_by_map | mmtechslv/PhyloMAF | python | def _merge_features_by_map(self, map_dict: Mapper, done: bool=False, **kwargs: Any) -> Optional[Mapper]:
'Merge features and ratify action.\n\n Parameters\n ----------\n map_dict\n Map to use for merging\n done\n Whether merging was completed or not. Compatibility.\n kwargs\n Compatibility\n '
if (not done):
raise NotImplementedError
if map_dict:
return self._ratify_action('_merge_features_by_map', map_dict, _annotations=self.__internal_taxonomy.loc[:, 'lineage'].to_dict(), **kwargs) |
def drop_feature_by_id(self, ids: AnyGenericIdentifier, **kwargs: Any) -> Optional[AnyGenericIdentifier]:
'Remove features by feature `ids`.\n\n Parameters\n ----------\n ids\n Feature identifiers\n kwargs\n Compatibility\n '
target_ids = np.asarray(ids)
if (self.xrid.isin(target_ids).sum() == len(target_ids)):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError('Invalid feature ids are provided.') | 7,402,227,226,511,179,000 | Remove features by feature `ids`.
Parameters
----------
ids
Feature identifiers
kwargs
Compatibility | pmaf/biome/essentials/_taxonomy.py | drop_feature_by_id | mmtechslv/PhyloMAF | python | def drop_feature_by_id(self, ids: AnyGenericIdentifier, **kwargs: Any) -> Optional[AnyGenericIdentifier]:
'Remove features by feature `ids`.\n\n Parameters\n ----------\n ids\n Feature identifiers\n kwargs\n Compatibility\n '
target_ids = np.asarray(ids)
if (self.xrid.isin(target_ids).sum() == len(target_ids)):
return self._remove_features_by_id(target_ids, **kwargs)
else:
raise ValueError('Invalid feature ids are provided.') |
def get_taxonomy_by_id(self, ids: Optional[AnyGenericIdentifier]=None) -> pd.DataFrame:
'Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.\n\n Parameters\n ----------\n ids\n Either feature indices or None for all.\n\n Returns\n -------\n class:`pandas.DataFrame` with taxonomy data\n '
if (ids is None):
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if (self.xrid.isin(target_ids).sum() == len(target_ids)):
return self.__internal_taxonomy.loc[(target_ids, self.__avail_ranks)]
else:
raise ValueError('Invalid feature ids are provided.') | -7,007,649,516,778,548,000 | Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
Returns
-------
class:`pandas.DataFrame` with taxonomy data | pmaf/biome/essentials/_taxonomy.py | get_taxonomy_by_id | mmtechslv/PhyloMAF | python | def get_taxonomy_by_id(self, ids: Optional[AnyGenericIdentifier]=None) -> pd.DataFrame:
'Get taxonomy :class:`~pandas.DataFrame` by feature `ids`.\n\n Parameters\n ----------\n ids\n Either feature indices or None for all.\n\n Returns\n -------\n class:`pandas.DataFrame` with taxonomy data\n '
if (ids is None):
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
if (self.xrid.isin(target_ids).sum() == len(target_ids)):
return self.__internal_taxonomy.loc[(target_ids, self.__avail_ranks)]
else:
raise ValueError('Invalid feature ids are provided.') |
def get_lineage_by_id(self, ids: Optional[AnyGenericIdentifier]=None, missing_rank: bool=False, desired_ranks: Union[(bool, Sequence[str])]=False, drop_ranks: Union[(bool, Sequence[str])]=False, **kwargs: Any) -> pd.Series:
'Get taxonomy lineages by feature `ids`.\n\n Parameters\n ----------\n ids\n Either feature indices or None for all.\n missing_rank\n If True will generate prefix like `s__` or `d__`\n desired_ranks\n List of desired ranks to generate.\n If False then will generate all main ranks\n drop_ranks\n List of ranks to drop from desired ranks.\n This parameter is only useful if `missing_rank` is True\n kwargs\n Compatibility.\n\n Returns\n -------\n class:`pandas.Series` with consensus lineages and corresponding IDs\n '
if (ids is None):
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = (VALID_RANKS if (desired_ranks is False) else desired_ranks)
total_valid_rids = self.xrid.isin(target_ids).sum()
if (total_valid_rids == len(target_ids)):
return generate_lineages_from_taxa(self.__internal_taxonomy.loc[target_ids], missing_rank, tmp_desired_ranks, drop_ranks)
elif (total_valid_rids < len(target_ids)):
return generate_lineages_from_taxa(self.__internal_taxonomy.loc[np.unique(target_ids)], missing_rank, tmp_desired_ranks, drop_ranks)
else:
raise ValueError('Invalid feature ids are provided.') | -8,675,357,545,532,627,000 | Get taxonomy lineages by feature `ids`.
Parameters
----------
ids
Either feature indices or None for all.
missing_rank
If True will generate prefix like `s__` or `d__`
desired_ranks
List of desired ranks to generate.
If False then will generate all main ranks
drop_ranks
List of ranks to drop from desired ranks.
This parameter is only useful if `missing_rank` is True
kwargs
Compatibility.
Returns
-------
class:`pandas.Series` with consensus lineages and corresponding IDs | pmaf/biome/essentials/_taxonomy.py | get_lineage_by_id | mmtechslv/PhyloMAF | python | def get_lineage_by_id(self, ids: Optional[AnyGenericIdentifier]=None, missing_rank: bool=False, desired_ranks: Union[(bool, Sequence[str])]=False, drop_ranks: Union[(bool, Sequence[str])]=False, **kwargs: Any) -> pd.Series:
'Get taxonomy lineages by feature `ids`.\n\n Parameters\n ----------\n ids\n Either feature indices or None for all.\n missing_rank\n If True will generate prefix like `s__` or `d__`\n desired_ranks\n List of desired ranks to generate.\n If False then will generate all main ranks\n drop_ranks\n List of ranks to drop from desired ranks.\n This parameter is only useful if `missing_rank` is True\n kwargs\n Compatibility.\n\n Returns\n -------\n class:`pandas.Series` with consensus lineages and corresponding IDs\n '
if (ids is None):
target_ids = self.xrid
else:
target_ids = np.asarray(ids)
tmp_desired_ranks = (VALID_RANKS if (desired_ranks is False) else desired_ranks)
total_valid_rids = self.xrid.isin(target_ids).sum()
if (total_valid_rids == len(target_ids)):
return generate_lineages_from_taxa(self.__internal_taxonomy.loc[target_ids], missing_rank, tmp_desired_ranks, drop_ranks)
elif (total_valid_rids < len(target_ids)):
return generate_lineages_from_taxa(self.__internal_taxonomy.loc[np.unique(target_ids)], missing_rank, tmp_desired_ranks, drop_ranks)
else:
raise ValueError('Invalid feature ids are provided.') |
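A hedged call sketch, continuing the hypothetical tax object from the construction example; the rank labels passed are assumed to be members of VALID_RANKS.
lineages = tax.get_lineage_by_id(ids=['otu1'], missing_rank=True,
                                 desired_ranks=['p', 'c'])
# Returns a pandas Series mapping each feature id to its regenerated lineage,
# with absent ranks rendered as bare prefixes such as 'c__' when missing_rank=True.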
def find_features_by_pattern(self, pattern_str: str, case_sensitive: bool=False, regex: bool=False) -> np.ndarray:
'Searches for features with taxa that match `pattern_str`\n\n Parameters\n ----------\n pattern_str\n Pattern to search for\n case_sensitive\n Case sensitive mode\n regex\n Use regular expressions\n\n Returns\n -------\n class:`~numpy.ndarray` with indices\n '
return self.__internal_taxonomy[self.__internal_taxonomy.loc[:, 'lineage'].str.contains(pattern_str, case=case_sensitive, regex=regex)].index.values | -5,416,422,725,638,271,000 | Searches for features with taxa that match `pattern_str`
Parameters
----------
pattern_str
Pattern to search for
case_sensitive
Case sensitive mode
regex
Use regular expressions
Returns
-------
class:`~numpy.ndarray` with indices | pmaf/biome/essentials/_taxonomy.py | find_features_by_pattern | mmtechslv/PhyloMAF | python | def find_features_by_pattern(self, pattern_str: str, case_sensitive: bool=False, regex: bool=False) -> np.ndarray:
'Searches for features with taxa that match `pattern_str`\n\n Parameters\n ----------\n pattern_str\n Pattern to search for\n case_sensitive\n Case sensitive mode\n regex\n Use regular expressions\n\n Returns\n -------\n class:`~numpy.ndarray` with indices\n '
return self.__internal_taxonomy[self.__internal_taxonomy.loc[:, 'lineage'].str.contains(pattern_str, case=case_sensitive, regex=regex)].index.values |
def drop_features_without_taxa(self, **kwargs: Any) -> Optional[AnyGenericIdentifier]:
'Remove features that do not contain taxonomy.\n\n Parameters\n ----------\n kwargs\n Compatibility\n '
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs) | -3,912,643,530,907,570,700 | Remove features that do not contain taxonomy.
Parameters
----------
kwargs
Compatibility | pmaf/biome/essentials/_taxonomy.py | drop_features_without_taxa | mmtechslv/PhyloMAF | python | def drop_features_without_taxa(self, **kwargs: Any) -> Optional[AnyGenericIdentifier]:
'Remove features that do not contain taxonomy.\n\n Parameters\n ----------\n kwargs\n Compatibility\n '
ids_to_drop = self.find_features_without_taxa()
return self._remove_features_by_id(ids_to_drop, **kwargs) |
def drop_features_without_ranks(self, ranks: Sequence[str], any: bool=False, **kwargs: Any) -> Optional[AnyGenericIdentifier]:
'Remove features that do not contain `ranks`\n\n Parameters\n ----------\n ranks\n Ranks to look for\n any\n If True removes feature with single occurrence of missing rank.\n If False all `ranks` must be missing.\n kwargs\n Compatibility\n '
target_ranks = np.asarray(ranks)
if (self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(target_ranks)):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1))
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError('Invalid ranks are provided.') | -4,045,011,512,282,671,000 | Remove features that do not contain `ranks`
Parameters
----------
ranks
Ranks to look for
any
If True removes feature with single occurrence of missing rank.
If False all `ranks` must be missing.
kwargs
Compatibility | pmaf/biome/essentials/_taxonomy.py | drop_features_without_ranks | mmtechslv/PhyloMAF | python | def drop_features_without_ranks(self, ranks: Sequence[str], any: bool=False, **kwargs: Any) -> Optional[AnyGenericIdentifier]:
'Remove features that do not contain `ranks`\n\n Parameters\n ----------\n ranks\n Ranks to look for\n any\n If True removes feature with single occurrence of missing rank.\n If False all `ranks` must be missing.\n kwargs\n Compatibility\n '
target_ranks = np.asarray(ranks)
if (self.__internal_taxonomy.columns.isin(target_ranks).sum() == len(target_ranks)):
no_rank_mask = self.__internal_taxonomy.loc[:, ranks].isna()
no_rank_mask_adjusted = (no_rank_mask.any(axis=1) if any else no_rank_mask.all(axis=1))
ids_to_drop = self.__internal_taxonomy.loc[no_rank_mask_adjusted].index
return self._remove_features_by_id(ids_to_drop, **kwargs)
else:
raise ValueError('Invalid ranks are provided.') |
def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
'Merge features with duplicated taxonomy.\n\n Parameters\n ----------\n kwargs\n Compatibility\n '
ret = {}
groupby = self.__internal_taxonomy.groupby('lineage')
if any([(len(group) > 1) for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for (lineage, feature_ids) in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(pd.Series(data=tmp_feature_lineage, index=group_indices))
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs) | -6,375,148,192,394,624,000 | Merge features with duplicated taxonomy.
Parameters
----------
kwargs
Compatibility | pmaf/biome/essentials/_taxonomy.py | merge_duplicated_features | mmtechslv/PhyloMAF | python | def merge_duplicated_features(self, **kwargs: Any) -> Optional[Mapper]:
'Merge features with duplicated taxonomy.\n\n Parameters\n ----------\n kwargs\n Compatibility\n '
ret = {}
groupby = self.__internal_taxonomy.groupby('lineage')
if any([(len(group) > 1) for group in groupby.groups.values()]):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groupby.groups)))
for (lineage, feature_ids) in groupby.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(pd.Series(data=tmp_feature_lineage, index=group_indices))
ret = dict(zip(group_indices, tmp_groups))
return self._merge_features_by_map(ret, True, **kwargs) |
def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
'Merge features by taxonomic rank/level.\n\n Parameters\n ----------\n level\n Taxonomic rank/level to use for merging.\n kwargs\n Compatibility\n '
ret = {}
if (not isinstance(level, str)):
raise TypeError('`rank` must have str type.')
if (level in self.__avail_ranks):
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(self.__internal_taxonomy, False, target_ranks, False)
groups = tmp_lineages.groupby(tmp_lineages)
if (len(groups.groups) > 1):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for (lineage, feature_ids) in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(pd.Series(data=tmp_feature_lineage, index=group_indices))
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError('Invalid rank is provided.')
return self._merge_features_by_map(ret, True, **kwargs) | -6,746,294,497,393,013,000 | Merge features by taxonomic rank/level.
Parameters
----------
level
Taxonomic rank/level to use for merging.
kwargs
Compatibility | pmaf/biome/essentials/_taxonomy.py | merge_features_by_rank | mmtechslv/PhyloMAF | python | def merge_features_by_rank(self, level: str, **kwargs: Any) -> Optional[Mapper]:
'Merge features by taxonomic rank/level.\n\n Parameters\n ----------\n level\n Taxonomic rank/level to use for merging.\n kwargs\n Compatibility\n '
ret = {}
if (not isinstance(level, str)):
raise TypeError('`rank` must have str type.')
if (level in self.__avail_ranks):
target_ranks = get_rank_upto(self.avail_ranks, level, True)
if target_ranks:
tmp_lineages = generate_lineages_from_taxa(self.__internal_taxonomy, False, target_ranks, False)
groups = tmp_lineages.groupby(tmp_lineages)
if (len(groups.groups) > 1):
tmp_feature_lineage = []
tmp_groups = []
group_indices = list(range(len(groups.groups)))
for (lineage, feature_ids) in groups.groups.items():
tmp_feature_lineage.append(lineage)
tmp_groups.append(list(feature_ids))
self.__init_internal_taxonomy(pd.Series(data=tmp_feature_lineage, index=group_indices))
ret = dict(zip(group_indices, tmp_groups))
else:
raise ValueError('Invalid rank is provided.')
return self._merge_features_by_map(ret, True, **kwargs) |
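The grouping step behind the rank merge can be shown independently of pmaf; the toy frame below stands in for the internal taxonomy table.
import pandas as pd

taxa = pd.DataFrame({'p': ['Firmicutes', 'Firmicutes', 'Proteobacteria'],
                     'c': ['Bacilli', 'Clostridia', 'Gammaproteobacteria']},
                    index=[1, 2, 3])
groups = taxa.groupby('p').groups  # {'Firmicutes': [1, 2], 'Proteobacteria': [3]}
merge_map = {i: list(ids) for i, ids in enumerate(groups.values())}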
def find_features_without_taxa(self) -> np.ndarray:
'Find features without taxa.\n\n Returns\n -------\n class:`~numpy.ndarray` with feature indices.\n '
return self.__internal_taxonomy.loc[(self.__internal_taxonomy.loc[:, VALID_RANKS].agg((lambda rank: len(''.join(map((lambda x: str((x or ''))), rank)))), axis=1) < 1)].index.values | -1,638,993,383,532,893,200 | Find features without taxa.
Returns
-------
class:`~numpy.ndarray` with feature indices. | pmaf/biome/essentials/_taxonomy.py | find_features_without_taxa | mmtechslv/PhyloMAF | python | def find_features_without_taxa(self) -> np.ndarray:
'Find features without taxa.\n\n Returns\n -------\n class:`~numpy.ndarray` with feature indices.\n '
return self.__internal_taxonomy.loc[(self.__internal_taxonomy.loc[:, VALID_RANKS].agg((lambda rank: len(''.join(map((lambda x: str((x or ''))), rank)))), axis=1) < 1)].index.values
def get_subset(self, rids: Optional[AnyGenericIdentifier]=None, *args, **kwargs: Any) -> 'RepTaxonomy':
'Get subset of the :class:`.RepTaxonomy`.\n\n Parameters\n ----------\n rids\n Feature identifiers.\n args\n Compatibility\n kwargs\n Compatibility\n\n Returns\n -------\n class:`.RepTaxonomy`\n '
if (rids is None):
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if (not (self.xrid.isin(target_rids).sum() == len(target_rids))):
raise ValueError('Invalid feature ids are provided.')
return type(self)(taxonomy=self.__internal_taxonomy.loc[(target_rids, 'lineage')], metadata=self.metadata, name=self.name) | -2,655,500,982,097,245,000 | Get subset of the :class:`.RepTaxonomy`.
Parameters
----------
rids
Feature identifiers.
args
Compatibility
kwargs
Compatibility
Returns
-------
class:`.RepTaxonomy` | pmaf/biome/essentials/_taxonomy.py | get_subset | mmtechslv/PhyloMAF | python | def get_subset(self, rids: Optional[AnyGenericIdentifier]=None, *args, **kwargs: Any) -> 'RepTaxonomy':
'Get subset of the :class:`.RepTaxonomy`.\n\n Parameters\n ----------\n rids\n Feature identifiers.\n args\n Compatibility\n kwargs\n Compatibility\n\n Returns\n -------\n class:`.RepTaxonomy`\n '
if (rids is None):
target_rids = self.xrid
else:
target_rids = np.asarray(rids).astype(self.__internal_taxonomy.index.dtype)
if (not (self.xrid.isin(target_rids).sum() == len(target_rids))):
raise ValueError('Invalid feature ids are provided.')
return type(self)(taxonomy=self.__internal_taxonomy.loc[(target_rids, 'lineage')], metadata=self.metadata, name=self.name) |
def _export(self, taxlike: str='lineage', ascending: bool=True, **kwargs: Any) -> Tuple[(pd.Series, dict)]:
'Creates taxonomy for export.\n\n Parameters\n ----------\n taxlike\n Generate taxonomy in the requested format (currently only `lineage` is supported).\n ascending\n Sort lineages in ascending order.\n kwargs\n Compatibility\n '
if (taxlike == 'lineage'):
return (self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending), kwargs)
else:
raise NotImplementedError | -8,751,291,473,556,460,000 | Creates taxonomy for export.
Parameters
----------
taxlike
Generate taxonomy in the requested format (currently only `lineage` is supported).
ascending
Sort lineages in ascending order.
kwargs
Compatibility | pmaf/biome/essentials/_taxonomy.py | _export | mmtechslv/PhyloMAF | python | def _export(self, taxlike: str='lineage', ascending: bool=True, **kwargs: Any) -> Tuple[(pd.Series, dict)]:
'Creates taxonomy for export.\n\n Parameters\n ----------\n taxlike\n Generate taxonomy in the requested format (currently only `lineage` is supported).\n ascending\n Sort lineages in ascending order.\n kwargs\n Compatibility\n '
if (taxlike == 'lineage'):
return (self.get_lineage_by_id(**kwargs).sort_values(ascending=ascending), kwargs)
else:
raise NotImplementedError
def export(self, output_fp: str, *args, _add_ext: bool=False, sep: str=',', **kwargs: Any) -> None:
'Exports the taxonomy into the specified file.\n\n Parameters\n ----------\n output_fp\n Export filepath\n args\n Compatibility\n _add_ext\n Add file extension or not.\n sep\n Delimiter\n kwargs\n Compatibility\n '
(tmp_export, rkwarg) = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv('{}.csv'.format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep) | -1,972,528,056,840,844,000 | Exports the taxonomy into the specified file.
Parameters
----------
output_fp
Export filepath
args
Compatibility
_add_ext
Add file extension or not.
sep
Delimiter
kwargs
Compatibility | pmaf/biome/essentials/_taxonomy.py | export | mmtechslv/PhyloMAF | python | def export(self, output_fp: str, *args, _add_ext: bool=False, sep: str=',', **kwargs: Any) -> None:
'Exports the taxonomy into the specified file.\n\n Parameters\n ----------\n output_fp\n Export filepath\n args\n Compatibility\n _add_ext\n Add file extension or not.\n sep\n Delimiter\n kwargs\n Compatibility\n '
(tmp_export, rkwarg) = self._export(*args, **kwargs)
if _add_ext:
tmp_export.to_csv('{}.csv'.format(output_fp), sep=sep)
else:
tmp_export.to_csv(output_fp, sep=sep) |
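A round-trip sketch with an invented output path; taxlike falls back to its 'lineage' default inside _export.
tax.export('taxonomy_lineages.tsv', sep='\t')  # hypothetical path
import pandas as pd
roundtrip = pd.read_csv('taxonomy_lineages.tsv', sep='\t', index_col=0)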
def copy(self) -> 'RepTaxonomy':
'Copy of the instance.'
return type(self)(taxonomy=self.__internal_taxonomy.loc[:, 'lineage'], metadata=self.metadata, name=self.name) | -1,546,243,708,499,643,100 | Copy of the instance. | pmaf/biome/essentials/_taxonomy.py | copy | mmtechslv/PhyloMAF | python | def copy(self) -> 'RepTaxonomy':
return type(self)(taxonomy=self.__internal_taxonomy.loc[:, 'lineage'], metadata=self.metadata, name=self.name) |
def __fix_taxon_names(self) -> None:
'Fix invalid taxon names.'
def taxon_fixer(taxon):
if ((taxon is not None) and pd.notna(taxon)):
tmp_taxon_trimmed = taxon.lower().strip()
if (len(tmp_taxon_trimmed) > 0):
if (tmp_taxon_trimmed[0] == '['):
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if (tmp_taxon_trimmed[(- 1)] == ']'):
tmp_taxon_trimmed = tmp_taxon_trimmed[:(- 1)]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[:, VALID_RANKS].applymap(taxon_fixer) | -3,647,114,907,237,961,000 | Fix invalid taxon names. | pmaf/biome/essentials/_taxonomy.py | __fix_taxon_names | mmtechslv/PhyloMAF | python | def __fix_taxon_names(self) -> None:
def taxon_fixer(taxon):
if ((taxon is not None) and pd.notna(taxon)):
tmp_taxon_trimmed = taxon.lower().strip()
if (len(tmp_taxon_trimmed) > 0):
if (tmp_taxon_trimmed[0] == '['):
tmp_taxon_trimmed = tmp_taxon_trimmed[1:]
if (tmp_taxon_trimmed[(- 1)] == ']'):
tmp_taxon_trimmed = tmp_taxon_trimmed[:(- 1)]
return tmp_taxon_trimmed.capitalize()
else:
return None
else:
return None
self.__internal_taxonomy.loc[:, VALID_RANKS] = self.__internal_taxonomy.loc[:, VALID_RANKS].applymap(taxon_fixer) |
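The per-taxon normalization can be checked in isolation; this standalone copy approximates taxon_fixer above (it folds the None/NaN and empty-string checks together).
def fix_taxon(taxon):
    if taxon is None:
        return None
    t = taxon.lower().strip()
    if not t:
        return None
    if t.startswith('['):
        t = t[1:]
    if t.endswith(']'):
        t = t[:-1]
    return t.capitalize() if t else None

assert fix_taxon(' [clostridium] ') == 'Clostridium'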
def __reconstruct_internal_lineages(self) -> None:
'Reconstruct the internal lineages.'
self.__internal_taxonomy.loc[:, 'lineage'] = generate_lineages_from_taxa(self.__internal_taxonomy, True, self.__avail_ranks, False) | -2,363,896,853,004,943,000 | Reconstruct the internal lineages. | pmaf/biome/essentials/_taxonomy.py | __reconstruct_internal_lineages | mmtechslv/PhyloMAF | python | def __reconstruct_internal_lineages(self) -> None:
self.__internal_taxonomy.loc[:, 'lineage'] = generate_lineages_from_taxa(self.__internal_taxonomy, True, self.__avail_ranks, False) |
def __init_internal_taxonomy(self, taxonomy_data: Union[(pd.Series, pd.DataFrame)], taxonomy_notation: Optional[str]='greengenes', order_ranks: Optional[Sequence[str]]=None, **kwargs: Any) -> None:
"Main method to initialize taxonomy.\n\n Parameters\n ----------\n taxonomy_data\n Incoming parsed taxonomy data\n taxonomy_notation\n Taxonomy lineage notation style. Can be one of\n :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`\n order_ranks\n List with the target rank order. Default is set to None.\n The 'silva' notation require `order_ranks`.\n kwargs\n Compatibility\n "
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(taxonomy_data, taxonomy_notation, order_ranks)
elif isinstance(taxonomy_data, pd.DataFrame):
if (taxonomy_data.shape[1] == 1):
taxonomy_data_series = pd.Series(data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index)
new_taxonomy = self.__init_taxonomy_from_lineages(taxonomy_data_series, taxonomy_notation, order_ranks)
else:
new_taxonomy = self.__init_taxonomy_from_frame(taxonomy_data, taxonomy_notation, order_ranks)
else:
raise RuntimeError('`taxonomy_data` must be either pd.Series or pd.DataFrame')
if (new_taxonomy is None):
raise ValueError('Provided taxonomy is invalid.')
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names()
tmp_avail_ranks = [rank for rank in VALID_RANKS if (rank in new_taxonomy.columns)]
self.__avail_ranks = [rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()]
self.__reconstruct_internal_lineages()
self._init_state = True | -6,238,787,448,559,007,000 | Main method to initialize taxonomy.
Parameters
----------
taxonomy_data
Incoming parsed taxonomy data
taxonomy_notation
Taxonomy lineage notation style. Can be one of
:const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None.
The 'silva' notation requires `order_ranks`.
kwargs
Compatibility | pmaf/biome/essentials/_taxonomy.py | __init_internal_taxonomy | mmtechslv/PhyloMAF | python | def __init_internal_taxonomy(self, taxonomy_data: Union[(pd.Series, pd.DataFrame)], taxonomy_notation: Optional[str]='greengenes', order_ranks: Optional[Sequence[str]]=None, **kwargs: Any) -> None:
"Main method to initialize taxonomy.\n\n Parameters\n ----------\n taxonomy_data\n Incoming parsed taxonomy data\n taxonomy_notation\n Taxonomy lineage notation style. Can be one of\n :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`\n order_ranks\n List with the target rank order. Default is set to None.\n The 'silva' notation require `order_ranks`.\n kwargs\n Compatibility\n "
if isinstance(taxonomy_data, pd.Series):
new_taxonomy = self.__init_taxonomy_from_lineages(taxonomy_data, taxonomy_notation, order_ranks)
elif isinstance(taxonomy_data, pd.DataFrame):
if (taxonomy_data.shape[1] == 1):
taxonomy_data_series = pd.Series(data=taxonomy_data.iloc[:, 0], index=taxonomy_data.index)
new_taxonomy = self.__init_taxonomy_from_lineages(taxonomy_data_series, taxonomy_notation, order_ranks)
else:
new_taxonomy = self.__init_taxonomy_from_frame(taxonomy_data, taxonomy_notation, order_ranks)
else:
raise RuntimeError('`taxonomy_data` must be either pd.Series or pd.DataFrame')
if (new_taxonomy is None):
raise ValueError('Provided taxonomy is invalid.')
self.__internal_taxonomy = new_taxonomy
self.__fix_taxon_names()
tmp_avail_ranks = [rank for rank in VALID_RANKS if (rank in new_taxonomy.columns)]
self.__avail_ranks = [rank for rank in tmp_avail_ranks if new_taxonomy.loc[:, rank].notna().any()]
self.__reconstruct_internal_lineages()
self._init_state = True |
def __init_taxonomy_from_lineages(self, taxonomy_series: pd.Series, taxonomy_notation: Optional[str], order_ranks: Optional[Sequence[str]]) -> pd.DataFrame:
"Main method that produces taxonomy dataframe from lineages.\n\n Parameters\n ----------\n taxonomy_series\n :class:`pandas.Series` with taxonomy lineages\n taxonomy_notation\n Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`\n order_ranks\n List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`.\n "
if (taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS):
notation = taxonomy_notation
else:
sample_taxon = taxonomy_series.iloc[0]
notation = indentify_taxon_notation(sample_taxon)
if (order_ranks is not None):
if all([(rank in VALID_RANKS) for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if (notation == 'greengenes'):
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {elem[0]: elem[1] for elem in tmp_lineage if (elem[0] in VALID_RANKS)}
for rank in VALID_RANKS:
if (rank not in tmp_taxa_dict.keys()):
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append(([None] + tmp_taxa_ordered))
taxonomy = pd.DataFrame(index=ordered_indices_list, data=ordered_taxa_list, columns=(['lineage'] + VALID_RANKS))
return taxonomy
elif (notation == 'qiime'):
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=(lambda x: x[0]))
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for (rank, taxon) in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [rank for rank in target_order_ranks[::(- 1)][:len(tmp_ranks)]][::(- 1)]
for rank in VALID_RANKS:
if (rank not in tmp_taxonomy_df.columns):
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif (notation == 'silva'):
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(';')
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for (rank_i, taxon) in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [rank for rank in target_order_ranks if (rank in VALID_RANKS)]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [rank for rank in target_order_ranks[::(- 1)][:len(tmp_ranks)]][::(- 1)]
for rank in VALID_RANKS:
if (rank not in tmp_taxonomy_df.columns):
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError | 5,385,803,232,418,509,000 | Main method that produces taxonomy dataframe from lineages.
Parameters
----------
taxonomy_series
:class:`pandas.Series` with taxonomy lineages
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`. | pmaf/biome/essentials/_taxonomy.py | __init_taxonomy_from_lineages | mmtechslv/PhyloMAF | python | def __init_taxonomy_from_lineages(self, taxonomy_series: pd.Series, taxonomy_notation: Optional[str], order_ranks: Optional[Sequence[str]]) -> pd.DataFrame:
"Main method that produces taxonomy dataframe from lineages.\n\n Parameters\n ----------\n taxonomy_series\n :class:`pandas.Series` with taxonomy lineages\n taxonomy_notation\n Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`\n order_ranks\n List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`.\n "
if (taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS):
notation = taxonomy_notation
else:
sample_taxon = taxonomy_series.iloc[0]
notation = indentify_taxon_notation(sample_taxon)
if (order_ranks is not None):
if all([(rank in VALID_RANKS) for rank in order_ranks]):
target_order_ranks = order_ranks
else:
raise NotImplementedError
else:
target_order_ranks = VALID_RANKS
if (notation == 'greengenes'):
lineages = taxonomy_series.reset_index().values.tolist()
ordered_taxa_list = []
ordered_indices_list = [elem[0] for elem in lineages]
for lineage in lineages:
tmp_lineage = jRegexGG.findall(lineage[1])
tmp_taxa_dict = {elem[0]: elem[1] for elem in tmp_lineage if (elem[0] in VALID_RANKS)}
for rank in VALID_RANKS:
if (rank not in tmp_taxa_dict.keys()):
tmp_taxa_dict.update({rank: None})
tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]
ordered_taxa_list.append(([None] + tmp_taxa_ordered))
taxonomy = pd.DataFrame(index=ordered_indices_list, data=ordered_taxa_list, columns=(['lineage'] + VALID_RANKS))
return taxonomy
elif (notation == 'qiime'):
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = jRegexQIIME.findall(lineage[1])
tmp_lineage.sort(key=(lambda x: x[0]))
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for (rank, taxon) in tmp_lineage:
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]
tmp_taxonomy_df.columns = [rank for rank in target_order_ranks[::(- 1)][:len(tmp_ranks)]][::(- 1)]
for rank in VALID_RANKS:
if (rank not in tmp_taxonomy_df.columns):
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
elif (notation == 'silva'):
lineages = taxonomy_series.reset_index().values.tolist()
tmp_taxa_dict_list = []
tmp_ranks = set()
for lineage in lineages:
tmp_lineage = lineage[1].split(';')
tmp_taxa_dict = defaultdict(None)
tmp_taxa_dict[None] = lineage[0]
for (rank_i, taxon) in enumerate(tmp_lineage):
rank = target_order_ranks[rank_i]
tmp_taxa_dict[rank] = taxon
tmp_ranks.add(rank)
tmp_taxa_dict_list.append(dict(tmp_taxa_dict))
tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)
tmp_taxonomy_df.set_index(None, inplace=True)
tmp_rank_ordered = [rank for rank in target_order_ranks if (rank in VALID_RANKS)]
tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]
tmp_taxonomy_df.columns = [rank for rank in target_order_ranks[::(- 1)][:len(tmp_ranks)]][::(- 1)]
for rank in VALID_RANKS:
if (rank not in tmp_taxonomy_df.columns):
tmp_taxonomy_df.loc[:, rank] = None
return tmp_taxonomy_df
else:
raise NotImplementedError |
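The greengenes branch leans on jRegexGG to pull rank/taxon pairs out of a lineage string; an equivalent standalone pattern (an assumption -- pmaf's actual regex may differ) behaves like this.
import re

gg = re.compile(r'([a-z])__([^;]*)')
print(gg.findall('k__Bacteria; p__Firmicutes; c__Bacilli; o__'))
# [('k', 'Bacteria'), ('p', 'Firmicutes'), ('c', 'Bacilli'), ('o', '')]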
def __init_taxonomy_from_frame(self, taxonomy_dataframe: pd.DataFrame, taxonomy_notation: Optional[str], order_ranks: Optional[Sequence[str]]) -> pd.DataFrame:
"Main method that produces taxonomy sheet from dataframe.\n\n Parameters\n ----------\n taxonomy_dataframe\n :class:`~pandas.DataFrame` with taxa split by ranks.\n taxonomy_notation\n Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`\n order_ranks\n List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`.\n\n Returns\n -------\n :class:`~pandas.DataFrame`\n "
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if (valid_ranks is not None):
if (len(valid_ranks) > 0):
return pd.concat([taxonomy_dataframe, pd.DataFrame(data='', index=taxonomy_dataframe.index, columns=[rank for rank in VALID_RANKS if (rank not in valid_ranks)])], axis=1)
else:
taxonomy_series = taxonomy_dataframe.apply((lambda taxa: ';'.join(taxa.values.tolist())), axis=1)
return self.__init_taxonomy_from_lineages(taxonomy_series, taxonomy_notation, order_ranks)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply((lambda taxa: ';'.join([(t if isinstance(t, str) else '') for t in taxa.values])), axis=1)
return self.__init_taxonomy_from_lineages(taxonomy_series, taxonomy_notation, order_ranks) | -2,655,459,836,417,692,000 | Main method that produces taxonomy sheet from dataframe.
Parameters
----------
taxonomy_dataframe
:class:`~pandas.DataFrame` with taxa split by ranks.
taxonomy_notation
Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`
order_ranks
List with the target rank order. Default is set to None. The 'silva' notation requires `order_ranks`.
Returns
-------
:class:`~pandas.DataFrame` | pmaf/biome/essentials/_taxonomy.py | __init_taxonomy_from_frame | mmtechslv/PhyloMAF | python | def __init_taxonomy_from_frame(self, taxonomy_dataframe: pd.DataFrame, taxonomy_notation: Optional[str], order_ranks: Optional[Sequence[str]]) -> pd.DataFrame:
"Main method that produces taxonomy sheet from dataframe.\n\n Parameters\n ----------\n taxonomy_dataframe\n :class:`~pandas.DataFrame` with taxa split by ranks.\n taxonomy_notation\n Taxonomy lineage notation style. Can be one of :const:`pmaf.internals._constants.AVAIL_TAXONOMY_NOTATIONS`\n order_ranks\n List with the target rank order. Default is set to None. The 'silva' notation require `order_ranks`.\n\n Returns\n -------\n :class:`~pandas.DataFrame`\n "
valid_ranks = extract_valid_ranks(taxonomy_dataframe.columns, VALID_RANKS)
if (valid_ranks is not None):
if (len(valid_ranks) > 0):
return pd.concat([taxonomy_dataframe, pd.DataFrame(data='', index=taxonomy_dataframe.index, columns=[rank for rank in VALID_RANKS if (rank not in valid_ranks)])], axis=1)
else:
taxonomy_series = taxonomy_dataframe.apply((lambda taxa: ';'.join(taxa.values.tolist())), axis=1)
return self.__init_taxonomy_from_lineages(taxonomy_series, taxonomy_notation, order_ranks)
else:
valid_ranks = cols2ranks(taxonomy_dataframe.columns)
taxonomy_dataframe.columns = valid_ranks
taxonomy_series = taxonomy_dataframe.apply((lambda taxa: ';'.join([(t if isinstance(t, str) else ) for t in taxa.values])), axis=1)
return self.__init_taxonomy_from_lineages(taxonomy_series, taxonomy_notation, order_ranks) |
@property
def avail_ranks(self) -> Sequence[str]:
'List of available taxonomic ranks.'
return self.__avail_ranks | 6,488,279,862,083,200,000 | List of available taxonomic ranks. | pmaf/biome/essentials/_taxonomy.py | avail_ranks | mmtechslv/PhyloMAF | python | @property
def avail_ranks(self) -> Sequence[str]:
return self.__avail_ranks |
@property
def duplicated(self) -> pd.Index:
'List of duplicated feature indices.'
return self.__internal_taxonomy.index[self.__internal_taxonomy['lineage'].duplicated(keep=False)] | -2,149,236,326,325,262,300 | List of duplicated feature indices. | pmaf/biome/essentials/_taxonomy.py | duplicated | mmtechslv/PhyloMAF | python | @property
def duplicated(self) -> pd.Index:
return self.__internal_taxonomy.index[self.__internal_taxonomy['lineage'].duplicated(keep=False)] |
@property
def data(self) -> pd.DataFrame:
'Actual data representation as pd.DataFrame.'
return self.__internal_taxonomy | 5,149,025,861,175,812,000 | Actual data representation as pd.DataFrame. | pmaf/biome/essentials/_taxonomy.py | data | mmtechslv/PhyloMAF | python | @property
def data(self) -> pd.DataFrame:
return self.__internal_taxonomy |
@property
def xrid(self) -> pd.Index:
'Feature indices as pd.Index.'
return self.__internal_taxonomy.index | 4,945,130,114,201,169,000 | Feature indices as pd.Index. | pmaf/biome/essentials/_taxonomy.py | xrid | mmtechslv/PhyloMAF | python | @property
def xrid(self) -> pd.Index:
return self.__internal_taxonomy.index |