Dataset schema (one row per function):

- repository_name: string, 7-55 chars
- func_path_in_repository: string, 4-223 chars
- func_name: string, 1-134 chars
- whole_func_string: string, 75-104k chars
- language: 1 class (always "python")
- func_code_string: string, 75-104k chars (a verbatim copy of whole_func_string)
- func_code_tokens: sequence, 19-28.4k items (whole_func_string split into tokens)
- func_documentation_string: string, 1-46.9k chars (the function's docstring)
- func_documentation_tokens: sequence, 1-1.97k items (the docstring split into tokens)
- split_name: 1 class (always "train")
- func_code_url: string, 87-315 chars

Each entry below lists the metadata fields and the function source once; the copy and token columns carry no information beyond that.
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/dbal/comparator.py
func_name: Comparator.detect_column_renamings
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/dbal/comparator.py#L112-L137

```python
def detect_column_renamings(self, table_differences):
    """
    Try to find columns that only changed their names.

    :type table_differences: TableDiff
    """
    rename_candidates = {}

    for added_column_name, added_column in table_differences.added_columns.items():
        for removed_column in table_differences.removed_columns.values():
            if len(self.diff_column(added_column, removed_column)) == 0:
                if added_column.get_name() not in rename_candidates:
                    rename_candidates[added_column.get_name()] = []

                # .append() here: the original assigned a tuple, clobbering
                # the list created above and making the len() == 1 check
                # below never match.
                rename_candidates[added_column.get_name()].append(
                    (removed_column, added_column, added_column_name)
                )

    for candidate_columns in rename_candidates.values():
        if len(candidate_columns) == 1:
            removed_column, added_column, _ = candidate_columns[0]
            removed_column_name = removed_column.get_name().lower()
            added_column_name = added_column.get_name().lower()

            if removed_column_name not in table_differences.renamed_columns:
                table_differences.renamed_columns[removed_column_name] = added_column

                del table_differences.added_columns[added_column_name]
                del table_differences.removed_columns[removed_column_name]
```
"""
Try to find columns that only changed their names.
:type table_differences: TableDiff
"""
rename_candidates = {}
for added_column_name, added_column in table_differences.added_columns.items():
for removed_column in table_differences.removed_columns.values():
if len(self.diff_column(added_column, removed_column)) == 0:
if added_column.get_name() not in rename_candidates:
rename_candidates[added_column.get_name()] = []
rename_candidates[added_column.get_name()] = (removed_column, added_column, added_column_name)
for candidate_columns in rename_candidates.values():
if len(candidate_columns) == 1:
removed_column, added_column, _ = candidate_columns[0]
removed_column_name = removed_column.get_name().lower()
added_column_name = added_column.get_name().lower()
if removed_column_name not in table_differences.renamed_columns:
table_differences.renamed_columns[removed_column_name] = added_column
del table_differences.added_columns[added_column_name]
del table_differences.removed_columns[removed_column_name] | [
"def",
"detect_column_renamings",
"(",
"self",
",",
"table_differences",
")",
":",
"rename_candidates",
"=",
"{",
"}",
"for",
"added_column_name",
",",
"added_column",
"in",
"table_differences",
".",
"added_columns",
".",
"items",
"(",
")",
":",
"for",
"removed_column",
"in",
"table_differences",
".",
"removed_columns",
".",
"values",
"(",
")",
":",
"if",
"len",
"(",
"self",
".",
"diff_column",
"(",
"added_column",
",",
"removed_column",
")",
")",
"==",
"0",
":",
"if",
"added_column",
".",
"get_name",
"(",
")",
"not",
"in",
"rename_candidates",
":",
"rename_candidates",
"[",
"added_column",
".",
"get_name",
"(",
")",
"]",
"=",
"[",
"]",
"rename_candidates",
"[",
"added_column",
".",
"get_name",
"(",
")",
"]",
"=",
"(",
"removed_column",
",",
"added_column",
",",
"added_column_name",
")",
"for",
"candidate_columns",
"in",
"rename_candidates",
".",
"values",
"(",
")",
":",
"if",
"len",
"(",
"candidate_columns",
")",
"==",
"1",
":",
"removed_column",
",",
"added_column",
",",
"_",
"=",
"candidate_columns",
"[",
"0",
"]",
"removed_column_name",
"=",
"removed_column",
".",
"get_name",
"(",
")",
".",
"lower",
"(",
")",
"added_column_name",
"=",
"added_column",
".",
"get_name",
"(",
")",
".",
"lower",
"(",
")",
"if",
"removed_column_name",
"not",
"in",
"table_differences",
".",
"renamed_columns",
":",
"table_differences",
".",
"renamed_columns",
"[",
"removed_column_name",
"]",
"=",
"added_column",
"del",
"table_differences",
".",
"added_columns",
"[",
"added_column_name",
"]",
"del",
"table_differences",
".",
"removed_columns",
"[",
"removed_column_name",
"]"
] | Try to find columns that only changed their names.
:type table_differences: TableDiff | [
"Try",
"to",
"find",
"columns",
"that",
"only",
"changed",
"their",
"names",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/dbal/comparator.py#L112-L137 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/dbal/comparator.py
func_name: Comparator.diff_column
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/dbal/comparator.py#L139-L182

```python
def diff_column(self, column1, column2):
    """
    Returns the difference between column1 and column2.

    :type column1: eloquent.dbal.column.Column
    :type column2: eloquent.dbal.column.Column

    :rtype: list
    """
    properties1 = column1.to_dict()
    properties2 = column2.to_dict()

    changed_properties = []

    for prop in ['type', 'notnull', 'unsigned', 'autoincrement']:
        if properties1[prop] != properties2[prop]:
            changed_properties.append(prop)

    if properties1['default'] != properties2['default']\
            or (properties1['default'] is None and properties2['default'] is not None)\
            or (properties2['default'] is None and properties1['default'] is not None):
        changed_properties.append('default')

    if properties1['type'] == 'string' and properties1['type'] != 'guid'\
            or properties1['type'] in ['binary', 'blob']:
        length1 = properties1['length'] or 255
        length2 = properties2['length'] or 255

        if length1 != length2:
            changed_properties.append('length')

        if properties1['fixed'] != properties2['fixed']:
            changed_properties.append('fixed')
    elif properties1['type'] in ['decimal', 'float', 'double precision']:
        precision1 = properties1['precision'] or 10
        precision2 = properties2['precision'] or 10

        if precision1 != precision2:
            changed_properties.append('precision')

        if properties1['scale'] != properties2['scale']:
            changed_properties.append('scale')

    return list(set(changed_properties))
```
"""
Returns the difference between column1 and column2
:type column1: eloquent.dbal.column.Column
:type column2: eloquent.dbal.column.Column
:rtype: list
"""
properties1 = column1.to_dict()
properties2 = column2.to_dict()
changed_properties = []
for prop in ['type', 'notnull', 'unsigned', 'autoincrement']:
if properties1[prop] != properties2[prop]:
changed_properties.append(prop)
if properties1['default'] != properties2['default']\
or (properties1['default'] is None and properties2['default'] is not None)\
or (properties2['default'] is None and properties1['default'] is not None):
changed_properties.append('default')
if properties1['type'] == 'string' and properties1['type'] != 'guid'\
or properties1['type'] in ['binary', 'blob']:
length1 = properties1['length'] or 255
length2 = properties2['length'] or 255
if length1 != length2:
changed_properties.append('length')
if properties1['fixed'] != properties2['fixed']:
changed_properties.append('fixed')
elif properties1['type'] in ['decimal', 'float', 'double precision']:
precision1 = properties1['precision'] or 10
precision2 = properties2['precision'] or 10
if precision1 != precision2:
changed_properties.append('precision')
if properties1['scale'] != properties2['scale']:
changed_properties.append('scale')
return list(set(changed_properties)) | [
"def",
"diff_column",
"(",
"self",
",",
"column1",
",",
"column2",
")",
":",
"properties1",
"=",
"column1",
".",
"to_dict",
"(",
")",
"properties2",
"=",
"column2",
".",
"to_dict",
"(",
")",
"changed_properties",
"=",
"[",
"]",
"for",
"prop",
"in",
"[",
"'type'",
",",
"'notnull'",
",",
"'unsigned'",
",",
"'autoincrement'",
"]",
":",
"if",
"properties1",
"[",
"prop",
"]",
"!=",
"properties2",
"[",
"prop",
"]",
":",
"changed_properties",
".",
"append",
"(",
"prop",
")",
"if",
"properties1",
"[",
"'default'",
"]",
"!=",
"properties2",
"[",
"'default'",
"]",
"or",
"(",
"properties1",
"[",
"'default'",
"]",
"is",
"None",
"and",
"properties2",
"[",
"'default'",
"]",
"is",
"not",
"None",
")",
"or",
"(",
"properties2",
"[",
"'default'",
"]",
"is",
"None",
"and",
"properties1",
"[",
"'default'",
"]",
"is",
"not",
"None",
")",
":",
"changed_properties",
".",
"append",
"(",
"'default'",
")",
"if",
"properties1",
"[",
"'type'",
"]",
"==",
"'string'",
"and",
"properties1",
"[",
"'type'",
"]",
"!=",
"'guid'",
"or",
"properties1",
"[",
"'type'",
"]",
"in",
"[",
"'binary'",
",",
"'blob'",
"]",
":",
"length1",
"=",
"properties1",
"[",
"'length'",
"]",
"or",
"255",
"length2",
"=",
"properties2",
"[",
"'length'",
"]",
"or",
"255",
"if",
"length1",
"!=",
"length2",
":",
"changed_properties",
".",
"append",
"(",
"'length'",
")",
"if",
"properties1",
"[",
"'fixed'",
"]",
"!=",
"properties2",
"[",
"'fixed'",
"]",
":",
"changed_properties",
".",
"append",
"(",
"'fixed'",
")",
"elif",
"properties1",
"[",
"'type'",
"]",
"in",
"[",
"'decimal'",
",",
"'float'",
",",
"'double precision'",
"]",
":",
"precision1",
"=",
"properties1",
"[",
"'precision'",
"]",
"or",
"10",
"precision2",
"=",
"properties2",
"[",
"'precision'",
"]",
"or",
"10",
"if",
"precision1",
"!=",
"precision2",
":",
"changed_properties",
".",
"append",
"(",
"'precision'",
")",
"if",
"properties1",
"[",
"'scale'",
"]",
"!=",
"properties2",
"[",
"'scale'",
"]",
":",
"changed_properties",
".",
"append",
"(",
"'scale'",
")",
"return",
"list",
"(",
"set",
"(",
"changed_properties",
")",
")"
] | Returns the difference between column1 and column2
:type column1: eloquent.dbal.column.Column
:type column2: eloquent.dbal.column.Column
:rtype: list | [
"Returns",
"the",
"difference",
"between",
"column1",
"and",
"column2"
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/dbal/comparator.py#L139-L182 |
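To make `diff_column`'s output concrete, here is a hedged usage sketch: `FakeColumn` is a hypothetical test double (not part of eloquent) exposing only the `to_dict()` and `get_name()` methods the comparator reads, and the `Comparator` import path is assumed from this row.

```python
from eloquent.dbal.comparator import Comparator  # import path assumed from this row

class FakeColumn(object):
    """Hypothetical stand-in exposing just what diff_column inspects."""

    def __init__(self, name, **props):
        self._name = name
        # Defaults for every property the comparator touches.
        self._props = {
            'type': 'string', 'notnull': False, 'unsigned': False,
            'autoincrement': False, 'default': None, 'length': None,
            'fixed': False, 'precision': None, 'scale': None,
        }
        self._props.update(props)

    def get_name(self):
        return self._name

    def to_dict(self):
        return dict(self._props)

old = FakeColumn('title', length=100)
new = FakeColumn('title', length=255, notnull=True)
print(sorted(Comparator().diff_column(old, new)))  # ['length', 'notnull']
```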
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/commands/migrations/base_command.py
func_name: BaseCommand.execute
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/commands/migrations/base_command.py#L18-L26

```python
def execute(self, i, o):
    """
    Executes the command.

    :type i: cleo.inputs.input.Input
    :type o: cleo.outputs.output.Output
    """
    config = self._get_config(i)

    self._resolver = DatabaseManager(config)
```
"""
Executes the command.
:type i: cleo.inputs.input.Input
:type o: cleo.outputs.output.Output
"""
config = self._get_config(i)
self._resolver = DatabaseManager(config) | [
"def",
"execute",
"(",
"self",
",",
"i",
",",
"o",
")",
":",
"config",
"=",
"self",
".",
"_get_config",
"(",
"i",
")",
"self",
".",
"_resolver",
"=",
"DatabaseManager",
"(",
"config",
")"
] | Executes the command.
:type i: cleo.inputs.input.Input
:type o: cleo.outputs.output.Output | [
"Executes",
"the",
"command",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/commands/migrations/base_command.py#L18-L26 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/commands/migrations/base_command.py
func_name: BaseCommand.call
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/commands/migrations/base_command.py#L28-L48

```python
def call(self, name, options=None, o=None):
    """
    Call another command.

    :param name: The command name
    :type name: str

    :param options: The options
    :type options: list or None

    :param o: The output
    :type o: cleo.outputs.output.Output
    """
    if options is None:
        options = []

    command = self.get_application().find(name)

    options = [('command', command.get_name())] + options

    return command.run(ListInput(options), o)
```
"""
Call another command.
:param name: The command name
:type name: str
:param options: The options
:type options: list or None
:param o: The output
:type o: cleo.outputs.output.Output
"""
if options is None:
options = []
command = self.get_application().find(name)
options = [('command', command.get_name())] + options
return command.run(ListInput(options), o) | [
"def",
"call",
"(",
"self",
",",
"name",
",",
"options",
"=",
"None",
",",
"o",
"=",
"None",
")",
":",
"if",
"options",
"is",
"None",
":",
"options",
"=",
"[",
"]",
"command",
"=",
"self",
".",
"get_application",
"(",
")",
".",
"find",
"(",
"name",
")",
"options",
"=",
"[",
"(",
"'command'",
",",
"command",
".",
"get_name",
"(",
")",
")",
"]",
"+",
"options",
"return",
"command",
".",
"run",
"(",
"ListInput",
"(",
"options",
")",
",",
"o",
")"
] | Call another command.
:param name: The command name
:type name: str
:param options: The options
:type options: list or None
:param o: The output
:type o: cleo.outputs.output.Output | [
"Call",
"another",
"command",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/commands/migrations/base_command.py#L28-L48 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/commands/migrations/base_command.py
func_name: BaseCommand._get_config
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/commands/migrations/base_command.py#L53-L68

```python
def _get_config(self, i):
    """
    Get the config.

    :type i: cleo.inputs.input.Input

    :rtype: dict
    """
    variables = {}
    if not i.get_option('config'):
        raise Exception('The --config|-c option is missing.')

    with open(i.get_option('config')) as fh:
        exec(fh.read(), {}, variables)

    return variables['DATABASES']
```
"""
Get the config.
:type i: cleo.inputs.input.Input
:rtype: dict
"""
variables = {}
if not i.get_option('config'):
raise Exception('The --config|-c option is missing.')
with open(i.get_option('config')) as fh:
exec(fh.read(), {}, variables)
return variables['DATABASES'] | [
"def",
"_get_config",
"(",
"self",
",",
"i",
")",
":",
"variables",
"=",
"{",
"}",
"if",
"not",
"i",
".",
"get_option",
"(",
"'config'",
")",
":",
"raise",
"Exception",
"(",
"'The --config|-c option is missing.'",
")",
"with",
"open",
"(",
"i",
".",
"get_option",
"(",
"'config'",
")",
")",
"as",
"fh",
":",
"exec",
"(",
"fh",
".",
"read",
"(",
")",
",",
"{",
"}",
",",
"variables",
")",
"return",
"variables",
"[",
"'DATABASES'",
"]"
] | Get the config.
:type i: cleo.inputs.input.Input
:rtype: dict | [
"Get",
"the",
"config",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/commands/migrations/base_command.py#L53-L68 |
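`_get_config` loads settings by executing the config file as Python and pulling a `DATABASES` dict out of the resulting local namespace. A minimal sketch of that mechanism, with the config source inlined as a string so the example is self-contained (the connection values are hypothetical):

```python
# What a config file might contain (inlined here for the demo):
config_source = """
DATABASES = {
    'mysql': {
        'driver': 'mysql',
        'host': 'localhost',
        'database': 'app',
        'user': 'root',
    }
}
"""

variables = {}
exec(config_source, {}, variables)  # same pattern as _get_config
print(variables['DATABASES']['mysql']['driver'])  # mysql
```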
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/morph_to.py
func_name: MorphTo.associate
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_to.py#L79-L90

```python
def associate(self, model):
    """
    Associate the model instance to the given parent.

    :type model: eloquent.Model

    :rtype: eloquent.Model
    """
    self._parent.set_attribute(self._foreign_key, model.get_key())
    self._parent.set_attribute(self._morph_type, model.get_morph_class())

    return self._parent.set_relation(self._relation, model)
```
"""
Associate the model instance to the given parent.
:type model: eloquent.Model
:rtype: eloquent.Model
"""
self._parent.set_attribute(self._foreign_key, model.get_key())
self._parent.set_attribute(self._morph_type, model.get_morph_class())
return self._parent.set_relation(self._relation, model) | [
"def",
"associate",
"(",
"self",
",",
"model",
")",
":",
"self",
".",
"_parent",
".",
"set_attribute",
"(",
"self",
".",
"_foreign_key",
",",
"model",
".",
"get_key",
"(",
")",
")",
"self",
".",
"_parent",
".",
"set_attribute",
"(",
"self",
".",
"_morph_type",
",",
"model",
".",
"get_morph_class",
"(",
")",
")",
"return",
"self",
".",
"_parent",
".",
"set_relation",
"(",
"self",
".",
"_relation",
",",
"model",
")"
] | Associate the model instance to the given parent.
:type model: eloquent.Model
:rtype: eloquent.Model | [
"Associate",
"the",
"model",
"instance",
"to",
"the",
"given",
"parent",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_to.py#L79-L90 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/morph_to.py
func_name: MorphTo._create_model_by_type
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_to.py#L154-L167

```python
def _create_model_by_type(self, type):
    """
    Create a new model instance by type.

    :rtype: Model
    """
    klass = None

    for cls in eloquent.orm.model.Model.__subclasses__():
        morph_class = cls.__morph_class__ or cls.__name__
        if morph_class == type:
            klass = cls
            break

    return klass()
```
"""
Create a new model instance by type.
:rtype: Model
"""
klass = None
for cls in eloquent.orm.model.Model.__subclasses__():
morph_class = cls.__morph_class__ or cls.__name__
if morph_class == type:
klass = cls
break
return klass() | [
"def",
"_create_model_by_type",
"(",
"self",
",",
"type",
")",
":",
"klass",
"=",
"None",
"for",
"cls",
"in",
"eloquent",
".",
"orm",
".",
"model",
".",
"Model",
".",
"__subclasses__",
"(",
")",
":",
"morph_class",
"=",
"cls",
".",
"__morph_class__",
"or",
"cls",
".",
"__name__",
"if",
"morph_class",
"==",
"type",
":",
"klass",
"=",
"cls",
"break",
"return",
"klass",
"(",
")"
] | Create a new model instance by type.
:rtype: Model | [
"Create",
"a",
"new",
"model",
"instance",
"by",
"type",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_to.py#L154-L167 |
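`_create_model_by_type` resolves a stored morph-type string back to a class by scanning `Model.__subclasses__()` and matching either `__morph_class__` or the class name. The same lookup works on any hierarchy; the classes below are hypothetical. Note that `__subclasses__()` only returns direct subclasses, which is also true of the original.

```python
class Animal(object):
    __morph_class__ = None

class Dog(Animal):
    pass

class Cat(Animal):
    __morph_class__ = 'felis'

def create_by_type(type_name):
    # Mirrors the lookup: prefer __morph_class__, fall back to the class name.
    for cls in Animal.__subclasses__():
        if (cls.__morph_class__ or cls.__name__) == type_name:
            return cls()

print(type(create_by_type('Dog')).__name__)    # Dog
print(type(create_by_type('felis')).__name__)  # Cat
```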
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/schema/mysql_builder.py
func_name: MySqlSchemaBuilder.get_column_listing
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/schema/mysql_builder.py#L23-L38

```python
def get_column_listing(self, table):
    """
    Get the column listing for a given table.

    :param table: The table
    :type table: str

    :rtype: list
    """
    sql = self._grammar.compile_column_exists()

    database = self._connection.get_database_name()
    table = self._connection.get_table_prefix() + table

    results = self._connection.select(sql, [database, table])

    return self._connection.get_post_processor().process_column_listing(results)
```
"""
Get the column listing for a given table.
:param table: The table
:type table: str
:rtype: list
"""
sql = self._grammar.compile_column_exists()
database = self._connection.get_database_name()
table = self._connection.get_table_prefix() + table
results = self._connection.select(sql, [database, table])
return self._connection.get_post_processor().process_column_listing(results) | [
"def",
"get_column_listing",
"(",
"self",
",",
"table",
")",
":",
"sql",
"=",
"self",
".",
"_grammar",
".",
"compile_column_exists",
"(",
")",
"database",
"=",
"self",
".",
"_connection",
".",
"get_database_name",
"(",
")",
"table",
"=",
"self",
".",
"_connection",
".",
"get_table_prefix",
"(",
")",
"+",
"table",
"results",
"=",
"self",
".",
"_connection",
".",
"select",
"(",
"sql",
",",
"[",
"database",
",",
"table",
"]",
")",
"return",
"self",
".",
"_connection",
".",
"get_post_processor",
"(",
")",
".",
"process_column_listing",
"(",
"results",
")"
] | Get the column listing for a given table.
:param table: The table
:type table: str
:rtype: list | [
"Get",
"the",
"column",
"listing",
"for",
"a",
"given",
"table",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/schema/mysql_builder.py#L23-L38 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/migrations/migration_creator.py
func_name: MigrationCreator._populate_stub
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/migrations/migration_creator.py#L70-L90

```python
def _populate_stub(self, name, stub, table):
    """
    Populate the placeholders in the migration stub.

    :param name: The name of the migration
    :type name: str

    :param stub: The stub
    :type stub: str

    :param table: The table name
    :type table: str

    :rtype: str
    """
    stub = stub.replace('DummyClass', self._get_class_name(name))

    if table is not None:
        stub = stub.replace('dummy_table', table)

    return stub
```
"""
Populate the placeholders in the migration stub.
:param name: The name of the migration
:type name: str
:param stub: The stub
:type stub: str
:param table: The table name
:type table: str
:rtype: str
"""
stub = stub.replace('DummyClass', self._get_class_name(name))
if table is not None:
stub = stub.replace('dummy_table', table)
return stub | [
"def",
"_populate_stub",
"(",
"self",
",",
"name",
",",
"stub",
",",
"table",
")",
":",
"stub",
"=",
"stub",
".",
"replace",
"(",
"'DummyClass'",
",",
"self",
".",
"_get_class_name",
"(",
"name",
")",
")",
"if",
"table",
"is",
"not",
"None",
":",
"stub",
"=",
"stub",
".",
"replace",
"(",
"'dummy_table'",
",",
"table",
")",
"return",
"stub"
] | Populate the placeholders in the migration stub.
:param name: The name of the migration
:type name: str
:param stub: The stub
:type stub: str
:param table: The table name
:type table: str
:rtype: str | [
"Populate",
"the",
"placeholders",
"in",
"the",
"migration",
"stub",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/migrations/migration_creator.py#L70-L90 |
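Stub population is plain string substitution. The sketch below uses a hypothetical stub and a simplified snake_case-to-CamelCase rule standing in for `_get_class_name`, which this row does not show:

```python
STUB = """class DummyClass(Migration):

    def up(self):
        with self.schema.create('dummy_table') as table:
            table.increments('id')
"""

def populate_stub(name, stub, table):
    # Same substitutions as _populate_stub; the class-name rule is a guess.
    class_name = ''.join(part.title() for part in name.split('_'))
    stub = stub.replace('DummyClass', class_name)
    if table is not None:
        stub = stub.replace('dummy_table', table)
    return stub

print(populate_stub('create_users_table', STUB, 'users'))
# class CreateUsersTable(Migration): ... self.schema.create('users') ...
```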
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/morph_pivot.py
func_name: MorphPivot._set_keys_for_save_query
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_pivot.py#L11-L23

```python
def _set_keys_for_save_query(self, query):
    """
    Set the keys for a save update query.

    :param query: A Builder instance
    :type query: eloquent.orm.Builder

    :return: The Builder instance
    :rtype: eloquent.orm.Builder
    """
    query.where(self._morph_type, self._morph_class)

    return super(MorphPivot, self)._set_keys_for_save_query(query)
```
"""
Set the keys for a save update query.
:param query: A Builder instance
:type query: eloquent.orm.Builder
:return: The Builder instance
:rtype: eloquent.orm.Builder
"""
query.where(self._morph_type, self._morph_class)
return super(MorphPivot, self)._set_keys_for_save_query(query) | [
"def",
"_set_keys_for_save_query",
"(",
"self",
",",
"query",
")",
":",
"query",
".",
"where",
"(",
"self",
".",
"_morph_type",
",",
"self",
".",
"_morph_class",
")",
"return",
"super",
"(",
"MorphPivot",
",",
"self",
")",
".",
"_set_keys_for_save_query",
"(",
"query",
")"
] | Set the keys for a save update query.
:param query: A Builder instance
:type query: eloquent.orm.Builder
:return: The Builder instance
:rtype: eloquent.orm.Builder | [
"Set",
"the",
"keys",
"for",
"a",
"save",
"update",
"query",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_pivot.py#L11-L23 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/morph_pivot.py
func_name: MorphPivot.delete
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_pivot.py#L25-L35

```python
def delete(self):
    """
    Delete the pivot model record from the database.

    :rtype: int
    """
    query = self._get_delete_query()

    query.where(self._morph_type, self._morph_class)

    return query.delete()
```
"""
Delete the pivot model record from the database.
:rtype: int
"""
query = self._get_delete_query()
query.where(self._morph_type, self._morph_class)
return query.delete() | [
"def",
"delete",
"(",
"self",
")",
":",
"query",
"=",
"self",
".",
"_get_delete_query",
"(",
")",
"query",
".",
"where",
"(",
"self",
".",
"_morph_type",
",",
"self",
".",
"_morph_class",
")",
"return",
"query",
".",
"delete",
"(",
")"
] | Delete the pivot model record from the database.
:rtype: int | [
"Delete",
"the",
"pivot",
"model",
"record",
"from",
"the",
"database",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_pivot.py#L25-L35 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/morph_one_or_many.py
func_name: MorphOneOrMany.get_relation_count_query
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_one_or_many.py#L38-L49

```python
def get_relation_count_query(self, query, parent):
    """
    Add the constraints for a relationship count query.

    :type query: Builder
    :type parent: Builder

    :rtype: Builder
    """
    query = super(MorphOneOrMany, self).get_relation_count_query(query, parent)

    return query.where(self._morph_type, self._morph_class)
```
"""
Add the constraints for a relationship count query.
:type query: Builder
:type parent: Builder
:rtype: Builder
"""
query = super(MorphOneOrMany, self).get_relation_count_query(query, parent)
return query.where(self._morph_type, self._morph_class) | [
"def",
"get_relation_count_query",
"(",
"self",
",",
"query",
",",
"parent",
")",
":",
"query",
"=",
"super",
"(",
"MorphOneOrMany",
",",
"self",
")",
".",
"get_relation_count_query",
"(",
"query",
",",
"parent",
")",
"return",
"query",
".",
"where",
"(",
"self",
".",
"_morph_type",
",",
"self",
".",
"_morph_class",
")"
] | Add the constraints for a relationship count query.
:type query: Builder
:type parent: Builder
:rtype: Builder | [
"Add",
"the",
"constraints",
"for",
"a",
"relationship",
"count",
"query",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_one_or_many.py#L38-L49 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/morph_one_or_many.py
func_name: MorphOneOrMany.add_eager_constraints
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_one_or_many.py#L51-L59

```python
def add_eager_constraints(self, models):
    """
    Set the constraints for an eager load of the relation.

    :type models: list
    """
    super(MorphOneOrMany, self).add_eager_constraints(models)

    self._query.where(self._morph_type, self._morph_class)
```
"""
Set the constraints for an eager load of the relation.
:type models: list
"""
super(MorphOneOrMany, self).add_eager_constraints(models)
self._query.where(self._morph_type, self._morph_class) | [
"def",
"add_eager_constraints",
"(",
"self",
",",
"models",
")",
":",
"super",
"(",
"MorphOneOrMany",
",",
"self",
")",
".",
"add_eager_constraints",
"(",
"models",
")",
"self",
".",
"_query",
".",
"where",
"(",
"self",
".",
"_morph_type",
",",
"self",
".",
"_morph_class",
")"
] | Set the constraints for an eager load of the relation.
:type models: list | [
"Set",
"the",
"constraints",
"for",
"an",
"eager",
"load",
"of",
"the",
"relation",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_one_or_many.py#L51-L59 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/morph_one_or_many.py
func_name: MorphOneOrMany.save
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_one_or_many.py#L61-L72

```python
def save(self, model):
    """
    Attach a model instance to the parent models.

    :param model: The model instance to attach
    :type model: Model

    :rtype: Model
    """
    model.set_attribute(self.get_plain_morph_type(), self._morph_class)

    return super(MorphOneOrMany, self).save(model)
```
"""
Attach a model instance to the parent models.
:param model: The model instance to attach
:type model: Model
:rtype: Model
"""
model.set_attribute(self.get_plain_morph_type(), self._morph_class)
return super(MorphOneOrMany, self).save(model) | [
"def",
"save",
"(",
"self",
",",
"model",
")",
":",
"model",
".",
"set_attribute",
"(",
"self",
".",
"get_plain_morph_type",
"(",
")",
",",
"self",
".",
"_morph_class",
")",
"return",
"super",
"(",
"MorphOneOrMany",
",",
"self",
")",
".",
"save",
"(",
"model",
")"
] | Attach a model instance to the parent models.
:param model: The model instance to attach
:type model: Model
:rtype: Model | [
"Attach",
"a",
"model",
"instance",
"to",
"the",
"parent",
"models",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_one_or_many.py#L61-L72 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/morph_one_or_many.py
func_name: MorphOneOrMany.find_or_new
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_one_or_many.py#L74-L95

```python
def find_or_new(self, id, columns=None):
    """
    Find a model by its primary key or return a new instance of the related model.

    :param id: The primary key
    :type id: mixed

    :param columns: The columns to retrieve
    :type columns: list

    :rtype: Collection or Model
    """
    if columns is None:
        columns = ['*']

    instance = self.find(id, columns)
    if instance is None:
        instance = self._related.new_instance()

        self._set_foreign_attributes_for_create(instance)

    return instance
```
"""
Find a model by its primary key or return new instance of the related model.
:param id: The primary key
:type id: mixed
:param columns: The columns to retrieve
:type columns: list
:rtype: Collection or Model
"""
if columns is None:
columns = ['*']
instance = self.find(id, columns)
if instance is None:
instance = self._related.new_instance()
self._set_foreign_attributes_for_create(instance)
return instance | [
"def",
"find_or_new",
"(",
"self",
",",
"id",
",",
"columns",
"=",
"None",
")",
":",
"if",
"columns",
"is",
"None",
":",
"columns",
"=",
"[",
"'*'",
"]",
"instance",
"=",
"self",
".",
"find",
"(",
"id",
",",
"columns",
")",
"if",
"instance",
"is",
"None",
":",
"instance",
"=",
"self",
".",
"_related",
".",
"new_instance",
"(",
")",
"self",
".",
"_set_foreign_attributes_for_create",
"(",
"instance",
")",
"return",
"instance"
] | Find a model by its primary key or return new instance of the related model.
:param id: The primary key
:type id: mixed
:param columns: The columns to retrieve
:type columns: list
:rtype: Collection or Model | [
"Find",
"a",
"model",
"by",
"its",
"primary",
"key",
"or",
"return",
"new",
"instance",
"of",
"the",
"related",
"model",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_one_or_many.py#L74-L95 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/morph_one_or_many.py
func_name: MorphOneOrMany._set_foreign_attributes_for_create
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_one_or_many.py#L176-L182

```python
def _set_foreign_attributes_for_create(self, model):
    """
    Set the foreign ID and type for creating a related model.
    """
    model.set_attribute(self.get_plain_foreign_key(), self.get_parent_key())

    model.set_attribute(self.get_plain_morph_type(), self._morph_class)
```
"""
Set the foreign ID and type for creation a related model.
"""
model.set_attribute(self.get_plain_foreign_key(), self.get_parent_key())
model.set_attribute(self.get_plain_morph_type(), self._morph_class) | [
"def",
"_set_foreign_attributes_for_create",
"(",
"self",
",",
"model",
")",
":",
"model",
".",
"set_attribute",
"(",
"self",
".",
"get_plain_foreign_key",
"(",
")",
",",
"self",
".",
"get_parent_key",
"(",
")",
")",
"model",
".",
"set_attribute",
"(",
"self",
".",
"get_plain_morph_type",
"(",
")",
",",
"self",
".",
"_morph_class",
")"
] | Set the foreign ID and type for creation a related model. | [
"Set",
"the",
"foreign",
"ID",
"and",
"type",
"for",
"creation",
"a",
"related",
"model",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_one_or_many.py#L176-L182 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/database_manager.py
func_name: BaseDatabaseManager._parse_connection_name
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/database_manager.py#L47-L63

```python
def _parse_connection_name(self, name):
    """
    Parse the connection into a tuple of the name and read / write type.

    :param name: The name of the connection
    :type name: str

    :return: A tuple of the name and read / write type
    :rtype: tuple
    """
    if name is None:
        name = self.get_default_connection()

    if name.endswith(('::read', '::write')):
        return name.split('::', 1)

    return name, None
```
"""
Parse the connection into a tuple of the name and read / write type
:param name: The name of the connection
:type name: str
:return: A tuple of the name and read / write type
:rtype: tuple
"""
if name is None:
name = self.get_default_connection()
if name.endswith(('::read', '::write')):
return name.split('::', 1)
return name, None | [
"def",
"_parse_connection_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"self",
".",
"get_default_connection",
"(",
")",
"if",
"name",
".",
"endswith",
"(",
"(",
"'::read'",
",",
"'::write'",
")",
")",
":",
"return",
"name",
".",
"split",
"(",
"'::'",
",",
"1",
")",
"return",
"name",
",",
"None"
] | Parse the connection into a tuple of the name and read / write type
:param name: The name of the connection
:type name: str
:return: A tuple of the name and read / write type
:rtype: tuple | [
"Parse",
"the",
"connection",
"into",
"a",
"tuple",
"of",
"the",
"name",
"and",
"read",
"/",
"write",
"type"
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/database_manager.py#L47-L63 |
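The `::read` / `::write` suffix lets callers address one side of a read/write split. A standalone sketch of the parsing rule (the default connection name is hypothetical); note that the original returns a list in the suffixed case because `str.split` does, while this sketch normalizes to a tuple via `partition`:

```python
def parse_connection_name(name, default='mysql'):
    # Mirrors BaseDatabaseManager._parse_connection_name.
    if name is None:
        name = default
    if name.endswith(('::read', '::write')):
        prefix, _, kind = name.partition('::')
        return prefix, kind
    return name, None

print(parse_connection_name('mysql::read'))   # ('mysql', 'read')
print(parse_connection_name('pgsql::write'))  # ('pgsql', 'write')
print(parse_connection_name(None))            # ('mysql', None)
```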
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/database_manager.py
func_name: BaseDatabaseManager.purge
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/database_manager.py#L65-L77

```python
def purge(self, name=None):
    """
    Disconnect from the given database and remove from local cache.

    :param name: The name of the connection
    :type name: str

    :rtype: None
    """
    self.disconnect(name)

    if name in self._connections:
        del self._connections[name]
```
"""
Disconnect from the given database and remove from local cache
:param name: The name of the connection
:type name: str
:rtype: None
"""
self.disconnect(name)
if name in self._connections:
del self._connections[name] | [
"def",
"purge",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"self",
".",
"disconnect",
"(",
"name",
")",
"if",
"name",
"in",
"self",
".",
"_connections",
":",
"del",
"self",
".",
"_connections",
"[",
"name",
"]"
] | Disconnect from the given database and remove from local cache
:param name: The name of the connection
:type name: str
:rtype: None | [
"Disconnect",
"from",
"the",
"given",
"database",
"and",
"remove",
"from",
"local",
"cache"
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/database_manager.py#L65-L77 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/relation.py
func_name: Relation.no_constraints
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/relation.py#L111-L121

```python
def no_constraints(cls, callback):
    """
    Runs a callback with constraints disabled on the relation.
    """
    cls._constraints = False

    results = callback()

    cls._constraints = True

    return results
```
"""
Runs a callback with constraints disabled on the relation.
"""
cls._constraints = False
results = callback()
cls._constraints = True
return results | [
"def",
"no_constraints",
"(",
"cls",
",",
"callback",
")",
":",
"cls",
".",
"_constraints",
"=",
"False",
"results",
"=",
"callback",
"(",
")",
"cls",
".",
"_constraints",
"=",
"True",
"return",
"results"
] | Runs a callback with constraints disabled on the relation. | [
"Runs",
"a",
"callback",
"with",
"constraints",
"disabled",
"on",
"the",
"relation",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/relation.py#L111-L121 |
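`no_constraints` toggles a class-level flag around a callback. The isolated sketch below (with a hypothetical stand-in class) adds a `try`/`finally` so the flag is restored even if the callback raises, a hardening the original omits:

```python
class FakeRelation(object):
    _constraints = True

    @classmethod
    def no_constraints(cls, callback):
        cls._constraints = False
        try:
            return callback()
        finally:
            # Restored even on error; the original restores only on success.
            cls._constraints = True

seen = FakeRelation.no_constraints(lambda: FakeRelation._constraints)
print(seen)                       # False: flag was down inside the callback
print(FakeRelation._constraints)  # True: restored afterwards
```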
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/relation.py
func_name: Relation.get_keys
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/relation.py#L123-L132

```python
def get_keys(self, models, key=None):
    """
    Get all the primary keys for an array of models.

    :type models: list
    :type key: str

    :rtype: list
    """
    return list(set(map(lambda value: value.get_attribute(key) if key else value.get_key(), models)))
```
"""
Get all the primary keys for an array of models.
:type models: list
:type key: str
:rtype: list
"""
return list(set(map(lambda value: value.get_attribute(key) if key else value.get_key(), models))) | [
"def",
"get_keys",
"(",
"self",
",",
"models",
",",
"key",
"=",
"None",
")",
":",
"return",
"list",
"(",
"set",
"(",
"map",
"(",
"lambda",
"value",
":",
"value",
".",
"get_attribute",
"(",
"key",
")",
"if",
"key",
"else",
"value",
".",
"get_key",
"(",
")",
",",
"models",
")",
")",
")"
] | Get all the primary keys for an array of models.
:type models: list
:type key: str
:rtype: list | [
"Get",
"all",
"the",
"primary",
"keys",
"for",
"an",
"array",
"of",
"models",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/relation.py#L123-L132 |
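Because `get_keys` deduplicates through a `set`, the order of the returned keys is arbitrary. A sketch with hypothetical stub models:

```python
class StubModel(object):
    def __init__(self, key, **attributes):
        self._key = key
        self._attributes = attributes

    def get_key(self):
        return self._key

    def get_attribute(self, name):
        return self._attributes[name]

models = [StubModel(1, user_id=10), StubModel(2, user_id=10),
          StubModel(3, user_id=20)]

def get_keys(models, key=None):
    # Same expression as Relation.get_keys, minus self.
    return list(set(
        m.get_attribute(key) if key else m.get_key() for m in models))

print(sorted(get_keys(models)))             # [1, 2, 3]
print(sorted(get_keys(models, 'user_id')))  # [10, 20]
```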
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/has_many_through.py
func_name: HasManyThrough.add_constraints
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/has_many_through.py#L29-L40

```python
def add_constraints(self):
    """
    Set the base constraints on the relation query.

    :rtype: None
    """
    parent_table = self._parent.get_table()

    self._set_join()

    if self._constraints:
        self._query.where('%s.%s' % (parent_table, self._first_key), '=', self._far_parent.get_key())
```
"""
Set the base constraints on the relation query.
:rtype: None
"""
parent_table = self._parent.get_table()
self._set_join()
if self._constraints:
self._query.where('%s.%s' % (parent_table, self._first_key), '=', self._far_parent.get_key()) | [
"def",
"add_constraints",
"(",
"self",
")",
":",
"parent_table",
"=",
"self",
".",
"_parent",
".",
"get_table",
"(",
")",
"self",
".",
"_set_join",
"(",
")",
"if",
"self",
".",
"_constraints",
":",
"self",
".",
"_query",
".",
"where",
"(",
"'%s.%s'",
"%",
"(",
"parent_table",
",",
"self",
".",
"_first_key",
")",
",",
"'='",
",",
"self",
".",
"_far_parent",
".",
"get_key",
"(",
")",
")"
] | Set the base constraints on the relation query.
:rtype: None | [
"Set",
"the",
"base",
"constraints",
"on",
"the",
"relation",
"query",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/has_many_through.py#L29-L40 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/has_many_through.py
func_name: HasManyThrough.get_relation_count_query
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/has_many_through.py#L42-L59

```python
def get_relation_count_query(self, query, parent):
    """
    Add the constraints for a relationship count query.

    :type query: Builder
    :type parent: Builder

    :rtype: Builder
    """
    parent_table = self._parent.get_table()

    self._set_join(query)

    query.select(QueryExpression('COUNT(*)'))

    key = self.wrap('%s.%s' % (parent_table, self._first_key))

    return query.where(self.get_has_compare_key(), '=', QueryExpression(key))
```
"""
Add the constraints for a relationship count query.
:type query: Builder
:type parent: Builder
:rtype: Builder
"""
parent_table = self._parent.get_table()
self._set_join(query)
query.select(QueryExpression('COUNT(*)'))
key = self.wrap('%s.%s' % (parent_table, self._first_key))
return query.where(self.get_has_compare_key(), '=', QueryExpression(key)) | [
"def",
"get_relation_count_query",
"(",
"self",
",",
"query",
",",
"parent",
")",
":",
"parent_table",
"=",
"self",
".",
"_parent",
".",
"get_table",
"(",
")",
"self",
".",
"_set_join",
"(",
"query",
")",
"query",
".",
"select",
"(",
"QueryExpression",
"(",
"'COUNT(*)'",
")",
")",
"key",
"=",
"self",
".",
"wrap",
"(",
"'%s.%s'",
"%",
"(",
"parent_table",
",",
"self",
".",
"_first_key",
")",
")",
"return",
"query",
".",
"where",
"(",
"self",
".",
"get_has_compare_key",
"(",
")",
",",
"'='",
",",
"QueryExpression",
"(",
"key",
")",
")"
] | Add the constraints for a relationship count query.
:type query: Builder
:type parent: Builder
:rtype: Builder | [
"Add",
"the",
"constraints",
"for",
"a",
"relationship",
"count",
"query",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/has_many_through.py#L42-L59 |
---
repository_name: sdispater/eloquent
func_path_in_repository: eloquent/orm/relations/has_many_through.py
func_name: HasManyThrough._set_join
language: python
split_name: train
func_code_url: https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/has_many_through.py#L61-L70

```python
def _set_join(self, query=None):
    """
    Set the join clause for the query.
    """
    if not query:
        query = self._query

    foreign_key = '%s.%s' % (self._related.get_table(), self._second_key)

    query.join(self._parent.get_table(), self.get_qualified_parent_key_name(), '=', foreign_key)
```
"""
Set the join clause for the query.
"""
if not query:
query = self._query
foreign_key = '%s.%s' % (self._related.get_table(), self._second_key)
query.join(self._parent.get_table(), self.get_qualified_parent_key_name(), '=', foreign_key) | [
"def",
"_set_join",
"(",
"self",
",",
"query",
"=",
"None",
")",
":",
"if",
"not",
"query",
":",
"query",
"=",
"self",
".",
"_query",
"foreign_key",
"=",
"'%s.%s'",
"%",
"(",
"self",
".",
"_related",
".",
"get_table",
"(",
")",
",",
"self",
".",
"_second_key",
")",
"query",
".",
"join",
"(",
"self",
".",
"_parent",
".",
"get_table",
"(",
")",
",",
"self",
".",
"get_qualified_parent_key_name",
"(",
")",
",",
"'='",
",",
"foreign_key",
")"
] | Set the join clause for the query. | [
"Set",
"the",
"join",
"clause",
"for",
"the",
"query",
"."
] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/has_many_through.py#L61-L70 |
---
repository_name: YeoLab/anchor
func_path_in_repository: anchor/simulate.py
func_name: plot_best_worst_fits
language: python
split_name: train
func_code_url: https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/simulate.py#L158-L194

```python
def plot_best_worst_fits(assignments_df, data, modality_col='Modality',
                         score='$\log_2 K$'):
    """Violinplots of the highest and lowest scoring of each modality"""
    ncols = 2
    nrows = len(assignments_df.groupby(modality_col).groups.keys())

    fig, axes = plt.subplots(nrows=nrows, ncols=ncols,
                             figsize=(nrows*4, ncols*6))

    axes_iter = axes.flat

    fits = 'Highest', 'Lowest'

    for modality, df in assignments_df.groupby(modality_col):
        df = df.sort_values(score)
        color = MODALITY_TO_COLOR[modality]

        for fit in fits:
            if fit == 'Highest':
                ids = df['Feature ID'][-10:]
            else:
                ids = df['Feature ID'][:10]
            fit_psi = data[ids]
            tidy_fit_psi = fit_psi.stack().reset_index()
            tidy_fit_psi = tidy_fit_psi.rename(columns={'level_0': 'Sample ID',
                                                        'level_1': 'Feature ID',
                                                        0: '$\Psi$'})
            if tidy_fit_psi.empty:
                continue
            ax = six.next(axes_iter)
            violinplot(x='Feature ID', y='$\Psi$', data=tidy_fit_psi,
                       color=color, ax=ax)
            ax.set(title='{} {} {}'.format(fit, score, modality), xticks=[])
            sns.despine()
    fig.tight_layout()
```
---
repository_name: YeoLab/anchor
func_path_in_repository: anchor/visualize.py
func_name: violinplot
language: python
split_name: train
func_code_url: https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/visualize.py#L33-L48

```python
def violinplot(x=None, y=None, data=None, bw=0.2, scale='width',
               inner=None, ax=None, **kwargs):
    """Wrapper around Seaborn's Violinplot specifically for [0, 1] ranged data

    What's different:
    - bw = 0.2: Sets bandwidth to be small and the same between datasets
    - scale = 'width': Sets the width of all violinplots to be the same
    - inner = None: Don't plot a boxplot or points inside the violinplot
    """
    if ax is None:
        ax = plt.gca()

    sns.violinplot(x, y, data=data, bw=bw, scale=scale, inner=inner, ax=ax,
                   **kwargs)
    ax.set(ylim=(0, 1), yticks=(0, 0.5, 1))
    return ax
```
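Typical use of the wrapper on synthetic bimodal data. The import path is taken from this row; note the wrapper hands `x` and `y` to `sns.violinplot` positionally, so this assumes a seaborn release contemporary with anchor (newer seaborn requires keyword arguments):

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from anchor.visualize import violinplot  # import path assumed from this row

np.random.seed(0)
psi = np.concatenate([np.random.beta(0.5, 8, 100),
                      np.random.beta(8, 0.5, 100)])
data = pd.DataFrame({'Feature ID': ['bimodal'] * 200, '$\\Psi$': psi})

ax = violinplot(x='Feature ID', y='$\\Psi$', data=data)
plt.show()
```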
---
repository_name: YeoLab/anchor
func_path_in_repository: anchor/visualize.py
func_name: ModalitiesViz.bar
language: python
split_name: train
func_code_url: https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/visualize.py#L110-L149

```python
def bar(self, counts, phenotype_to_color=None, ax=None, percentages=True):
    """Draw barplots grouped by modality of modality percentage per group

    Parameters
    ----------

    Returns
    -------

    Raises
    ------
    """
    if percentages:
        counts = 100 * (counts.T / counts.T.sum()).T

    # with sns.set(style='whitegrid'):
    if ax is None:
        ax = plt.gca()

    full_width = 0.8
    width = full_width / counts.shape[0]

    for i, (group, series) in enumerate(counts.iterrows()):
        left = np.arange(len(self.modality_order)) + i * width
        # Note: the comprehension's `i` shadows the enumerate index above.
        height = [series[i] if i in series else 0
                  for i in self.modality_order]
        color = phenotype_to_color[group]
        ax.bar(left, height, width=width, color=color, label=group,
               linewidth=.5, edgecolor='k')

    ylabel = 'Percentage of events' if percentages else 'Number of events'
    ax.set_ylabel(ylabel)
    ax.set_xticks(np.arange(len(self.modality_order)) + full_width / 2)
    ax.set_xticklabels(self.modality_order)
    ax.set_xlabel('Splicing modality')
    ax.set_xlim(0, len(self.modality_order))

    ax.legend(loc='best')
    ax.grid(axis='y', linestyle='-', linewidth=0.5)
    sns.despine()
```
"""Draw barplots grouped by modality of modality percentage per group
Parameters
----------
Returns
-------
Raises
------
"""
if percentages:
counts = 100 * (counts.T / counts.T.sum()).T
# with sns.set(style='whitegrid'):
if ax is None:
ax = plt.gca()
full_width = 0.8
width = full_width / counts.shape[0]
for i, (group, series) in enumerate(counts.iterrows()):
left = np.arange(len(self.modality_order)) + i * width
height = [series[i] if i in series else 0
for i in self.modality_order]
color = phenotype_to_color[group]
ax.bar(left, height, width=width, color=color, label=group,
linewidth=.5, edgecolor='k')
ylabel = 'Percentage of events' if percentages else 'Number of events'
ax.set_ylabel(ylabel)
ax.set_xticks(np.arange(len(self.modality_order)) + full_width / 2)
ax.set_xticklabels(self.modality_order)
ax.set_xlabel('Splicing modality')
ax.set_xlim(0, len(self.modality_order))
ax.legend(loc='best')
ax.grid(axis='y', linestyle='-', linewidth=0.5)
sns.despine() | [
"def",
"bar",
"(",
"self",
",",
"counts",
",",
"phenotype_to_color",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"percentages",
"=",
"True",
")",
":",
"if",
"percentages",
":",
"counts",
"=",
"100",
"*",
"(",
"counts",
".",
"T",
"/",
"counts",
".",
"T",
".",
"sum",
"(",
")",
")",
".",
"T",
"# with sns.set(style='whitegrid'):",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"full_width",
"=",
"0.8",
"width",
"=",
"full_width",
"/",
"counts",
".",
"shape",
"[",
"0",
"]",
"for",
"i",
",",
"(",
"group",
",",
"series",
")",
"in",
"enumerate",
"(",
"counts",
".",
"iterrows",
"(",
")",
")",
":",
"left",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"self",
".",
"modality_order",
")",
")",
"+",
"i",
"*",
"width",
"height",
"=",
"[",
"series",
"[",
"i",
"]",
"if",
"i",
"in",
"series",
"else",
"0",
"for",
"i",
"in",
"self",
".",
"modality_order",
"]",
"color",
"=",
"phenotype_to_color",
"[",
"group",
"]",
"ax",
".",
"bar",
"(",
"left",
",",
"height",
",",
"width",
"=",
"width",
",",
"color",
"=",
"color",
",",
"label",
"=",
"group",
",",
"linewidth",
"=",
".5",
",",
"edgecolor",
"=",
"'k'",
")",
"ylabel",
"=",
"'Percentage of events'",
"if",
"percentages",
"else",
"'Number of events'",
"ax",
".",
"set_ylabel",
"(",
"ylabel",
")",
"ax",
".",
"set_xticks",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"self",
".",
"modality_order",
")",
")",
"+",
"full_width",
"/",
"2",
")",
"ax",
".",
"set_xticklabels",
"(",
"self",
".",
"modality_order",
")",
"ax",
".",
"set_xlabel",
"(",
"'Splicing modality'",
")",
"ax",
".",
"set_xlim",
"(",
"0",
",",
"len",
"(",
"self",
".",
"modality_order",
")",
")",
"ax",
".",
"legend",
"(",
"loc",
"=",
"'best'",
")",
"ax",
".",
"grid",
"(",
"axis",
"=",
"'y'",
",",
"linestyle",
"=",
"'-'",
",",
"linewidth",
"=",
"0.5",
")",
"sns",
".",
"despine",
"(",
")"
] | Draw barplots grouped by modality of modality percentage per group
Parameters
----------
Returns
-------
Raises
------ | [
"Draw",
"barplots",
"grouped",
"by",
"modality",
"of",
"modality",
"percentage",
"per",
"group"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/visualize.py#L110-L149 |
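The percentage step in `bar` leans on pandas broadcasting: transposing, dividing by the column sums, and transposing back normalizes each group's row to 100. In isolation, with hypothetical counts:

```python
import pandas as pd

# Rows: phenotype groups; columns: modalities (hypothetical numbers).
counts = pd.DataFrame({'~0': [30, 10], 'middle': [50, 60], '~1': [20, 30]},
                      index=['iPSC', 'NPC'])

percentages = 100 * (counts.T / counts.T.sum()).T
print(percentages.sum(axis=1))  # every row now sums to 100
```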
---
repository_name: YeoLab/anchor
func_path_in_repository: anchor/visualize.py
func_name: ModalitiesViz.event_estimation
language: python
split_name: train
func_code_url: https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/visualize.py#L151-L168

```python
def event_estimation(self, event, logliks, logsumexps, renamed=''):
    """Show the values underlying bayesian modality estimations of an event

    Parameters
    ----------

    Returns
    -------

    Raises
    ------
    """
    plotter = _ModelLoglikPlotter()
    plotter.plot(event, logliks, logsumexps, self.modality_to_color,
                 renamed=renamed)
    return plotter
```
"""Show the values underlying bayesian modality estimations of an event
Parameters
----------
Returns
-------
Raises
------
"""
plotter = _ModelLoglikPlotter()
plotter.plot(event, logliks, logsumexps, self.modality_to_color,
renamed=renamed)
return plotter | [
"def",
"event_estimation",
"(",
"self",
",",
"event",
",",
"logliks",
",",
"logsumexps",
",",
"renamed",
"=",
"''",
")",
":",
"plotter",
"=",
"_ModelLoglikPlotter",
"(",
")",
"plotter",
".",
"plot",
"(",
"event",
",",
"logliks",
",",
"logsumexps",
",",
"self",
".",
"modality_to_color",
",",
"renamed",
"=",
"renamed",
")",
"return",
"plotter"
] | Show the values underlying bayesian modality estimations of an event
Parameters
----------
Returns
-------
Raises
------ | [
"Show",
"the",
"values",
"underlying",
"bayesian",
"modality",
"estimations",
"of",
"an",
"event"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/visualize.py#L151-L168 |
---
repository_name: YeoLab/anchor
func_path_in_repository: anchor/binning.py
func_name: BinnedModalities.predict
language: python
split_name: train
func_code_url: https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/binning.py#L37-L50

```python
def predict(self, fitted):
    """Assign the most likely modality given the fitted data

    Parameters
    ----------
    fitted : pandas.DataFrame or pandas.Series
        Either a (n_modalities, features) DataFrame or (n_modalities,)
        Series, either of which will return the best modality for each
        feature.
    """
    if fitted.shape[0] != len(self.modalities):
        raise ValueError("This data doesn't look like it had the distance "
                         "between it and the five modalities calculated")
    return fitted.idxmin()
```
"""Assign the most likely modality given the fitted data
Parameters
----------
fitted : pandas.DataFrame or pandas.Series
Either a (n_modalities, features) DatFrame or (n_modalities,)
Series, either of which will return the best modality for each
feature.
"""
if fitted.shape[0] != len(self.modalities):
raise ValueError("This data doesn't look like it had the distance "
"between it and the five modalities calculated")
return fitted.idxmin() | [
"def",
"predict",
"(",
"self",
",",
"fitted",
")",
":",
"if",
"fitted",
".",
"shape",
"[",
"0",
"]",
"!=",
"len",
"(",
"self",
".",
"modalities",
")",
":",
"raise",
"ValueError",
"(",
"\"This data doesn't look like it had the distance \"",
"\"between it and the five modalities calculated\"",
")",
"return",
"fitted",
".",
"idxmin",
"(",
")"
] | Assign the most likely modality given the fitted data
Parameters
----------
fitted : pandas.DataFrame or pandas.Series
Either a (n_modalities, features) DatFrame or (n_modalities,)
Series, either of which will return the best modality for each
feature. | [
"Assign",
"the",
"most",
"likely",
"modality",
"given",
"the",
"fitted",
"data"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/binning.py#L37-L50 |
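`predict` reduces to a column-wise `idxmin` over the fitted distance matrix: for each feature, pick the modality whose binned distribution it sits closest to. A toy example with hypothetical values:

```python
import pandas as pd

# Rows: modalities; columns: features. Smaller distance means a better fit.
fitted = pd.DataFrame([[0.9, 0.1],
                       [0.2, 0.8],
                       [0.7, 0.5]],
                      index=['~0', 'middle', '~1'],
                      columns=['event_a', 'event_b'])

print(fitted.idxmin())
# event_a    middle
# event_b        ~0
```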
---
repository_name: YeoLab/anchor
func_path_in_repository: anchor/model.py
func_name: ModalityModel.logliks
language: python
split_name: train
func_code_url: https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/model.py#L71-L97

```python
def logliks(self, x):
    """Calculate log-likelihood of a feature x for each model

    Converts all values that are exactly 1 or exactly 0 to 0.999 and 0.001
    because they are out of range of the beta distribution.

    Parameters
    ----------
    x : numpy.array-like
        A single vector to estimate the log-likelihood of the models on

    Returns
    -------
    logliks : numpy.array
        Log-likelihood of these data in each member of the model's family
    """
    x = x.copy()

    # Replace exactly 0 and exactly 1 values with a very small number
    # (machine epsilon, the smallest number that this computer is capable
    # of storing) because 0 and 1 are not in the Beta distribution.
    x[x == 0] = VERY_SMALL_NUMBER
    x[x == 1] = 1 - VERY_SMALL_NUMBER

    return np.array([np.log(prob) + rv.logpdf(x[np.isfinite(x)]).sum()
                     for prob, rv in
                     zip(self.prob_parameters, self.rvs)])
```
"""Calculate log-likelihood of a feature x for each model
Converts all values that are exactly 1 or exactly 0 to 0.999 and 0.001
because they are out of range of the beta distribution.
Parameters
----------
x : numpy.array-like
A single vector to estimate the log-likelihood of the models on
Returns
-------
logliks : numpy.array
Log-likelihood of these data in each member of the model's family
"""
x = x.copy()
# Replace exactly 0 and exactly 1 values with a very small number
# (machine epsilon, the smallest number that this computer is capable
# of storing) because 0 and 1 are not in the Beta distribution.
x[x == 0] = VERY_SMALL_NUMBER
x[x == 1] = 1 - VERY_SMALL_NUMBER
return np.array([np.log(prob) + rv.logpdf(x[np.isfinite(x)]).sum()
for prob, rv in
zip(self.prob_parameters, self.rvs)]) | [
"def",
"logliks",
"(",
"self",
",",
"x",
")",
":",
"x",
"=",
"x",
".",
"copy",
"(",
")",
"# Replace exactly 0 and exactly 1 values with a very small number",
"# (machine epsilon, the smallest number that this computer is capable",
"# of storing) because 0 and 1 are not in the Beta distribution.",
"x",
"[",
"x",
"==",
"0",
"]",
"=",
"VERY_SMALL_NUMBER",
"x",
"[",
"x",
"==",
"1",
"]",
"=",
"1",
"-",
"VERY_SMALL_NUMBER",
"return",
"np",
".",
"array",
"(",
"[",
"np",
".",
"log",
"(",
"prob",
")",
"+",
"rv",
".",
"logpdf",
"(",
"x",
"[",
"np",
".",
"isfinite",
"(",
"x",
")",
"]",
")",
".",
"sum",
"(",
")",
"for",
"prob",
",",
"rv",
"in",
"zip",
"(",
"self",
".",
"prob_parameters",
",",
"self",
".",
"rvs",
")",
"]",
")"
] | Calculate log-likelihood of a feature x for each model
Converts all values that are exactly 1 or exactly 0 to 0.999 and 0.001
because they are out of range of the beta distribution.
Parameters
----------
x : numpy.array-like
A single vector to estimate the log-likelihood of the models on
Returns
-------
logliks : numpy.array
Log-likelihood of these data in each member of the model's family | [
"Calculate",
"log",
"-",
"likelihood",
"of",
"a",
"feature",
"x",
"for",
"each",
"model"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/model.py#L71-L97 |
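The score for each family member is the log prior plus the summed Beta log-density of the clipped data. The same computation spelled out with scipy for a hypothetical two-member family; `VERY_SMALL_NUMBER` here is a stand-in, since the module-level constant anchor actually uses is not shown in this row:

```python
import numpy as np
from scipy import stats

VERY_SMALL_NUMBER = 0.001  # stand-in for anchor's module-level constant

x = np.array([0.0, 0.05, 0.1, 0.95, 1.0])
x[x == 0] = VERY_SMALL_NUMBER      # clip into the open interval (0, 1)
x[x == 1] = 1 - VERY_SMALL_NUMBER

# Hypothetical family: two Beta members with equal prior probability.
prob_parameters = [0.5, 0.5]
rvs = [stats.beta(0.5, 8),   # mass piled near 0
       stats.beta(8, 0.5)]   # mass piled near 1

logliks = np.array([np.log(prob) + rv.logpdf(x[np.isfinite(x)]).sum()
                    for prob, rv in zip(prob_parameters, rvs)])
print(logliks)  # the near-zero member wins: three of five points sit near 0
```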
---
repository_name: YeoLab/anchor
func_path_in_repository: anchor/model.py
func_name: ModalityModel.nice_number_string
language: python
split_name: train
func_code_url: https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/model.py#L119-L129

```python
def nice_number_string(number, decimal_places=2):
    """Convert floats to either integers or a nice looking fraction"""
    if number == np.round(number):
        return str(int(number))
    elif number < 1 and number > 0:
        inverse = 1 / number
        if int(inverse) == np.round(inverse):
            return r'\frac{{1}}{{{}}}'.format(int(inverse))
        else:
            template = '{{:.{0}}}'.format(decimal_places)
            return template.format(number)
```
"""Convert floats to either integers or a nice looking fraction"""
if number == np.round(number):
return str(int(number))
elif number < 1 and number > 0:
inverse = 1 / number
if int(inverse) == np.round(inverse):
return r'\frac{{1}}{{{}}}'.format(int(inverse))
template = '{{:.{0}}}'.format(decimal_places)
return template.format(number) | [
"def",
"nice_number_string",
"(",
"number",
",",
"decimal_places",
"=",
"2",
")",
":",
"if",
"number",
"==",
"np",
".",
"round",
"(",
"number",
")",
":",
"return",
"str",
"(",
"int",
"(",
"number",
")",
")",
"elif",
"number",
"<",
"1",
"and",
"number",
">",
"0",
":",
"inverse",
"=",
"1",
"/",
"number",
"if",
"int",
"(",
"inverse",
")",
"==",
"np",
".",
"round",
"(",
"inverse",
")",
":",
"return",
"r'\\frac{{1}}{{{}}}'",
".",
"format",
"(",
"int",
"(",
"inverse",
")",
")",
"else",
":",
"template",
"=",
"'{{:.{0}}}'",
".",
"format",
"(",
"decimal_places",
")",
"return",
"template",
".",
"format",
"(",
"number",
")"
] | Convert floats to either integers or a nice looking fraction | [
"Convert",
"floats",
"to",
"either",
"integers",
"or",
"a",
"nice",
"looking",
"fraction"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/model.py#L119-L129 |
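A few illustrative calls; the last output assumes the fall-through fix above, so values that are neither integers nor near-reciprocals are formatted instead of returning None.

```python
>>> nice_number_string(2.0)
'2'
>>> nice_number_string(0.5)
'\\frac{1}{2}'
>>> nice_number_string(0.6)
'0.6'
```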
YeoLab/anchor | anchor/model.py | ModalityModel.violinplot | def violinplot(self, n=1000, **kwargs):
"""Plot violins of each distribution in the model family
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
ax : matplotlib.Axes object
Axes object with violins plotted
"""
kwargs.setdefault('palette', 'Purples')
dfs = []
for rv in self.rvs:
psi = rv.rvs(n)
df = pd.Series(psi, name=self.ylabel).to_frame()
alpha, beta = rv.args
alpha = self.nice_number_string(alpha, decimal_places=2)
beta = self.nice_number_string(beta, decimal_places=2)
df['parameters'] = '$\\alpha = {0}$\n$\\beta = {1}$'.format(
alpha, beta)
dfs.append(df)
data = pd.concat(dfs)
if 'ax' not in kwargs:
fig, ax = plt.subplots(figsize=(len(self.alphas)*0.625, 4))
else:
ax = kwargs.pop('ax')
ax = violinplot(x='parameters', y=self.ylabel, data=data,
ax=ax, **kwargs)
sns.despine(ax=ax)
return ax | python | def violinplot(self, n=1000, **kwargs):
"""Plot violins of each distribution in the model family
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
ax : matplotlib.Axes object
Axes object with violins plotted
"""
kwargs.setdefault('palette', 'Purples')
dfs = []
for rv in self.rvs:
psi = rv.rvs(n)
df = pd.Series(psi, name=self.ylabel).to_frame()
alpha, beta = rv.args
alpha = self.nice_number_string(alpha, decimal_places=2)
beta = self.nice_number_string(beta, decimal_places=2)
df['parameters'] = '$\\alpha = {0}$\n$\\beta = {1}$'.format(
alpha, beta)
dfs.append(df)
data = pd.concat(dfs)
if 'ax' not in kwargs:
fig, ax = plt.subplots(figsize=(len(self.alphas)*0.625, 4))
else:
ax = kwargs.pop('ax')
ax = violinplot(x='parameters', y=self.ylabel, data=data,
ax=ax, **kwargs)
sns.despine(ax=ax)
return ax | [
"def",
"violinplot",
"(",
"self",
",",
"n",
"=",
"1000",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'palette'",
",",
"'Purples'",
")",
"dfs",
"=",
"[",
"]",
"for",
"rv",
"in",
"self",
".",
"rvs",
":",
"psi",
"=",
"rv",
".",
"rvs",
"(",
"n",
")",
"df",
"=",
"pd",
".",
"Series",
"(",
"psi",
",",
"name",
"=",
"self",
".",
"ylabel",
")",
".",
"to_frame",
"(",
")",
"alpha",
",",
"beta",
"=",
"rv",
".",
"args",
"alpha",
"=",
"self",
".",
"nice_number_string",
"(",
"alpha",
",",
"decimal_places",
"=",
"2",
")",
"beta",
"=",
"self",
".",
"nice_number_string",
"(",
"beta",
",",
"decimal_places",
"=",
"2",
")",
"df",
"[",
"'parameters'",
"]",
"=",
"'$\\\\alpha = {0}$\\n$\\\\beta = {1}$'",
".",
"format",
"(",
"alpha",
",",
"beta",
")",
"dfs",
".",
"append",
"(",
"df",
")",
"data",
"=",
"pd",
".",
"concat",
"(",
"dfs",
")",
"if",
"'ax'",
"not",
"in",
"kwargs",
":",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"figsize",
"=",
"(",
"len",
"(",
"self",
".",
"alphas",
")",
"*",
"0.625",
",",
"4",
")",
")",
"else",
":",
"ax",
"=",
"kwargs",
".",
"pop",
"(",
"'ax'",
")",
"ax",
"=",
"violinplot",
"(",
"x",
"=",
"'parameters'",
",",
"y",
"=",
"self",
".",
"ylabel",
",",
"data",
"=",
"data",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")",
"sns",
".",
"despine",
"(",
"ax",
"=",
"ax",
")",
"return",
"ax"
] | Plot violins of each distribution in the model family
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
ax : matplotlib.Axes object
Axes object with violins plotted | [
"Plot",
"violins",
"of",
"each",
"distribution",
"in",
"the",
"model",
"family"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/model.py#L131-L169 |
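The method depends on instance state (self.rvs, self.ylabel, self.alphas), so below is a standalone sketch of the same per-parameterization violin layout; the three Beta parameterizations and the y-axis label are illustrative assumptions.

```python
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats

rvs = [stats.beta(a, 1) for a in (1, 2, 4)]  # assumed family members
ylabel = r'$\Psi$'                           # assumed y-axis label

dfs = []
for rv in rvs:
    df = pd.Series(rv.rvs(1000), name=ylabel).to_frame()
    alpha, beta = rv.args
    df['parameters'] = '$\\alpha = {0:g}$\n$\\beta = {1:g}$'.format(alpha, beta)
    dfs.append(df)
data = pd.concat(dfs)

fig, ax = plt.subplots(figsize=(len(rvs) * 0.625, 4))
sns.violinplot(x='parameters', y=ylabel, data=data, ax=ax, palette='Purples')
sns.despine(ax=ax)
```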
YeoLab/anchor | anchor/bayesian.py | BayesianModalities._single_feature_logliks_one_step | def _single_feature_logliks_one_step(self, feature, models):
"""Get log-likelihood of models at each parameterization for given data
Parameters
----------
feature : pandas.Series
Percent-based values of a single feature. May contain NAs, but only
non-NA values are used.
Returns
-------
logliks : pandas.DataFrame
"""
x_non_na = feature[~feature.isnull()]
if x_non_na.empty:
return pd.DataFrame()
else:
dfs = []
for name, model in models.items():
df = model.single_feature_logliks(feature)
df['Modality'] = name
dfs.append(df)
return pd.concat(dfs, ignore_index=True) | python | def _single_feature_logliks_one_step(self, feature, models):
"""Get log-likelihood of models at each parameterization for given data
Parameters
----------
feature : pandas.Series
Percent-based values of a single feature. May contain NAs, but only
non-NA values are used.
Returns
-------
logliks : pandas.DataFrame
"""
x_non_na = feature[~feature.isnull()]
if x_non_na.empty:
return pd.DataFrame()
else:
dfs = []
for name, model in models.items():
df = model.single_feature_logliks(feature)
df['Modality'] = name
dfs.append(df)
return pd.concat(dfs, ignore_index=True) | [
"def",
"_single_feature_logliks_one_step",
"(",
"self",
",",
"feature",
",",
"models",
")",
":",
"x_non_na",
"=",
"feature",
"[",
"~",
"feature",
".",
"isnull",
"(",
")",
"]",
"if",
"x_non_na",
".",
"empty",
":",
"return",
"pd",
".",
"DataFrame",
"(",
")",
"else",
":",
"dfs",
"=",
"[",
"]",
"for",
"name",
",",
"model",
"in",
"models",
".",
"items",
"(",
")",
":",
"df",
"=",
"model",
".",
"single_feature_logliks",
"(",
"feature",
")",
"df",
"[",
"'Modality'",
"]",
"=",
"name",
"dfs",
".",
"append",
"(",
"df",
")",
"return",
"pd",
".",
"concat",
"(",
"dfs",
",",
"ignore_index",
"=",
"True",
")"
] | Get log-likelihood of models at each parameterization for given data
Parameters
----------
feature : pandas.Series
Percent-based values of a single feature. May contain NAs, but only
non-NA values are used.
Returns
-------
logliks : pandas.DataFrame | [
"Get",
"log",
"-",
"likelihood",
"of",
"models",
"at",
"each",
"parameterization",
"for",
"given",
"data"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/bayesian.py#L54-L77 |
YeoLab/anchor | anchor/bayesian.py | BayesianModalities.fit | def fit(self, data):
"""Get the modality assignments of each splicing event in the data
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_events) dataframe of splicing events' PSI scores.
Must be psi scores which range from 0 to 1
Returns
-------
log2_bayes_factors : pandas.DataFrame
A (n_modalities, n_events) dataframe of the estimated log2
bayes factor for each splicing event, for each modality
Raises
------
AssertionError
If any value in ``data`` does not fall between 0 and 1.
"""
self.assert_less_than_or_equal_1(data.values.flat)
self.assert_non_negative(data.values.flat)
if isinstance(data, pd.DataFrame):
log2_bayes_factors = data.apply(self.single_feature_fit)
elif isinstance(data, pd.Series):
log2_bayes_factors = self.single_feature_fit(data)
log2_bayes_factors.name = self.score_name
return log2_bayes_factors | python | def fit(self, data):
"""Get the modality assignments of each splicing event in the data
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_events) dataframe of splicing events' PSI scores.
Must be psi scores which range from 0 to 1
Returns
-------
log2_bayes_factors : pandas.DataFrame
A (n_modalities, n_events) dataframe of the estimated log2
bayes factor for each splicing event, for each modality
Raises
------
AssertionError
If any value in ``data`` does not fall between 0 and 1.
"""
self.assert_less_than_or_equal_1(data.values.flat)
self.assert_non_negative(data.values.flat)
if isinstance(data, pd.DataFrame):
log2_bayes_factors = data.apply(self.single_feature_fit)
elif isinstance(data, pd.Series):
log2_bayes_factors = self.single_feature_fit(data)
log2_bayes_factors.name = self.score_name
return log2_bayes_factors | [
"def",
"fit",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"assert_less_than_or_equal_1",
"(",
"data",
".",
"values",
".",
"flat",
")",
"self",
".",
"assert_non_negative",
"(",
"data",
".",
"values",
".",
"flat",
")",
"if",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"log2_bayes_factors",
"=",
"data",
".",
"apply",
"(",
"self",
".",
"single_feature_fit",
")",
"elif",
"isinstance",
"(",
"data",
",",
"pd",
".",
"Series",
")",
":",
"log2_bayes_factors",
"=",
"self",
".",
"single_feature_fit",
"(",
"data",
")",
"log2_bayes_factors",
".",
"name",
"=",
"self",
".",
"score_name",
"return",
"log2_bayes_factors"
] | Get the modality assignments of each splicing event in the data
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_events) dataframe of splicing events' PSI scores.
Must be psi scores which range from 0 to 1
Returns
-------
log2_bayes_factors : pandas.DataFrame
A (n_modalities, n_events) dataframe of the estimated log2
bayes factor for each splicing event, for each modality
Raises
------
AssertionError
If any value in ``data`` does not fall between 0 and 1. | [
"Get",
"the",
"modality",
"assignments",
"of",
"each",
"splicing",
"event",
"in",
"the",
"data"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/bayesian.py#L111-L139 |
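A minimal fitting sketch, assuming BayesianModalities can be constructed with its defaults (the constructor is not shown in this record):

```python
import numpy as np
import pandas as pd

# Hypothetical (n_samples, n_events) table of PSI scores in [0, 1]
psi = pd.DataFrame(np.random.beta(0.5, 0.5, size=(100, 3)),
                   columns=['event1', 'event2', 'event3'])

bm = BayesianModalities()          # assumed default constructor
log2_bf = bm.fit(psi)              # log2 Bayes factors per modality, per event
assignments = bm.predict(log2_bf)  # most likely modality per event
```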
YeoLab/anchor | anchor/bayesian.py | BayesianModalities.predict | def predict(self, log2_bayes_factors, reset_index=False):
"""Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalities
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event
"""
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.loc[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax() | python | def predict(self, log2_bayes_factors, reset_index=False):
"""Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalities
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event
"""
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.loc[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax() | [
"def",
"predict",
"(",
"self",
",",
"log2_bayes_factors",
",",
"reset_index",
"=",
"False",
")",
":",
"if",
"reset_index",
":",
"x",
"=",
"log2_bayes_factors",
".",
"reset_index",
"(",
"level",
"=",
"0",
",",
"drop",
"=",
"True",
")",
"else",
":",
"x",
"=",
"log2_bayes_factors",
"if",
"isinstance",
"(",
"x",
",",
"pd",
".",
"DataFrame",
")",
":",
"not_na",
"=",
"(",
"x",
".",
"notnull",
"(",
")",
">",
"0",
")",
".",
"any",
"(",
")",
"not_na_columns",
"=",
"not_na",
"[",
"not_na",
"]",
".",
"index",
"x",
".",
"ix",
"[",
"NULL_MODEL",
",",
"not_na_columns",
"]",
"=",
"self",
".",
"logbf_thresh",
"elif",
"isinstance",
"(",
"x",
",",
"pd",
".",
"Series",
")",
":",
"x",
"[",
"NULL_MODEL",
"]",
"=",
"self",
".",
"logbf_thresh",
"return",
"x",
".",
"idxmax",
"(",
")"
] | Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalities
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event | [
"Guess",
"the",
"most",
"likely",
"modality",
"for",
"each",
"event"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/bayesian.py#L141-L178 |
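A worked illustration of the decision rule on a plain Series; the threshold value and the NULL_MODEL name below are assumptions standing in for the module-level constants.

```python
import pandas as pd

logbf_thresh = 10           # assumed threshold
NULL_MODEL = 'multimodal'   # assumed null-model name

# Hypothetical log2 Bayes factors for a single event
x = pd.Series({'Psi~0': 3.2, 'Psi~1': 1.1, 'bimodal': 8.9, 'middle': 0.4})
x[NULL_MODEL] = logbf_thresh

# No modality exceeds the threshold, so idxmax picks the null model
print(x.idxmax())  # multimodal
```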
YeoLab/anchor | anchor/bayesian.py | BayesianModalities.single_feature_logliks | def single_feature_logliks(self, feature):
"""Calculate log-likelihoods of each modality's parameterization
Used for plotting the estimates of a single feature
Parameters
----------
feature : pandas.Series
A single feature's values. All values must range from 0 to 1.
Returns
-------
logliks : pandas.DataFrame
The log-likelihood of the data, for each model, for each
parameterization
Raises
------
AssertionError
If any value in ``x`` does not fall between 0 and 1.
"""
self.assert_less_than_or_equal_1(feature.values)
self.assert_non_negative(feature.values)
logliks = self._single_feature_logliks_one_step(
feature, self.one_param_models)
logsumexps = self.logliks_to_logsumexp(logliks)
# If none of the one-parameter models passed, try the two-param models
if (logsumexps <= self.logbf_thresh).all():
logliks_two_params = self._single_feature_logliks_one_step(
feature, self.two_param_models)
logliks = pd.concat([logliks, logliks_two_params])
return logliks | python | def single_feature_logliks(self, feature):
"""Calculate log-likelihoods of each modality's parameterization
Used for plotting the estimates of a single feature
Parameters
----------
feature : pandas.Series
A single feature's values. All values must range from 0 to 1.
Returns
-------
logliks : pandas.DataFrame
The log-likelihood of the data, for each model, for each
parameterization
Raises
------
AssertionError
If any value in ``x`` does not fall between 0 and 1.
"""
self.assert_less_than_or_equal_1(feature.values)
self.assert_non_negative(feature.values)
logliks = self._single_feature_logliks_one_step(
feature, self.one_param_models)
logsumexps = self.logliks_to_logsumexp(logliks)
# If none of the one-parameter models passed, try the two-param models
if (logsumexps <= self.logbf_thresh).all():
logliks_two_params = self._single_feature_logliks_one_step(
feature, self.two_param_models)
logliks = pd.concat([logliks, logliks_two_params])
return logliks | [
"def",
"single_feature_logliks",
"(",
"self",
",",
"feature",
")",
":",
"self",
".",
"assert_less_than_or_equal_1",
"(",
"feature",
".",
"values",
")",
"self",
".",
"assert_non_negative",
"(",
"feature",
".",
"values",
")",
"logliks",
"=",
"self",
".",
"_single_feature_logliks_one_step",
"(",
"feature",
",",
"self",
".",
"one_param_models",
")",
"logsumexps",
"=",
"self",
".",
"logliks_to_logsumexp",
"(",
"logliks",
")",
"# If none of the one-parameter models passed, try the two-param models",
"if",
"(",
"logsumexps",
"<=",
"self",
".",
"logbf_thresh",
")",
".",
"all",
"(",
")",
":",
"logliks_two_params",
"=",
"self",
".",
"_single_feature_logliks_one_step",
"(",
"feature",
",",
"self",
".",
"two_param_models",
")",
"logliks",
"=",
"pd",
".",
"concat",
"(",
"[",
"logliks",
",",
"logliks_two_params",
"]",
")",
"return",
"logliks"
] | Calculate log-likelihoods of each modality's parameterization
Used for plotting the estimates of a single feature
Parameters
----------
feature : pandas.Series
A single feature's values. All values must range from 0 to 1.
Returns
-------
logliks : pandas.DataFrame
The log-likelihood of the data, for each model, for each
parameterization
Raises
------
AssertionError
If any value in ``x`` does not fall between 0 and 1. | [
"Calculate",
"log",
"-",
"likelihoods",
"of",
"each",
"modality",
"s",
"parameterization"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/bayesian.py#L184-L218 |
YeoLab/anchor | anchor/bayesian.py | BayesianModalities.single_feature_fit | def single_feature_fit(self, feature):
"""Get the log2 bayes factor of the fit for each modality"""
if np.isfinite(feature).sum() == 0:
series = pd.Series(index=MODALITY_ORDER)
else:
logbf_one_param = pd.Series(
{k: v.logsumexp_logliks(feature) for
k, v in self.one_param_models.items()})
# Check if none of the one-parameter models fit
if (logbf_one_param <= self.logbf_thresh).all():
logbf_two_param = pd.Series(
{k: v.logsumexp_logliks(feature)
for k, v in self.two_param_models.items()})
series = pd.concat([logbf_one_param, logbf_two_param])
series[NULL_MODEL] = self.logbf_thresh
else:
series = logbf_one_param
series.index.name = 'Modality'
series.name = self.score_name
return series | python | def single_feature_fit(self, feature):
"""Get the log2 bayes factor of the fit for each modality"""
if np.isfinite(feature).sum() == 0:
series = pd.Series(index=MODALITY_ORDER)
else:
logbf_one_param = pd.Series(
{k: v.logsumexp_logliks(feature) for
k, v in self.one_param_models.items()})
# Check if none of the one-parameter models fit
if (logbf_one_param <= self.logbf_thresh).all():
logbf_two_param = pd.Series(
{k: v.logsumexp_logliks(feature)
for k, v in self.two_param_models.items()})
series = pd.concat([logbf_one_param, logbf_two_param])
series[NULL_MODEL] = self.logbf_thresh
else:
series = logbf_one_param
series.index.name = 'Modality'
series.name = self.score_name
return series | [
"def",
"single_feature_fit",
"(",
"self",
",",
"feature",
")",
":",
"if",
"np",
".",
"isfinite",
"(",
"feature",
")",
".",
"sum",
"(",
")",
"==",
"0",
":",
"series",
"=",
"pd",
".",
"Series",
"(",
"index",
"=",
"MODALITY_ORDER",
")",
"else",
":",
"logbf_one_param",
"=",
"pd",
".",
"Series",
"(",
"{",
"k",
":",
"v",
".",
"logsumexp_logliks",
"(",
"feature",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"one_param_models",
".",
"items",
"(",
")",
"}",
")",
"# Check if none of the previous features fit",
"if",
"(",
"logbf_one_param",
"<=",
"self",
".",
"logbf_thresh",
")",
".",
"all",
"(",
")",
":",
"logbf_two_param",
"=",
"pd",
".",
"Series",
"(",
"{",
"k",
":",
"v",
".",
"logsumexp_logliks",
"(",
"feature",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"two_param_models",
".",
"items",
"(",
")",
"}",
")",
"series",
"=",
"pd",
".",
"concat",
"(",
"[",
"logbf_one_param",
",",
"logbf_two_param",
"]",
")",
"series",
"[",
"NULL_MODEL",
"]",
"=",
"self",
".",
"logbf_thresh",
"else",
":",
"series",
"=",
"logbf_one_param",
"series",
".",
"index",
".",
"name",
"=",
"'Modality'",
"series",
".",
"name",
"=",
"self",
".",
"score_name",
"return",
"series"
] | Get the log2 bayes factor of the fit for each modality | [
"Get",
"the",
"log2",
"bayes",
"factor",
"of",
"the",
"fit",
"for",
"each",
"modality"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/bayesian.py#L225-L245 |
YeoLab/anchor | anchor/bayesian.py | BayesianModalities.violinplot | def violinplot(self, n=1000, figsize=None, **kwargs):
r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted
"""
if figsize is None:
nrows = len(self.models)
width = max(len(m.rvs) for name, m in self.models.items())*0.625
height = nrows*2.5
figsize = width, height
fig, axes = plt.subplots(nrows=nrows, figsize=figsize)
for ax, model_name in zip(axes, MODALITY_ORDER):
try:
model = self.models[model_name]
cmap = MODALITY_TO_CMAP[model_name]
palette = cmap(np.linspace(0, 1, len(model.rvs)))
model.violinplot(n=n, ax=ax, palette=palette, **kwargs)
ax.set(title=model_name, xlabel='')
except KeyError:
continue
fig.tight_layout()
return fig | python | def violinplot(self, n=1000, figsize=None, **kwargs):
r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted
"""
if figsize is None:
nrows = len(self.models)
width = max(len(m.rvs) for name, m in self.models.items())*0.625
height = nrows*2.5
figsize = width, height
fig, axes = plt.subplots(nrows=nrows, figsize=figsize)
for ax, model_name in zip(axes, MODALITY_ORDER):
try:
model = self.models[model_name]
cmap = MODALITY_TO_CMAP[model_name]
palette = cmap(np.linspace(0, 1, len(model.rvs)))
model.violinplot(n=n, ax=ax, palette=palette, **kwargs)
ax.set(title=model_name, xlabel='')
except KeyError:
continue
fig.tight_layout()
return fig | [
"def",
"violinplot",
"(",
"self",
",",
"n",
"=",
"1000",
",",
"figsize",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"figsize",
"is",
"None",
":",
"nrows",
"=",
"len",
"(",
"self",
".",
"models",
")",
"width",
"=",
"max",
"(",
"len",
"(",
"m",
".",
"rvs",
")",
"for",
"name",
",",
"m",
"in",
"self",
".",
"models",
".",
"items",
"(",
")",
")",
"*",
"0.625",
"height",
"=",
"nrows",
"*",
"2.5",
"figsize",
"=",
"width",
",",
"height",
"fig",
",",
"axes",
"=",
"plt",
".",
"subplots",
"(",
"nrows",
"=",
"nrows",
",",
"figsize",
"=",
"figsize",
")",
"for",
"ax",
",",
"model_name",
"in",
"zip",
"(",
"axes",
",",
"MODALITY_ORDER",
")",
":",
"try",
":",
"model",
"=",
"self",
".",
"models",
"[",
"model_name",
"]",
"cmap",
"=",
"MODALITY_TO_CMAP",
"[",
"model_name",
"]",
"palette",
"=",
"cmap",
"(",
"np",
".",
"linspace",
"(",
"0",
",",
"1",
",",
"len",
"(",
"model",
".",
"rvs",
")",
")",
")",
"model",
".",
"violinplot",
"(",
"n",
"=",
"n",
",",
"ax",
"=",
"ax",
",",
"palette",
"=",
"palette",
",",
"*",
"*",
"kwargs",
")",
"ax",
".",
"set",
"(",
"title",
"=",
"model_name",
",",
"xlabel",
"=",
"''",
")",
"except",
"KeyError",
":",
"continue",
"fig",
".",
"tight_layout",
"(",
")"
] | r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted | [
"r",
"Visualize",
"all",
"modality",
"family",
"members",
"with",
"parameters"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/bayesian.py#L258-L291 |
YeoLab/anchor | anchor/infotheory.py | bin_range_strings | def bin_range_strings(bins, fmt=':g'):
"""Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1']
"""
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])] | python | def bin_range_strings(bins, fmt=':g'):
"""Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1']
"""
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])] | [
"def",
"bin_range_strings",
"(",
"bins",
",",
"fmt",
"=",
"':g'",
")",
":",
"return",
"[",
"(",
"'{'",
"+",
"fmt",
"+",
"'}-{'",
"+",
"fmt",
"+",
"'}'",
")",
".",
"format",
"(",
"i",
",",
"j",
")",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"bins",
",",
"bins",
"[",
"1",
":",
"]",
")",
"]"
] | Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1'] | [
"Given",
"a",
"list",
"of",
"bins",
"make",
"a",
"list",
"of",
"strings",
"of",
"those",
"bin",
"ranges"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L12-L29 |
YeoLab/anchor | anchor/infotheory.py | binify | def binify(data, bins):
"""Makes a histogram of each column the provided binsize
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows
"""
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned | python | def binify(data, bins):
"""Makes a histogram of each column the provided binsize
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows
"""
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned | [
"def",
"binify",
"(",
"data",
",",
"bins",
")",
":",
"if",
"bins",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Must specify \"bins\"'",
")",
"if",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"binned",
"=",
"data",
".",
"apply",
"(",
"lambda",
"x",
":",
"pd",
".",
"Series",
"(",
"np",
".",
"histogram",
"(",
"x",
",",
"bins",
"=",
"bins",
",",
"range",
"=",
"(",
"0",
",",
"1",
")",
")",
"[",
"0",
"]",
")",
")",
"elif",
"isinstance",
"(",
"data",
",",
"pd",
".",
"Series",
")",
":",
"binned",
"=",
"pd",
".",
"Series",
"(",
"np",
".",
"histogram",
"(",
"data",
",",
"bins",
"=",
"bins",
",",
"range",
"=",
"(",
"0",
",",
"1",
")",
")",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'`data` must be either a 1d vector or 2d matrix'",
")",
"binned",
".",
"index",
"=",
"bin_range_strings",
"(",
"bins",
")",
"# Normalize so each column sums to 1",
"binned",
"=",
"binned",
"/",
"binned",
".",
"sum",
"(",
")",
".",
"astype",
"(",
"float",
")",
"return",
"binned"
] | Makes a histogram of each column using the provided bins
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows | [
"Makes",
"a",
"histogram",
"of",
"each",
"column",
"the",
"provided",
"binsize"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L46-L77 |
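A small worked example with two bins; each output column is a normalized histogram summing to 1 (numpy's histogram places the value 0.5 in the second bin):

```python
import pandas as pd

psi = pd.DataFrame({'event1': [0.05, 0.10, 0.90, 0.95],
                    'event2': [0.45, 0.50, 0.55, 0.60]})
print(binify(psi, bins=(0, 0.5, 1)))
#        event1  event2
# 0-0.5     0.5    0.25
# 0.5-1     0.5    0.75
```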
YeoLab/anchor | anchor/infotheory.py | kld | def kld(p, q):
"""Kullback-Leiber divergence of two probability distributions pandas
dataframes, p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Leibler divergence of the common columns between the
dataframes, e.g. between the 1st column in p and the 1st column in q,
and the 2nd column in p and the 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense.
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
# If one of them is zero, then the other should be considered to be 0.
# In this problem formulation, log0 = 0
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0) | python | def kld(p, q):
"""Kullback-Leiber divergence of two probability distributions pandas
dataframes, p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Leibler divergence of the common columns between the
dataframes, e.g. between the 1st column in p and the 1st column in q,
and the 2nd column in p and the 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense.
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
# If one of them is zero, then the other should be considered to be 0.
# In this problem formulation, log0 = 0
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0) | [
"def",
"kld",
"(",
"p",
",",
"q",
")",
":",
"try",
":",
"_check_prob_dist",
"(",
"p",
")",
"_check_prob_dist",
"(",
"q",
")",
"except",
"ValueError",
":",
"return",
"np",
".",
"nan",
"# If one of them is zero, then the other should be considered to be 0.",
"# In this problem formulation, log0 = 0",
"p",
"=",
"p",
".",
"replace",
"(",
"0",
",",
"np",
".",
"nan",
")",
"q",
"=",
"q",
".",
"replace",
"(",
"0",
",",
"np",
".",
"nan",
")",
"return",
"(",
"np",
".",
"log2",
"(",
"p",
"/",
"q",
")",
"*",
"p",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")"
] | Kullback-Leibler divergence of two probability distributions, given as
pandas dataframes p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Leibler divergence of the common columns between the
dataframes, e.g. between the 1st column in p and the 1st column in q,
and the 2nd column in p and the 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense. | [
"Kullback",
"-",
"Leiber",
"divergence",
"of",
"two",
"probability",
"distributions",
"pandas",
"dataframes",
"p",
"and",
"q"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L80-L119 |
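In symbols, the function computes the base-2 Kullback-Leibler divergence column-wise; replacing zeros with NaN before the sum implements the convention that bins with p_i = 0 contribute nothing:

```latex
D_{\mathrm{KL}}(p \,\|\, q) = \sum_i p_i \log_2 \frac{p_i}{q_i}
```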
YeoLab/anchor | anchor/infotheory.py | jsd | def jsd(p, q):
"""Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distributions, given as pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result | python | def jsd(p, q):
"""Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distributions, given as pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result | [
"def",
"jsd",
"(",
"p",
",",
"q",
")",
":",
"try",
":",
"_check_prob_dist",
"(",
"p",
")",
"_check_prob_dist",
"(",
"q",
")",
"except",
"ValueError",
":",
"return",
"np",
".",
"nan",
"weight",
"=",
"0.5",
"m",
"=",
"weight",
"*",
"(",
"p",
"+",
"q",
")",
"result",
"=",
"weight",
"*",
"kld",
"(",
"p",
",",
"m",
")",
"+",
"(",
"1",
"-",
"weight",
")",
"*",
"kld",
"(",
"q",
",",
"m",
")",
"return",
"result"
] | Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distributions, given as pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError | [
"Finds",
"the",
"per",
"-",
"column",
"JSD",
"between",
"dataframes",
"p",
"and",
"q"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L122-L157 |
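With weight = 0.5, the expression weight * kld(p, m) + (1 - weight) * kld(q, m) is exactly the standard symmetric Jensen-Shannon divergence against the mixture m; computed in base 2 it lies in [0, 1]:

```latex
\mathrm{JSD}(p \,\|\, q) = \tfrac{1}{2}\, D_{\mathrm{KL}}(p \,\|\, m)
                         + \tfrac{1}{2}\, D_{\mathrm{KL}}(q \,\|\, m),
\qquad m = \tfrac{1}{2}(p + q)
```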
YeoLab/anchor | anchor/infotheory.py | entropy | def entropy(binned, base=2):
"""Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Series
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(binned)
except ValueError:
return np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0) | python | def entropy(binned, base=2):
"""Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Series
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
"""
try:
_check_prob_dist(binned)
except ValueError:
return np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0) | [
"def",
"entropy",
"(",
"binned",
",",
"base",
"=",
"2",
")",
":",
"try",
":",
"_check_prob_dist",
"(",
"binned",
")",
"except",
"ValueError",
":",
"np",
".",
"nan",
"return",
"-",
"(",
"(",
"np",
".",
"log",
"(",
"binned",
")",
"/",
"np",
".",
"log",
"(",
"base",
")",
")",
"*",
"binned",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")"
] | Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Series
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError | [
"Find",
"the",
"entropy",
"of",
"each",
"column",
"of",
"a",
"dataframe"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L160-L187 |
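The implementation is the usual Shannon entropy with a change of logarithm base, so the default base=2 reports bits:

```latex
H_b(p) = -\sum_i p_i \log_b p_i = -\sum_i p_i \, \frac{\ln p_i}{\ln b}
```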
YeoLab/anchor | anchor/infotheory.py | binify_and_jsd | def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
"""
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series | python | def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
"""
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series | [
"def",
"binify_and_jsd",
"(",
"df1",
",",
"df2",
",",
"bins",
",",
"pair",
"=",
"None",
")",
":",
"binned1",
"=",
"binify",
"(",
"df1",
",",
"bins",
"=",
"bins",
")",
".",
"dropna",
"(",
"how",
"=",
"'all'",
",",
"axis",
"=",
"1",
")",
"binned2",
"=",
"binify",
"(",
"df2",
",",
"bins",
"=",
"bins",
")",
".",
"dropna",
"(",
"how",
"=",
"'all'",
",",
"axis",
"=",
"1",
")",
"binned1",
",",
"binned2",
"=",
"binned1",
".",
"align",
"(",
"binned2",
",",
"axis",
"=",
"1",
",",
"join",
"=",
"'inner'",
")",
"series",
"=",
"np",
".",
"sqrt",
"(",
"jsd",
"(",
"binned1",
",",
"binned2",
")",
")",
"series",
".",
"name",
"=",
"pair",
"return",
"series"
] | Binify and calculate Jensen-Shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2 | [
"Binify",
"and",
"calculate",
"jensen",
"-",
"shannon",
"divergence",
"between",
"two",
"dataframes"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L190-L215 |
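Note the square root: the function returns the Jensen-Shannon distance (the root of the divergence), which unlike the divergence is a metric. A short sketch, assuming ten equal-width bins:

```python
import numpy as np
import pandas as pd

df1 = pd.DataFrame({'event1': np.random.beta(5, 1, size=50)})
df2 = pd.DataFrame({'event1': np.random.beta(1, 5, size=50)})

distance = binify_and_jsd(df1, df2, bins=np.linspace(0, 1, 11),
                          pair='group1-group2')
print(distance)  # per-event Jensen-Shannon distance in [0, 1]
```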
YeoLab/anchor | anchor/infotheory.py | cross_phenotype_jsd | def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
bins : array-like
Bins to use for transforming the data into probability distributions
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
"""
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, bins, pair)
jsds.append(series)
return pd.concat(jsds, axis=1) | python | def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
bins : array-like
Bins to use for transforming the data into probability distributions
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
"""
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, bins, pair)
jsds.append(series)
return pd.concat(jsds, axis=1) | [
"def",
"cross_phenotype_jsd",
"(",
"data",
",",
"groupby",
",",
"bins",
",",
"n_iter",
"=",
"100",
")",
":",
"grouped",
"=",
"data",
".",
"groupby",
"(",
"groupby",
")",
"jsds",
"=",
"[",
"]",
"seen",
"=",
"set",
"(",
"[",
"]",
")",
"for",
"phenotype1",
",",
"df1",
"in",
"grouped",
":",
"for",
"phenotype2",
",",
"df2",
"in",
"grouped",
":",
"pair",
"=",
"tuple",
"(",
"sorted",
"(",
"[",
"phenotype1",
",",
"phenotype2",
"]",
")",
")",
"if",
"pair",
"in",
"seen",
":",
"continue",
"seen",
".",
"add",
"(",
"pair",
")",
"if",
"phenotype1",
"==",
"phenotype2",
":",
"seriess",
"=",
"[",
"]",
"bs",
"=",
"cross_validation",
".",
"Bootstrap",
"(",
"df1",
".",
"shape",
"[",
"0",
"]",
",",
"n_iter",
"=",
"n_iter",
",",
"train_size",
"=",
"0.5",
")",
"for",
"i",
",",
"(",
"ind1",
",",
"ind2",
")",
"in",
"enumerate",
"(",
"bs",
")",
":",
"df1_subset",
"=",
"df1",
".",
"iloc",
"[",
"ind1",
",",
":",
"]",
"df2_subset",
"=",
"df2",
".",
"iloc",
"[",
"ind2",
",",
":",
"]",
"seriess",
".",
"append",
"(",
"binify_and_jsd",
"(",
"df1_subset",
",",
"df2_subset",
",",
"None",
",",
"bins",
")",
")",
"series",
"=",
"pd",
".",
"concat",
"(",
"seriess",
",",
"axis",
"=",
"1",
",",
"names",
"=",
"None",
")",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"series",
".",
"name",
"=",
"pair",
"jsds",
".",
"append",
"(",
"series",
")",
"else",
":",
"series",
"=",
"binify_and_jsd",
"(",
"df1",
",",
"df2",
",",
"pair",
",",
"bins",
")",
"jsds",
".",
"append",
"(",
"series",
")",
"return",
"pd",
".",
"concat",
"(",
"jsds",
",",
"axis",
"=",
"1",
")"
] | Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
bins : array-like
Bins to use for transforming the data into probability distributions
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes | [
"Jensen",
"-",
"Shannon",
"divergence",
"of",
"features",
"across",
"phenotypes"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L218-L266 |
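sklearn's cross_validation.Bootstrap was removed from scikit-learn years ago; a rough numpy stand-in for the split-half resampling consumed by the loop above might look like this (a sketch under that assumption, not a drop-in replacement):

```python
import numpy as np

def split_half_iter(n_samples, n_iter=100, train_size=0.5, seed=0):
    # Yield disjoint (ind1, ind2) index pairs, each covering train_size
    # of the samples, mimicking how the loop above consumes Bootstrap
    rng = np.random.RandomState(seed)
    n_half = int(train_size * n_samples)
    for _ in range(n_iter):
        order = rng.permutation(n_samples)
        yield order[:n_half], order[n_half:2 * n_half]

# e.g. bs = split_half_iter(df1.shape[0], n_iter=n_iter, train_size=0.5)
```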
YeoLab/anchor | anchor/infotheory.py | jsd_df_to_2d | def jsd_df_to_2d(jsd_df):
"""Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
"""
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d.fillna(0) + np.tril(jsd_2d.T, -1) | python | def jsd_df_to_2d(jsd_df):
"""Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
"""
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d.fillna(0) + np.tril(jsd_2d.T, -1) | [
"def",
"jsd_df_to_2d",
"(",
"jsd_df",
")",
":",
"jsd_2d",
"=",
"jsd_df",
".",
"mean",
"(",
")",
".",
"reset_index",
"(",
")",
"jsd_2d",
"=",
"jsd_2d",
".",
"rename",
"(",
"columns",
"=",
"{",
"'level_0'",
":",
"'phenotype1'",
",",
"'level_1'",
":",
"'phenotype2'",
",",
"0",
":",
"'jsd'",
"}",
")",
"jsd_2d",
"=",
"jsd_2d",
".",
"pivot",
"(",
"index",
"=",
"'phenotype1'",
",",
"columns",
"=",
"'phenotype2'",
",",
"values",
"=",
"'jsd'",
")",
"return",
"jsd_2d",
"+",
"np",
".",
"tril",
"(",
"jsd_2d",
".",
"T",
",",
"-",
"1",
")"
] | Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes | [
"Transform",
"a",
"tall",
"JSD",
"dataframe",
"to",
"a",
"square",
"matrix",
"of",
"mean",
"JSDs"
] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L269-L289 |
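A worked example with two phenotypes and two features; the printed matrix assumes the fillna(0) fix above, which is what keeps the mirrored lower triangle finite (NaN + value would otherwise stay NaN):

```python
import pandas as pd

# Hypothetical per-feature JSDs for the pairs (A, A), (A, B), (B, B)
jsd_df = pd.DataFrame({('A', 'A'): [0.10, 0.12],
                       ('A', 'B'): [0.60, 0.64],
                       ('B', 'B'): [0.11, 0.09]})
print(jsd_df_to_2d(jsd_df))
# phenotype2     A     B
# phenotype1
# A           0.11  0.62
# B           0.62  0.10
```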
orweis/winpcapy | winpcapy/winpcapy.py | WinPcap.run | def run(self, callback=None, limit=0):
"""
Start pcap's loop over the interface, calling the given callback for each packet
:param callback: a function receiving (win_pcap, param, header, pkt_data) for each packet intercepted
:param limit: how many packets to capture (A value of -1 or 0 is equivalent to infinity)
"""
if self._handle is None:
raise self.DeviceIsNotOpen()
# Set new callback
self._callback = callback
# Run loop with callback wrapper
wtypes.pcap_loop(self._handle, limit, self._callback_wrapper, None) | python | def run(self, callback=None, limit=0):
"""
Start pcap's loop over the interface, calling the given callback for each packet
:param callback: a function receiving (win_pcap, param, header, pkt_data) for each packet intercepted
:param limit: how many packets to capture (A value of -1 or 0 is equivalent to infinity)
"""
if self._handle is None:
raise self.DeviceIsNotOpen()
# Set new callback
self._callback = callback
# Run loop with callback wrapper
wtypes.pcap_loop(self._handle, limit, self._callback_wrapper, None) | [
"def",
"run",
"(",
"self",
",",
"callback",
"=",
"None",
",",
"limit",
"=",
"0",
")",
":",
"if",
"self",
".",
"_handle",
"is",
"None",
":",
"raise",
"self",
".",
"DeviceIsNotOpen",
"(",
")",
"# Set new callback",
"self",
".",
"_callback",
"=",
"callback",
"# Run loop with callback wrapper",
"wtypes",
".",
"pcap_loop",
"(",
"self",
".",
"_handle",
",",
"limit",
",",
"self",
".",
"_callback_wrapper",
",",
"None",
")"
] | Start pcap's loop over the interface, calling the given callback for each packet
:param callback: a function receiving (win_pcap, param, header, pkt_data) for each packet intercepted
:param limit: how many packets to capture (A value of -1 or 0 is equivalent to infinity) | [
"Start",
"pcap",
"s",
"loop",
"over",
"the",
"interface",
"calling",
"the",
"given",
"callback",
"for",
"each",
"packet",
":",
"param",
"callback",
":",
"a",
"function",
"receiving",
"(",
"win_pcap",
"param",
"header",
"pkt_data",
")",
"for",
"each",
"packet",
"intercepted",
":",
"param",
"limit",
":",
"how",
"many",
"packets",
"to",
"capture",
"(",
"A",
"value",
"of",
"-",
"1",
"or",
"0",
"is",
"equivalent",
"to",
"infinity",
")"
] | train | https://github.com/orweis/winpcapy/blob/47ddb7f34de865c7ff6391c73b36b3af9a595158/winpcapy/winpcapy.py#L120-L131 |
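A minimal capture sketch; the device guid is a placeholder, the callback follows the (win_pcap, param, header, pkt_data) convention documented above, and header.contents.len is assumed from the usual pcap_pkthdr layout:

```python
def packet_callback(win_pcap, param, header, pkt_data):
    print('captured a packet of length', header.contents.len)

device_name = r'\Device\NPF_{00000000-0000-0000-0000-000000000000}'  # placeholder
with WinPcap(device_name) as capture:
    capture.run(callback=packet_callback, limit=5)  # stop after 5 packets
```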
orweis/winpcapy | winpcapy/winpcapy.py | WinPcap.send | def send(self, packet_buffer):
"""
send a buffer as a packet to the network interface
:param packet_buffer: buffer to send (length shouldn't exceed MAX_INT)
"""
if self._handle is None:
raise self.DeviceIsNotOpen()
buffer_length = len(packet_buffer)
buf_send = ctypes.cast(ctypes.create_string_buffer(packet_buffer, buffer_length),
ctypes.POINTER(ctypes.c_ubyte))
wtypes.pcap_sendpacket(self._handle, buf_send, buffer_length) | python | def send(self, packet_buffer):
"""
send a buffer as a packet to the network interface
:param packet_buffer: buffer to send (length shouldn't exceed MAX_INT)
"""
if self._handle is None:
raise self.DeviceIsNotOpen()
buffer_length = len(packet_buffer)
buf_send = ctypes.cast(ctypes.create_string_buffer(packet_buffer, buffer_length),
ctypes.POINTER(ctypes.c_ubyte))
wtypes.pcap_sendpacket(self._handle, buf_send, buffer_length) | [
"def",
"send",
"(",
"self",
",",
"packet_buffer",
")",
":",
"if",
"self",
".",
"_handle",
"is",
"None",
":",
"raise",
"self",
".",
"DeviceIsNotOpen",
"(",
")",
"buffer_length",
"=",
"len",
"(",
"packet_buffer",
")",
"buf_send",
"=",
"ctypes",
".",
"cast",
"(",
"ctypes",
".",
"create_string_buffer",
"(",
"packet_buffer",
",",
"buffer_length",
")",
",",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_ubyte",
")",
")",
"wtypes",
".",
"pcap_sendpacket",
"(",
"self",
".",
"_handle",
",",
"buf_send",
",",
"buffer_length",
")"
] | send a buffer as a packet to the network interface
:param packet_buffer: buffer to send (length shouldn't exceed MAX_INT) | [
"send",
"a",
"buffer",
"as",
"a",
"packet",
"to",
"the",
"network",
"interface",
":",
"param",
"packet_buffer",
":",
"buffer",
"to",
"send",
"(",
"length",
"shouldn",
"t",
"exceed",
"MAX_INT",
")"
] | train | https://github.com/orweis/winpcapy/blob/47ddb7f34de865c7ff6391c73b36b3af9a595158/winpcapy/winpcapy.py#L133-L143 |
orweis/winpcapy | winpcapy/winpcapy.py | WinPcapUtils.capture_on | def capture_on(pattern, callback):
"""
:param pattern: a wildcard pattern to match the description of a network interface to capture packets on
:param callback: a function to call with each intercepted packet
"""
device_name, desc = WinPcapDevices.get_matching_device(pattern)
if device_name is not None:
with WinPcap(device_name) as capture:
capture.run(callback=callback) | python | def capture_on(pattern, callback):
"""
:param pattern: a wildcard pattern to match the description of a network interface to capture packets on
:param callback: a function to call with each intercepted packet
"""
device_name, desc = WinPcapDevices.get_matching_device(pattern)
if device_name is not None:
with WinPcap(device_name) as capture:
capture.run(callback=callback) | [
"def",
"capture_on",
"(",
"pattern",
",",
"callback",
")",
":",
"device_name",
",",
"desc",
"=",
"WinPcapDevices",
".",
"get_matching_device",
"(",
"pattern",
")",
"if",
"device_name",
"is",
"not",
"None",
":",
"with",
"WinPcap",
"(",
"device_name",
")",
"as",
"capture",
":",
"capture",
".",
"run",
"(",
"callback",
"=",
"callback",
")"
] | :param pattern: a wildcard pattern to match the description of a network interface to capture packets on
:param callback: a function to call with each intercepted packet | [
":",
"param",
"pattern",
":",
"a",
"wildcard",
"pattern",
"to",
"match",
"the",
"description",
"of",
"a",
"network",
"interface",
"to",
"capture",
"packets",
"on",
":",
"param",
"callback",
":",
"a",
"function",
"to",
"call",
"with",
"each",
"intercepted",
"packet"
] | train | https://github.com/orweis/winpcapy/blob/47ddb7f34de865c7ff6391c73b36b3af9a595158/winpcapy/winpcapy.py#L163-L171 |
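A short sketch; '*Ethernet*' is a hypothetical wildcard to be adjusted to one of the adapter descriptions returned by WinPcapDevices.list_devices():

```python
def packet_callback(win_pcap, param, header, pkt_data):
    print('packet intercepted')

WinPcapUtils.capture_on('*Ethernet*', packet_callback)  # runs until interrupted
```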
orweis/winpcapy | winpcapy/winpcapy.py | WinPcapUtils.capture_on_device_name | def capture_on_device_name(device_name, callback):
"""
:param device_name: the name (guid) of a device as provided by WinPcapDevices.list_devices()
:param callback: a function to call with each intercepted packet
"""
with WinPcap(device_name) as capture:
capture.run(callback=callback) | python | def capture_on_device_name(device_name, callback):
"""
:param device_name: the name (guid) of a device as provided by WinPcapDevices.list_devices()
:param callback: a function to call with each intercepted packet
"""
with WinPcap(device_name) as capture:
capture.run(callback=callback) | [
"def",
"capture_on_device_name",
"(",
"device_name",
",",
"callback",
")",
":",
"with",
"WinPcap",
"(",
"device_name",
")",
"as",
"capture",
":",
"capture",
".",
"run",
"(",
"callback",
"=",
"callback",
")"
] | :param device_name: the name (guid) of a device as provided by WinPcapDevices.list_devices()
:param callback: a function to call with each intercepted packet | [
":",
"param",
"device_name",
":",
"the",
"name",
"(",
"guid",
")",
"of",
"a",
"device",
"as",
"provided",
"by",
"WinPcapDevices",
".",
"list_devices",
"()",
":",
"param",
"callback",
":",
"a",
"function",
"to",
"call",
"with",
"each",
"intercepted",
"packet"
] | train | https://github.com/orweis/winpcapy/blob/47ddb7f34de865c7ff6391c73b36b3af9a595158/winpcapy/winpcapy.py#L174-L180 |
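The device name expected here is the pcap device string rather than a human-readable description; a sketch assuming WinPcapDevices.list_devices() returns a {name: description} mapping.

from winpcapy import WinPcapDevices, WinPcapUtils

def on_packet(win_pcap, param, header, pkt_data):
    print(header.contents.len)

# Assumption: list_devices() yields {device_name: description}.
devices = WinPcapDevices.list_devices()
first_device = next(iter(devices))
WinPcapUtils.capture_on_device_name(first_device, on_packet)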
orweis/winpcapy | winpcapy/winpcapy.py | WinPcapUtils.send_packet | def send_packet(self, pattern, packet_buffer, callback=None, limit=10):
"""
Send a buffer as a packet to a network interface and optionally capture a response
:param pattern: a wildcard pattern to match the description of a network interface to capture packets on
:param packet_buffer: a buffer to send (length shouldn't exceed MAX_INT)
:param callback: If not None, a function to call with each intercepted packet
:param limit: how many packets to capture (A value of -1 or 0 is equivalent to infinity)
"""
device_name, desc = WinPcapDevices.get_matching_device(pattern)
if device_name is not None:
with WinPcap(device_name) as capture:
capture.send(packet_buffer)
if callback is not None:
capture.run(callback=callback, limit=limit) | python | def send_packet(self, pattern, packet_buffer, callback=None, limit=10):
"""
Send a buffer as a packet to a network interface and optionally capture a response
:param pattern: a wildcard pattern to match the description of a network interface to capture packets on
:param packet_buffer: a buffer to send (length shouldn't exceed MAX_INT)
:param callback: If not None, a function to call with each intercepted packet
:param limit: how many packets to capture (A value of -1 or 0 is equivalent to infinity)
"""
device_name, desc = WinPcapDevices.get_matching_device(pattern)
if device_name is not None:
with WinPcap(device_name) as capture:
capture.send(packet_buffer)
if callback is not None:
capture.run(callback=callback, limit=limit) | [
"def",
"send_packet",
"(",
"self",
",",
"pattern",
",",
"packet_buffer",
",",
"callback",
"=",
"None",
",",
"limit",
"=",
"10",
")",
":",
"device_name",
",",
"desc",
"=",
"WinPcapDevices",
".",
"get_matching_device",
"(",
"pattern",
")",
"if",
"device_name",
"is",
"not",
"None",
":",
"with",
"WinPcap",
"(",
"device_name",
")",
"as",
"capture",
":",
"capture",
".",
"send",
"(",
"packet_buffer",
")",
"if",
"callback",
"is",
"not",
"None",
":",
"capture",
".",
"run",
"(",
"callback",
"=",
"callback",
",",
"limit",
"=",
"limit",
")"
] | Send a buffer as a packet to a network interface and optionally capture a response
:param pattern: a wildcard pattern to match the description of a network interface to capture packets on
:param packet_buffer: a buffer to send (length shouldn't exceed MAX_INT)
:param callback: If not None, a function to call with each intercepted packet
:param limit: how many packets to capture (A value of -1 or 0 is equivalent to infinity) | [
"Send",
"a",
"buffer",
"as",
"a",
"packet",
"to",
"a",
"network",
"interface",
"and",
"optionally",
"capture",
"a",
"response",
":",
"param",
"pattern",
":",
"a",
"wildcard",
"pattern",
"to",
"match",
"the",
"description",
"of",
"a",
"network",
"interface",
"to",
"capture",
"packets",
"on",
":",
"param",
"packet_buffer",
":",
"a",
"buffer",
"to",
"send",
"(",
"length",
"shouldn",
"t",
"exceed",
"MAX_INT",
")",
":",
"param",
"callback",
":",
"If",
"not",
"None",
"a",
"function",
"to",
"call",
"with",
"each",
"intercepted",
"packet",
":",
"param",
"limit",
":",
"how",
"many",
"packets",
"to",
"capture",
"(",
"A",
"value",
"of",
"-",
"1",
"or",
"0",
"is",
"equivalent",
"to",
"infinity",
")"
] | train | https://github.com/orweis/winpcapy/blob/47ddb7f34de865c7ff6391c73b36b3af9a595158/winpcapy/winpcapy.py#L191-L204 |
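A sketch of send_packet with a hand-built broadcast Ethernet frame; the frame bytes are illustrative, and the call goes through an instance to match the recorded def send_packet(self, ...) signature.

from winpcapy import WinPcapUtils

# Illustrative 60-byte broadcast frame (destination MAC, source MAC,
# EtherType 0x0800, zero padding); not a meaningful IPv4 packet.
frame = (
    b"\xff\xff\xff\xff\xff\xff"
    b"\x00\x11\x22\x33\x44\x55"
    b"\x08\x00"
    + b"\x00" * 46
)

# callback=None sends without capturing; pass a callback to collect
# up to `limit` response packets (default 10 per the code above).
WinPcapUtils().send_packet("*Ethernet*", frame, callback=None)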
aaugustin/django-sequences | sequences/__init__.py | get_next_value | def get_next_value(
sequence_name='default', initial_value=1, reset_value=None,
*, nowait=False, using=None):
"""
Return the next value for a given sequence.
"""
# Inner import because models cannot be imported before their application.
from .models import Sequence
if reset_value is not None:
assert initial_value < reset_value
if using is None:
using = router.db_for_write(Sequence)
connection = connections[using]
if (getattr(connection, 'pg_version', 0) >= 90500
and reset_value is None and not nowait):
# PostgreSQL ≥ 9.5 supports "upsert".
with connection.cursor() as cursor:
cursor.execute(UPSERT_QUERY, [sequence_name, initial_value])
last, = cursor.fetchone()
return last
else:
# Other databases require making more database queries.
with transaction.atomic(using=using, savepoint=False):
sequence, created = (
Sequence.objects
.select_for_update(nowait=nowait)
.get_or_create(name=sequence_name,
defaults={'last': initial_value})
)
if not created:
sequence.last += 1
if reset_value is not None and sequence.last >= reset_value:
sequence.last = initial_value
sequence.save()
return sequence.last | python | def get_next_value(
sequence_name='default', initial_value=1, reset_value=None,
*, nowait=False, using=None):
"""
Return the next value for a given sequence.
"""
# Inner import because models cannot be imported before their application.
from .models import Sequence
if reset_value is not None:
assert initial_value < reset_value
if using is None:
using = router.db_for_write(Sequence)
connection = connections[using]
if (getattr(connection, 'pg_version', 0) >= 90500
and reset_value is None and not nowait):
# PostgreSQL ≥ 9.5 supports "upsert".
with connection.cursor() as cursor:
cursor.execute(UPSERT_QUERY, [sequence_name, initial_value])
last, = cursor.fetchone()
return last
else:
# Other databases require making more database queries.
with transaction.atomic(using=using, savepoint=False):
sequence, created = (
Sequence.objects
.select_for_update(nowait=nowait)
.get_or_create(name=sequence_name,
defaults={'last': initial_value})
)
if not created:
sequence.last += 1
if reset_value is not None and sequence.last >= reset_value:
sequence.last = initial_value
sequence.save()
return sequence.last | [
"def",
"get_next_value",
"(",
"sequence_name",
"=",
"'default'",
",",
"initial_value",
"=",
"1",
",",
"reset_value",
"=",
"None",
",",
"*",
",",
"nowait",
"=",
"False",
",",
"using",
"=",
"None",
")",
":",
"# Inner import because models cannot be imported before their application.",
"from",
".",
"models",
"import",
"Sequence",
"if",
"reset_value",
"is",
"not",
"None",
":",
"assert",
"initial_value",
"<",
"reset_value",
"if",
"using",
"is",
"None",
":",
"using",
"=",
"router",
".",
"db_for_write",
"(",
"Sequence",
")",
"connection",
"=",
"connections",
"[",
"using",
"]",
"if",
"(",
"getattr",
"(",
"connection",
",",
"'pg_version'",
",",
"0",
")",
">=",
"90500",
"and",
"reset_value",
"is",
"None",
"and",
"not",
"nowait",
")",
":",
"# PostgreSQL ≥ 9.5 supports \"upsert\".",
"with",
"connection",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"execute",
"(",
"UPSERT_QUERY",
",",
"[",
"sequence_name",
",",
"initial_value",
"]",
")",
"last",
",",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"return",
"last",
"else",
":",
"# Other databases require making more database queries.",
"with",
"transaction",
".",
"atomic",
"(",
"using",
"=",
"using",
",",
"savepoint",
"=",
"False",
")",
":",
"sequence",
",",
"created",
"=",
"(",
"Sequence",
".",
"objects",
".",
"select_for_update",
"(",
"nowait",
"=",
"nowait",
")",
".",
"get_or_create",
"(",
"name",
"=",
"sequence_name",
",",
"defaults",
"=",
"{",
"'last'",
":",
"initial_value",
"}",
")",
")",
"if",
"not",
"created",
":",
"sequence",
".",
"last",
"+=",
"1",
"if",
"reset_value",
"is",
"not",
"None",
"and",
"sequence",
".",
"last",
">=",
"reset_value",
":",
"sequence",
".",
"last",
"=",
"initial_value",
"sequence",
".",
"save",
"(",
")",
"return",
"sequence",
".",
"last"
] | Return the next value for a given sequence. | [
"Return",
"the",
"next",
"value",
"for",
"a",
"given",
"sequence",
"."
] | train | https://github.com/aaugustin/django-sequences/blob/0228ae003540ccb63be4a456fb8f63a2f4038de6/sequences/__init__.py#L13-L59 |
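Typical get_next_value calls, assuming a configured Django project with the sequences app installed; each call runs inside a locked transaction (row lock or upsert), so concurrent callers receive distinct values.

from sequences import get_next_value

invoice_number = get_next_value("invoice")    # 1, 2, 3, ... per call
per_year = get_next_value("invoice-2023")     # an independent counter

# reset_value wraps the counter: after returning 999, the next call
# yields initial_value (1) again, matching the `last >= reset_value` check.
ticket = get_next_value("daily-ticket", initial_value=1, reset_value=1000)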
python-tap/tappy | tap/rules.py | Rules.check | def check(self, final_line_count):
"""Check the status of all provided data and update the suite."""
if self._lines_seen["version"]:
self._process_version_lines()
self._process_plan_lines(final_line_count) | python | def check(self, final_line_count):
"""Check the status of all provided data and update the suite."""
if self._lines_seen["version"]:
self._process_version_lines()
self._process_plan_lines(final_line_count) | [
"def",
"check",
"(",
"self",
",",
"final_line_count",
")",
":",
"if",
"self",
".",
"_lines_seen",
"[",
"\"version\"",
"]",
":",
"self",
".",
"_process_version_lines",
"(",
")",
"self",
".",
"_process_plan_lines",
"(",
"final_line_count",
")"
] | Check the status of all provided data and update the suite. | [
"Check",
"the",
"status",
"of",
"all",
"provided",
"data",
"and",
"update",
"the",
"suite",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/rules.py#L15-L19 |
python-tap/tappy | tap/rules.py | Rules._process_version_lines | def _process_version_lines(self):
"""Process version line rules."""
if len(self._lines_seen["version"]) > 1:
self._add_error(_("Multiple version lines appeared."))
elif self._lines_seen["version"][0] != 1:
self._add_error(_("The version must be on the first line.")) | python | def _process_version_lines(self):
"""Process version line rules."""
if len(self._lines_seen["version"]) > 1:
self._add_error(_("Multiple version lines appeared."))
elif self._lines_seen["version"][0] != 1:
self._add_error(_("The version must be on the first line.")) | [
"def",
"_process_version_lines",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"_lines_seen",
"[",
"\"version\"",
"]",
")",
">",
"1",
":",
"self",
".",
"_add_error",
"(",
"_",
"(",
"\"Multiple version lines appeared.\"",
")",
")",
"elif",
"self",
".",
"_lines_seen",
"[",
"\"version\"",
"]",
"[",
"0",
"]",
"!=",
"1",
":",
"self",
".",
"_add_error",
"(",
"_",
"(",
"\"The version must be on the first line.\"",
")",
")"
] | Process version line rules. | [
"Process",
"version",
"line",
"rules",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/rules.py#L21-L26 |
python-tap/tappy | tap/rules.py | Rules._process_plan_lines | def _process_plan_lines(self, final_line_count):
"""Process plan line rules."""
if not self._lines_seen["plan"]:
self._add_error(_("Missing a plan."))
return
if len(self._lines_seen["plan"]) > 1:
self._add_error(_("Only one plan line is permitted per file."))
return
plan, at_line = self._lines_seen["plan"][0]
if not self._plan_on_valid_line(at_line, final_line_count):
self._add_error(
_("A plan must appear at the beginning or end of the file.")
)
return
if plan.expected_tests != self._lines_seen["test"]:
self._add_error(
_("Expected {expected_count} tests but only {seen_count} ran.").format(
expected_count=plan.expected_tests,
seen_count=self._lines_seen["test"],
)
) | python | def _process_plan_lines(self, final_line_count):
"""Process plan line rules."""
if not self._lines_seen["plan"]:
self._add_error(_("Missing a plan."))
return
if len(self._lines_seen["plan"]) > 1:
self._add_error(_("Only one plan line is permitted per file."))
return
plan, at_line = self._lines_seen["plan"][0]
if not self._plan_on_valid_line(at_line, final_line_count):
self._add_error(
_("A plan must appear at the beginning or end of the file.")
)
return
if plan.expected_tests != self._lines_seen["test"]:
self._add_error(
_("Expected {expected_count} tests but only {seen_count} ran.").format(
expected_count=plan.expected_tests,
seen_count=self._lines_seen["test"],
)
) | [
"def",
"_process_plan_lines",
"(",
"self",
",",
"final_line_count",
")",
":",
"if",
"not",
"self",
".",
"_lines_seen",
"[",
"\"plan\"",
"]",
":",
"self",
".",
"_add_error",
"(",
"_",
"(",
"\"Missing a plan.\"",
")",
")",
"return",
"if",
"len",
"(",
"self",
".",
"_lines_seen",
"[",
"\"plan\"",
"]",
")",
">",
"1",
":",
"self",
".",
"_add_error",
"(",
"_",
"(",
"\"Only one plan line is permitted per file.\"",
")",
")",
"return",
"plan",
",",
"at_line",
"=",
"self",
".",
"_lines_seen",
"[",
"\"plan\"",
"]",
"[",
"0",
"]",
"if",
"not",
"self",
".",
"_plan_on_valid_line",
"(",
"at_line",
",",
"final_line_count",
")",
":",
"self",
".",
"_add_error",
"(",
"_",
"(",
"\"A plan must appear at the beginning or end of the file.\"",
")",
")",
"return",
"if",
"plan",
".",
"expected_tests",
"!=",
"self",
".",
"_lines_seen",
"[",
"\"test\"",
"]",
":",
"self",
".",
"_add_error",
"(",
"_",
"(",
"\"Expected {expected_count} tests but only {seen_count} ran.\"",
")",
".",
"format",
"(",
"expected_count",
"=",
"plan",
".",
"expected_tests",
",",
"seen_count",
"=",
"self",
".",
"_lines_seen",
"[",
"\"test\"",
"]",
",",
")",
")"
] | Process plan line rules. | [
"Process",
"plan",
"line",
"rules",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/rules.py#L28-L51 |
python-tap/tappy | tap/rules.py | Rules._plan_on_valid_line | def _plan_on_valid_line(self, at_line, final_line_count):
"""Check if a plan is on a valid line."""
# Put the common cases first.
if at_line == 1 or at_line == final_line_count:
return True
# The plan may only appear on line 2 if the version is at line 1.
after_version = (
self._lines_seen["version"]
and self._lines_seen["version"][0] == 1
and at_line == 2
)
if after_version:
return True
return False | python | def _plan_on_valid_line(self, at_line, final_line_count):
"""Check if a plan is on a valid line."""
# Put the common cases first.
if at_line == 1 or at_line == final_line_count:
return True
# The plan may only appear on line 2 if the version is at line 1.
after_version = (
self._lines_seen["version"]
and self._lines_seen["version"][0] == 1
and at_line == 2
)
if after_version:
return True
return False | [
"def",
"_plan_on_valid_line",
"(",
"self",
",",
"at_line",
",",
"final_line_count",
")",
":",
"# Put the common cases first.",
"if",
"at_line",
"==",
"1",
"or",
"at_line",
"==",
"final_line_count",
":",
"return",
"True",
"# The plan may only appear on line 2 if the version is at line 1.",
"after_version",
"=",
"(",
"self",
".",
"_lines_seen",
"[",
"\"version\"",
"]",
"and",
"self",
".",
"_lines_seen",
"[",
"\"version\"",
"]",
"[",
"0",
"]",
"==",
"1",
"and",
"at_line",
"==",
"2",
")",
"if",
"after_version",
":",
"return",
"True",
"return",
"False"
] | Check if a plan is on a valid line. | [
"Check",
"if",
"a",
"plan",
"is",
"on",
"a",
"valid",
"line",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/rules.py#L53-L68 |
python-tap/tappy | tap/rules.py | Rules.handle_bail | def handle_bail(self, bail):
"""Handle a bail line."""
self._add_error(_("Bailed: {reason}").format(reason=bail.reason)) | python | def handle_bail(self, bail):
"""Handle a bail line."""
self._add_error(_("Bailed: {reason}").format(reason=bail.reason)) | [
"def",
"handle_bail",
"(",
"self",
",",
"bail",
")",
":",
"self",
".",
"_add_error",
"(",
"_",
"(",
"\"Bailed: {reason}\"",
")",
".",
"format",
"(",
"reason",
"=",
"bail",
".",
"reason",
")",
")"
] | Handle a bail line. | [
"Handle",
"a",
"bail",
"line",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/rules.py#L70-L72 |
python-tap/tappy | tap/rules.py | Rules.handle_skipping_plan | def handle_skipping_plan(self, skip_plan):
"""Handle a plan that contains a SKIP directive."""
skip_line = Result(True, None, skip_plan.directive.text, Directive("SKIP"))
self._suite.addTest(Adapter(self._filename, skip_line)) | python | def handle_skipping_plan(self, skip_plan):
"""Handle a plan that contains a SKIP directive."""
skip_line = Result(True, None, skip_plan.directive.text, Directive("SKIP"))
self._suite.addTest(Adapter(self._filename, skip_line)) | [
"def",
"handle_skipping_plan",
"(",
"self",
",",
"skip_plan",
")",
":",
"skip_line",
"=",
"Result",
"(",
"True",
",",
"None",
",",
"skip_plan",
".",
"directive",
".",
"text",
",",
"Directive",
"(",
"\"SKIP\"",
")",
")",
"self",
".",
"_suite",
".",
"addTest",
"(",
"Adapter",
"(",
"self",
".",
"_filename",
",",
"skip_line",
")",
")"
] | Handle a plan that contains a SKIP directive. | [
"Handle",
"a",
"plan",
"that",
"contains",
"a",
"SKIP",
"directive",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/rules.py#L78-L81 |
python-tap/tappy | tap/rules.py | Rules._add_error | def _add_error(self, message):
"""Add an error test to the suite."""
error_line = Result(False, None, message, Directive(""))
self._suite.addTest(Adapter(self._filename, error_line)) | python | def _add_error(self, message):
"""Add an error test to the suite."""
error_line = Result(False, None, message, Directive(""))
self._suite.addTest(Adapter(self._filename, error_line)) | [
"def",
"_add_error",
"(",
"self",
",",
"message",
")",
":",
"error_line",
"=",
"Result",
"(",
"False",
",",
"None",
",",
"message",
",",
"Directive",
"(",
"\"\"",
")",
")",
"self",
".",
"_suite",
".",
"addTest",
"(",
"Adapter",
"(",
"self",
".",
"_filename",
",",
"error_line",
")",
")"
] | Add an error test to the suite. | [
"Add",
"an",
"error",
"test",
"to",
"the",
"suite",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/rules.py#L95-L98 |
python-tap/tappy | tap/formatter.py | format_exception | def format_exception(exception):
"""Format an exception as diagnostics output.
exception is the tuple as expected from sys.exc_info.
"""
exception_lines = traceback.format_exception(*exception)
# The lines returned from format_exception do not strictly contain
# one line per element in the list (i.e. some elements have new
# line characters in the middle). Normalize that oddity.
lines = "".join(exception_lines).splitlines(True)
return format_as_diagnostics(lines) | python | def format_exception(exception):
"""Format an exception as diagnostics output.
exception is the tuple as expected from sys.exc_info.
"""
exception_lines = traceback.format_exception(*exception)
# The lines returned from format_exception do not strictly contain
# one line per element in the list (i.e. some elements have new
# line characters in the middle). Normalize that oddity.
lines = "".join(exception_lines).splitlines(True)
return format_as_diagnostics(lines) | [
"def",
"format_exception",
"(",
"exception",
")",
":",
"exception_lines",
"=",
"traceback",
".",
"format_exception",
"(",
"*",
"exception",
")",
"# The lines returned from format_exception do not strictly contain",
"# one line per element in the list (i.e. some elements have new",
"# line characters in the middle). Normalize that oddity.",
"lines",
"=",
"\"\"",
".",
"join",
"(",
"exception_lines",
")",
".",
"splitlines",
"(",
"True",
")",
"return",
"format_as_diagnostics",
"(",
"lines",
")"
] | Format an exception as diagnostics output.
exception is the tuple as expected from sys.exc_info. | [
"Format",
"an",
"exception",
"as",
"diagnostics",
"output",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/formatter.py#L4-L14 |
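A sketch of feeding sys.exc_info() into format_exception; the result is the formatted traceback passed through format_as_diagnostics, which presumably prefixes each line as a TAP diagnostic ("# ..." lines).

import sys

from tap.formatter import format_exception

try:
    1 / 0
except ZeroDivisionError:
    # sys.exc_info() returns the (type, value, traceback) tuple expected here.
    print(format_exception(sys.exc_info()))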
python-tap/tappy | tap/parser.py | Parser.parse | def parse(self, fh):
"""Generate tap.line.Line objects, given a file-like object `fh`.
`fh` may be any object that implements both the iterator and
context management protocol (i.e. it can be used in both a
"with" statement and a "for...in" statement.)
Trailing whitespace and newline characters will be automatically
stripped from the input lines.
"""
with fh:
try:
first_line = next(fh)
except StopIteration:
return
first_parsed = self.parse_line(first_line.rstrip())
fh_new = itertools.chain([first_line], fh)
if first_parsed.category == "version" and first_parsed.version >= 13:
if ENABLE_VERSION_13:
fh_new = peekable(itertools.chain([first_line], fh))
self._try_peeking = True
else: # pragma no cover
print(
"""
WARNING: Optional imports not found, TAP 13 output will be
ignored. To parse yaml, see requirements in docs:
https://tappy.readthedocs.io/en/latest/consumers.html#tap-version-13"""
)
for line in fh_new:
yield self.parse_line(line.rstrip(), fh_new) | python | def parse(self, fh):
"""Generate tap.line.Line objects, given a file-like object `fh`.
`fh` may be any object that implements both the iterator and
context management protocol (i.e. it can be used in both a
"with" statement and a "for...in" statement.)
Trailing whitespace and newline characters will be automatically
stripped from the input lines.
"""
with fh:
try:
first_line = next(fh)
except StopIteration:
return
first_parsed = self.parse_line(first_line.rstrip())
fh_new = itertools.chain([first_line], fh)
if first_parsed.category == "version" and first_parsed.version >= 13:
if ENABLE_VERSION_13:
fh_new = peekable(itertools.chain([first_line], fh))
self._try_peeking = True
else: # pragma no cover
print(
"""
WARNING: Optional imports not found, TAP 13 output will be
ignored. To parse yaml, see requirements in docs:
https://tappy.readthedocs.io/en/latest/consumers.html#tap-version-13"""
)
for line in fh_new:
yield self.parse_line(line.rstrip(), fh_new) | [
"def",
"parse",
"(",
"self",
",",
"fh",
")",
":",
"with",
"fh",
":",
"try",
":",
"first_line",
"=",
"next",
"(",
"fh",
")",
"except",
"StopIteration",
":",
"return",
"first_parsed",
"=",
"self",
".",
"parse_line",
"(",
"first_line",
".",
"rstrip",
"(",
")",
")",
"fh_new",
"=",
"itertools",
".",
"chain",
"(",
"[",
"first_line",
"]",
",",
"fh",
")",
"if",
"first_parsed",
".",
"category",
"==",
"\"version\"",
"and",
"first_parsed",
".",
"version",
">=",
"13",
":",
"if",
"ENABLE_VERSION_13",
":",
"fh_new",
"=",
"peekable",
"(",
"itertools",
".",
"chain",
"(",
"[",
"first_line",
"]",
",",
"fh",
")",
")",
"self",
".",
"_try_peeking",
"=",
"True",
"else",
":",
"# pragma no cover",
"print",
"(",
"\"\"\"\nWARNING: Optional imports not found, TAP 13 output will be\n ignored. To parse yaml, see requirements in docs:\n https://tappy.readthedocs.io/en/latest/consumers.html#tap-version-13\"\"\"",
")",
"for",
"line",
"in",
"fh_new",
":",
"yield",
"self",
".",
"parse_line",
"(",
"line",
".",
"rstrip",
"(",
")",
",",
"fh_new",
")"
] | Generate tap.line.Line objects, given a file-like object `fh`.
`fh` may be any object that implements both the iterator and
context management protocol (i.e. it can be used in both a
"with" statement and a "for...in" statement.)
Trailing whitespace and newline characters will be automatically
stripped from the input lines. | [
"Generate",
"tap",
".",
"line",
".",
"Line",
"objects",
"given",
"a",
"file",
"-",
"like",
"object",
"fh",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/parser.py#L86-L116 |
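Because parse() only requires the iterator and context-manager protocols, an io.StringIO works as the file-like object; a minimal sketch (the sample omits a version line so the plain, non-peeking path is taken and no optional dependencies are needed).

import io

from tap.parser import Parser

sample = u"1..2\nok 1 - addition works\nnot ok 2 - subtraction works\n"

parser = Parser()
# Each yielded object is a tap.line.Line subclass with a .category.
for line in parser.parse(io.StringIO(sample)):
    print(line.category)   # plan, test, test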
python-tap/tappy | tap/parser.py | Parser.parse_line | def parse_line(self, text, fh=None):
"""Parse a line into whatever TAP category it belongs."""
match = self.ok.match(text)
if match:
return self._parse_result(True, match, fh)
match = self.not_ok.match(text)
if match:
return self._parse_result(False, match, fh)
if self.diagnostic.match(text):
return Diagnostic(text)
match = self.plan.match(text)
if match:
return self._parse_plan(match)
match = self.bail.match(text)
if match:
return Bail(match.group("reason"))
match = self.version.match(text)
if match:
return self._parse_version(match)
return Unknown() | python | def parse_line(self, text, fh=None):
"""Parse a line into whatever TAP category it belongs."""
match = self.ok.match(text)
if match:
return self._parse_result(True, match, fh)
match = self.not_ok.match(text)
if match:
return self._parse_result(False, match, fh)
if self.diagnostic.match(text):
return Diagnostic(text)
match = self.plan.match(text)
if match:
return self._parse_plan(match)
match = self.bail.match(text)
if match:
return Bail(match.group("reason"))
match = self.version.match(text)
if match:
return self._parse_version(match)
return Unknown() | [
"def",
"parse_line",
"(",
"self",
",",
"text",
",",
"fh",
"=",
"None",
")",
":",
"match",
"=",
"self",
".",
"ok",
".",
"match",
"(",
"text",
")",
"if",
"match",
":",
"return",
"self",
".",
"_parse_result",
"(",
"True",
",",
"match",
",",
"fh",
")",
"match",
"=",
"self",
".",
"not_ok",
".",
"match",
"(",
"text",
")",
"if",
"match",
":",
"return",
"self",
".",
"_parse_result",
"(",
"False",
",",
"match",
",",
"fh",
")",
"if",
"self",
".",
"diagnostic",
".",
"match",
"(",
"text",
")",
":",
"return",
"Diagnostic",
"(",
"text",
")",
"match",
"=",
"self",
".",
"plan",
".",
"match",
"(",
"text",
")",
"if",
"match",
":",
"return",
"self",
".",
"_parse_plan",
"(",
"match",
")",
"match",
"=",
"self",
".",
"bail",
".",
"match",
"(",
"text",
")",
"if",
"match",
":",
"return",
"Bail",
"(",
"match",
".",
"group",
"(",
"\"reason\"",
")",
")",
"match",
"=",
"self",
".",
"version",
".",
"match",
"(",
"text",
")",
"if",
"match",
":",
"return",
"self",
".",
"_parse_version",
"(",
"match",
")",
"return",
"Unknown",
"(",
")"
] | Parse a line into whatever TAP category it belongs. | [
"Parse",
"a",
"line",
"into",
"whatever",
"TAP",
"category",
"it",
"belongs",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/parser.py#L118-L143 |
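The dispatch order above can be exercised line by line; the expected categories noted in the comments are assumed from the Line subclass each branch returns.

from tap.parser import Parser

parser = Parser()
print(parser.parse_line("ok 1 - it works").category)       # test
print(parser.parse_line("not ok 2 - it fails").category)   # test
print(parser.parse_line("1..5").category)                  # plan
print(parser.parse_line("Bail out! DB is down").category)  # bail
print(parser.parse_line("# just a comment").category)      # diagnostic
print(parser.parse_line("gibberish").category)             # unknown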
python-tap/tappy | tap/parser.py | Parser._parse_plan | def _parse_plan(self, match):
"""Parse a matching plan line."""
expected_tests = int(match.group("expected"))
directive = Directive(match.group("directive"))
# Only SKIP directives are allowed in the plan.
if directive.text and not directive.skip:
return Unknown()
return Plan(expected_tests, directive) | python | def _parse_plan(self, match):
"""Parse a matching plan line."""
expected_tests = int(match.group("expected"))
directive = Directive(match.group("directive"))
# Only SKIP directives are allowed in the plan.
if directive.text and not directive.skip:
return Unknown()
return Plan(expected_tests, directive) | [
"def",
"_parse_plan",
"(",
"self",
",",
"match",
")",
":",
"expected_tests",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"\"expected\"",
")",
")",
"directive",
"=",
"Directive",
"(",
"match",
".",
"group",
"(",
"\"directive\"",
")",
")",
"# Only SKIP directives are allowed in the plan.",
"if",
"directive",
".",
"text",
"and",
"not",
"directive",
".",
"skip",
":",
"return",
"Unknown",
"(",
")",
"return",
"Plan",
"(",
"expected_tests",
",",
"directive",
")"
] | Parse a matching plan line. | [
"Parse",
"a",
"matching",
"plan",
"line",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/parser.py#L145-L154 |
python-tap/tappy | tap/parser.py | Parser._parse_result | def _parse_result(self, ok, match, fh=None):
"""Parse a matching result line into a result instance."""
peek_match = None
try:
if fh is not None and self._try_peeking:
peek_match = self.yaml_block_start.match(fh.peek())
except StopIteration:
pass
if peek_match is None:
return Result(
ok,
number=match.group("number"),
description=match.group("description").strip(),
directive=Directive(match.group("directive")),
)
indent = peek_match.group("indent")
concat_yaml = self._extract_yaml_block(indent, fh)
return Result(
ok,
number=match.group("number"),
description=match.group("description").strip(),
directive=Directive(match.group("directive")),
raw_yaml_block=concat_yaml,
) | python | def _parse_result(self, ok, match, fh=None):
"""Parse a matching result line into a result instance."""
peek_match = None
try:
if fh is not None and self._try_peeking:
peek_match = self.yaml_block_start.match(fh.peek())
except StopIteration:
pass
if peek_match is None:
return Result(
ok,
number=match.group("number"),
description=match.group("description").strip(),
directive=Directive(match.group("directive")),
)
indent = peek_match.group("indent")
concat_yaml = self._extract_yaml_block(indent, fh)
return Result(
ok,
number=match.group("number"),
description=match.group("description").strip(),
directive=Directive(match.group("directive")),
raw_yaml_block=concat_yaml,
) | [
"def",
"_parse_result",
"(",
"self",
",",
"ok",
",",
"match",
",",
"fh",
"=",
"None",
")",
":",
"peek_match",
"=",
"None",
"try",
":",
"if",
"fh",
"is",
"not",
"None",
"and",
"self",
".",
"_try_peeking",
":",
"peek_match",
"=",
"self",
".",
"yaml_block_start",
".",
"match",
"(",
"fh",
".",
"peek",
"(",
")",
")",
"except",
"StopIteration",
":",
"pass",
"if",
"peek_match",
"is",
"None",
":",
"return",
"Result",
"(",
"ok",
",",
"number",
"=",
"match",
".",
"group",
"(",
"\"number\"",
")",
",",
"description",
"=",
"match",
".",
"group",
"(",
"\"description\"",
")",
".",
"strip",
"(",
")",
",",
"directive",
"=",
"Directive",
"(",
"match",
".",
"group",
"(",
"\"directive\"",
")",
")",
",",
")",
"indent",
"=",
"peek_match",
".",
"group",
"(",
"\"indent\"",
")",
"concat_yaml",
"=",
"self",
".",
"_extract_yaml_block",
"(",
"indent",
",",
"fh",
")",
"return",
"Result",
"(",
"ok",
",",
"number",
"=",
"match",
".",
"group",
"(",
"\"number\"",
")",
",",
"description",
"=",
"match",
".",
"group",
"(",
"\"description\"",
")",
".",
"strip",
"(",
")",
",",
"directive",
"=",
"Directive",
"(",
"match",
".",
"group",
"(",
"\"directive\"",
")",
")",
",",
"raw_yaml_block",
"=",
"concat_yaml",
",",
")"
] | Parse a matching result line into a result instance. | [
"Parse",
"a",
"matching",
"result",
"line",
"into",
"a",
"result",
"instance",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/parser.py#L156-L179 |
python-tap/tappy | tap/parser.py | Parser._extract_yaml_block | def _extract_yaml_block(self, indent, fh):
"""Extract a raw yaml block from a file handler"""
raw_yaml = []
indent_match = re.compile(r"^{}".format(indent))
try:
fh.next()
while indent_match.match(fh.peek()):
raw_yaml.append(fh.next().replace(indent, "", 1))
# check for the end and stop adding yaml if encountered
if self.yaml_block_end.match(fh.peek()):
fh.next()
break
except StopIteration:
pass
return "\n".join(raw_yaml) | python | def _extract_yaml_block(self, indent, fh):
"""Extract a raw yaml block from a file handler"""
raw_yaml = []
indent_match = re.compile(r"^{}".format(indent))
try:
fh.next()
while indent_match.match(fh.peek()):
raw_yaml.append(fh.next().replace(indent, "", 1))
# check for the end and stop adding yaml if encountered
if self.yaml_block_end.match(fh.peek()):
fh.next()
break
except StopIteration:
pass
return "\n".join(raw_yaml) | [
"def",
"_extract_yaml_block",
"(",
"self",
",",
"indent",
",",
"fh",
")",
":",
"raw_yaml",
"=",
"[",
"]",
"indent_match",
"=",
"re",
".",
"compile",
"(",
"r\"^{}\"",
".",
"format",
"(",
"indent",
")",
")",
"try",
":",
"fh",
".",
"next",
"(",
")",
"while",
"indent_match",
".",
"match",
"(",
"fh",
".",
"peek",
"(",
")",
")",
":",
"raw_yaml",
".",
"append",
"(",
"fh",
".",
"next",
"(",
")",
".",
"replace",
"(",
"indent",
",",
"\"\"",
",",
"1",
")",
")",
"# check for the end and stop adding yaml if encountered",
"if",
"self",
".",
"yaml_block_end",
".",
"match",
"(",
"fh",
".",
"peek",
"(",
")",
")",
":",
"fh",
".",
"next",
"(",
")",
"break",
"except",
"StopIteration",
":",
"pass",
"return",
"\"\\n\"",
".",
"join",
"(",
"raw_yaml",
")"
] | Extract a raw yaml block from a file handler | [
"Extract",
"a",
"raw",
"yaml",
"block",
"from",
"a",
"file",
"handler"
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/parser.py#L181-L195 |
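The block this method consumes sits under a result line, indented, opened by "---" and closed by "..."; a sketch of the end-to-end shape, assuming the optional TAP version 13 dependencies (pyyaml and more-itertools) are installed so ENABLE_VERSION_13 is true.

import io

from tap.parser import Parser

sample = u"""TAP version 13
1..1
ok 1 - with diagnostics
  ---
  message: everything fine
  severity: comment
  ...
"""

parser = Parser()
tests = [line for line in parser.parse(io.StringIO(sample))
         if line.category == "test"]
# The shared two-space indent is stripped before the YAML is stored.
print(tests[0].yaml_block)   # {'message': 'everything fine', 'severity': 'comment'}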
python-tap/tappy | tap/line.py | Result.yaml_block | def yaml_block(self):
"""Lazy load a yaml_block.
If yaml support is not available,
there is an error in parsing the yaml block,
or no yaml is associated with this result,
``None`` will be returned.
:rtype: dict
"""
if LOAD_YAML and self._yaml_block is not None:
try:
yaml_dict = yaml.load(self._yaml_block)
return yaml_dict
except yaml.error.YAMLError:
print("Error parsing yaml block. Check formatting.")
return None | python | def yaml_block(self):
"""Lazy load a yaml_block.
If yaml support is not available,
there is an error in parsing the yaml block,
or no yaml is associated with this result,
``None`` will be returned.
:rtype: dict
"""
if LOAD_YAML and self._yaml_block is not None:
try:
yaml_dict = yaml.load(self._yaml_block)
return yaml_dict
except yaml.error.YAMLError:
print("Error parsing yaml block. Check formatting.")
return None | [
"def",
"yaml_block",
"(",
"self",
")",
":",
"if",
"LOAD_YAML",
"and",
"self",
".",
"_yaml_block",
"is",
"not",
"None",
":",
"try",
":",
"yaml_dict",
"=",
"yaml",
".",
"load",
"(",
"self",
".",
"_yaml_block",
")",
"return",
"yaml_dict",
"except",
"yaml",
".",
"error",
".",
"YAMLError",
":",
"print",
"(",
"\"Error parsing yaml block. Check formatting.\"",
")",
"return",
"None"
] | Lazy load a yaml_block.
If yaml support is not available,
there is an error in parsing the yaml block,
or no yaml is associated with this result,
``None`` will be returned.
:rtype: dict | [
"Lazy",
"load",
"a",
"yaml_block",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/line.py#L87-L103 |
python-tap/tappy | tap/loader.py | Loader.load | def load(self, files):
"""Load any files found into a suite.
Any directories are walked and their files are added as TAP files.
:returns: A ``unittest.TestSuite`` instance
"""
suite = unittest.TestSuite()
for filepath in files:
if os.path.isdir(filepath):
self._find_tests_in_directory(filepath, suite)
else:
suite.addTest(self.load_suite_from_file(filepath))
return suite | python | def load(self, files):
"""Load any files found into a suite.
Any directories are walked and their files are added as TAP files.
:returns: A ``unittest.TestSuite`` instance
"""
suite = unittest.TestSuite()
for filepath in files:
if os.path.isdir(filepath):
self._find_tests_in_directory(filepath, suite)
else:
suite.addTest(self.load_suite_from_file(filepath))
return suite | [
"def",
"load",
"(",
"self",
",",
"files",
")",
":",
"suite",
"=",
"unittest",
".",
"TestSuite",
"(",
")",
"for",
"filepath",
"in",
"files",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"filepath",
")",
":",
"self",
".",
"_find_tests_in_directory",
"(",
"filepath",
",",
"suite",
")",
"else",
":",
"suite",
".",
"addTest",
"(",
"self",
".",
"load_suite_from_file",
"(",
"filepath",
")",
")",
"return",
"suite"
] | Load any files found into a suite.
Any directories are walked and their files are added as TAP files.
:returns: A ``unittest.TestSuite`` instance | [
"Load",
"any",
"files",
"found",
"into",
"a",
"suite",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/loader.py#L19-L32 |
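Loading mixed paths into one runnable suite; the paths below are illustrative.

import unittest

from tap.loader import Loader

# Directories are walked for TAP files; plain file paths load directly.
loader = Loader()
suite = loader.load(["results/", "extra.tap"])
unittest.TextTestRunner(verbosity=2).run(suite)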
python-tap/tappy | tap/loader.py | Loader.load_suite_from_file | def load_suite_from_file(self, filename):
"""Load a test suite with test lines from the provided TAP file.
:returns: A ``unittest.TestSuite`` instance
"""
suite = unittest.TestSuite()
rules = Rules(filename, suite)
if not os.path.exists(filename):
rules.handle_file_does_not_exist()
return suite
line_generator = self._parser.parse_file(filename)
return self._load_lines(filename, line_generator, suite, rules) | python | def load_suite_from_file(self, filename):
"""Load a test suite with test lines from the provided TAP file.
:returns: A ``unittest.TestSuite`` instance
"""
suite = unittest.TestSuite()
rules = Rules(filename, suite)
if not os.path.exists(filename):
rules.handle_file_does_not_exist()
return suite
line_generator = self._parser.parse_file(filename)
return self._load_lines(filename, line_generator, suite, rules) | [
"def",
"load_suite_from_file",
"(",
"self",
",",
"filename",
")",
":",
"suite",
"=",
"unittest",
".",
"TestSuite",
"(",
")",
"rules",
"=",
"Rules",
"(",
"filename",
",",
"suite",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"rules",
".",
"handle_file_does_not_exist",
"(",
")",
"return",
"suite",
"line_generator",
"=",
"self",
".",
"_parser",
".",
"parse_file",
"(",
"filename",
")",
"return",
"self",
".",
"_load_lines",
"(",
"filename",
",",
"line_generator",
",",
"suite",
",",
"rules",
")"
] | Load a test suite with test lines from the provided TAP file.
:returns: A ``unittest.TestSuite`` instance | [
"Load",
"a",
"test",
"suite",
"with",
"test",
"lines",
"from",
"the",
"provided",
"TAP",
"file",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/loader.py#L34-L47 |
python-tap/tappy | tap/loader.py | Loader.load_suite_from_stdin | def load_suite_from_stdin(self):
"""Load a test suite with test lines from the TAP stream on STDIN.
:returns: A ``unittest.TestSuite`` instance
"""
suite = unittest.TestSuite()
rules = Rules("stream", suite)
line_generator = self._parser.parse_stdin()
return self._load_lines("stream", line_generator, suite, rules) | python | def load_suite_from_stdin(self):
"""Load a test suite with test lines from the TAP stream on STDIN.
:returns: A ``unittest.TestSuite`` instance
"""
suite = unittest.TestSuite()
rules = Rules("stream", suite)
line_generator = self._parser.parse_stdin()
return self._load_lines("stream", line_generator, suite, rules) | [
"def",
"load_suite_from_stdin",
"(",
"self",
")",
":",
"suite",
"=",
"unittest",
".",
"TestSuite",
"(",
")",
"rules",
"=",
"Rules",
"(",
"\"stream\"",
",",
"suite",
")",
"line_generator",
"=",
"self",
".",
"_parser",
".",
"parse_stdin",
"(",
")",
"return",
"self",
".",
"_load_lines",
"(",
"\"stream\"",
",",
"line_generator",
",",
"suite",
",",
"rules",
")"
] | Load a test suite with test lines from the TAP stream on STDIN.
:returns: A ``unittest.TestSuite`` instance | [
"Load",
"a",
"test",
"suite",
"with",
"test",
"lines",
"from",
"the",
"TAP",
"stream",
"on",
"STDIN",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/loader.py#L49-L57 |
python-tap/tappy | tap/loader.py | Loader._load_lines | def _load_lines(self, filename, line_generator, suite, rules):
"""Load a suite with lines produced by the line generator."""
line_counter = 0
for line in line_generator:
line_counter += 1
if line.category in self.ignored_lines:
continue
if line.category == "test":
suite.addTest(Adapter(filename, line))
rules.saw_test()
elif line.category == "plan":
if line.skip:
rules.handle_skipping_plan(line)
return suite
rules.saw_plan(line, line_counter)
elif line.category == "bail":
rules.handle_bail(line)
return suite
elif line.category == "version":
rules.saw_version_at(line_counter)
rules.check(line_counter)
return suite | python | def _load_lines(self, filename, line_generator, suite, rules):
"""Load a suite with lines produced by the line generator."""
line_counter = 0
for line in line_generator:
line_counter += 1
if line.category in self.ignored_lines:
continue
if line.category == "test":
suite.addTest(Adapter(filename, line))
rules.saw_test()
elif line.category == "plan":
if line.skip:
rules.handle_skipping_plan(line)
return suite
rules.saw_plan(line, line_counter)
elif line.category == "bail":
rules.handle_bail(line)
return suite
elif line.category == "version":
rules.saw_version_at(line_counter)
rules.check(line_counter)
return suite | [
"def",
"_load_lines",
"(",
"self",
",",
"filename",
",",
"line_generator",
",",
"suite",
",",
"rules",
")",
":",
"line_counter",
"=",
"0",
"for",
"line",
"in",
"line_generator",
":",
"line_counter",
"+=",
"1",
"if",
"line",
".",
"category",
"in",
"self",
".",
"ignored_lines",
":",
"continue",
"if",
"line",
".",
"category",
"==",
"\"test\"",
":",
"suite",
".",
"addTest",
"(",
"Adapter",
"(",
"filename",
",",
"line",
")",
")",
"rules",
".",
"saw_test",
"(",
")",
"elif",
"line",
".",
"category",
"==",
"\"plan\"",
":",
"if",
"line",
".",
"skip",
":",
"rules",
".",
"handle_skipping_plan",
"(",
"line",
")",
"return",
"suite",
"rules",
".",
"saw_plan",
"(",
"line",
",",
"line_counter",
")",
"elif",
"line",
".",
"category",
"==",
"\"bail\"",
":",
"rules",
".",
"handle_bail",
"(",
"line",
")",
"return",
"suite",
"elif",
"line",
".",
"category",
"==",
"\"version\"",
":",
"rules",
".",
"saw_version_at",
"(",
"line_counter",
")",
"rules",
".",
"check",
"(",
"line_counter",
")",
"return",
"suite"
] | Load a suite with lines produced by the line generator. | [
"Load",
"a",
"suite",
"with",
"lines",
"produced",
"by",
"the",
"line",
"generator",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/loader.py#L66-L90 |
python-tap/tappy | tap/tracker.py | Tracker._track | def _track(self, class_name):
"""Keep track of which test cases have executed."""
if self._test_cases.get(class_name) is None:
if self.streaming and self.header:
self._write_test_case_header(class_name, self.stream)
self._test_cases[class_name] = []
if self.combined:
self.combined_test_cases_seen.append(class_name) | python | def _track(self, class_name):
"""Keep track of which test cases have executed."""
if self._test_cases.get(class_name) is None:
if self.streaming and self.header:
self._write_test_case_header(class_name, self.stream)
self._test_cases[class_name] = []
if self.combined:
self.combined_test_cases_seen.append(class_name) | [
"def",
"_track",
"(",
"self",
",",
"class_name",
")",
":",
"if",
"self",
".",
"_test_cases",
".",
"get",
"(",
"class_name",
")",
"is",
"None",
":",
"if",
"self",
".",
"streaming",
"and",
"self",
".",
"header",
":",
"self",
".",
"_write_test_case_header",
"(",
"class_name",
",",
"self",
".",
"stream",
")",
"self",
".",
"_test_cases",
"[",
"class_name",
"]",
"=",
"[",
"]",
"if",
"self",
".",
"combined",
":",
"self",
".",
"combined_test_cases_seen",
".",
"append",
"(",
"class_name",
")"
] | Keep track of which test cases have executed. | [
"Keep",
"track",
"of",
"which",
"test",
"cases",
"have",
"executed",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/tracker.py#L76-L84 |
python-tap/tappy | tap/tracker.py | Tracker.set_plan | def set_plan(self, total):
"""Notify the tracker how many total tests there will be."""
self.plan = total
if self.streaming:
# This will only write the plan if we haven't written it
# already but we want to check if we already wrote a
# test out (in which case we can't just write the plan out
# right here).
if not self.combined_test_cases_seen:
self._write_plan(self.stream)
elif not self.combined:
raise ValueError(
"set_plan can only be used with combined or streaming output"
) | python | def set_plan(self, total):
"""Notify the tracker how many total tests there will be."""
self.plan = total
if self.streaming:
# This will only write the plan if we haven't written it
# already but we want to check if we already wrote a
# test out (in which case we can't just write the plan out
# right here).
if not self.combined_test_cases_seen:
self._write_plan(self.stream)
elif not self.combined:
raise ValueError(
"set_plan can only be used with combined or streaming output"
) | [
"def",
"set_plan",
"(",
"self",
",",
"total",
")",
":",
"self",
".",
"plan",
"=",
"total",
"if",
"self",
".",
"streaming",
":",
"# This will only write the plan if we haven't written it",
"# already but we want to check if we already wrote a",
"# test out (in which case we can't just write the plan out",
"# right here).",
"if",
"not",
"self",
".",
"combined_test_cases_seen",
":",
"self",
".",
"_write_plan",
"(",
"self",
".",
"stream",
")",
"elif",
"not",
"self",
".",
"combined",
":",
"raise",
"ValueError",
"(",
"\"set_plan can only be used with combined or streaming output\"",
")"
] | Notify the tracker how many total tests there will be. | [
"Notify",
"the",
"tracker",
"how",
"many",
"total",
"tests",
"there",
"will",
"be",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/tracker.py#L134-L147 |
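A streaming Tracker emits the plan immediately when set_plan is called before any result; the constructor keywords and the add_ok/add_not_ok recording methods are assumptions based on the attributes used elsewhere in this module.

import sys

from tap.tracker import Tracker

tracker = Tracker(streaming=True, stream=sys.stdout)
tracker.set_plan(2)                                   # writes "1..2" up front
tracker.add_ok("MathTests", "addition works")         # roughly "ok 1 - addition works"
tracker.add_not_ok("MathTests", "subtraction works")  # roughly "not ok 2 - ..."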
python-tap/tappy | tap/tracker.py | Tracker.generate_tap_reports | def generate_tap_reports(self):
"""Generate TAP reports.
The results are either combined into a single output file or
the output file name is generated from the test case.
"""
# We're streaming but set_plan wasn't called, so we can only
# know the plan now (at the end).
if self.streaming and not self._plan_written:
print("1..{0}".format(self.combined_line_number), file=self.stream)
self._plan_written = True
return
if self.combined:
combined_file = "testresults.tap"
if self.outdir:
combined_file = os.path.join(self.outdir, combined_file)
with open(combined_file, "w") as out_file:
self._write_tap_version(out_file)
if self.plan is not None:
print("1..{0}".format(self.plan), file=out_file)
for test_case in self.combined_test_cases_seen:
self.generate_tap_report(
test_case, self._test_cases[test_case], out_file
)
if self.plan is None:
print("1..{0}".format(self.combined_line_number), file=out_file)
else:
for test_case, tap_lines in self._test_cases.items():
with open(self._get_tap_file_path(test_case), "w") as out_file:
self._write_tap_version(out_file)
self.generate_tap_report(test_case, tap_lines, out_file) | python | def generate_tap_reports(self):
"""Generate TAP reports.
The results are either combined into a single output file or
the output file name is generated from the test case.
"""
# We're streaming but set_plan wasn't called, so we can only
# know the plan now (at the end).
if self.streaming and not self._plan_written:
print("1..{0}".format(self.combined_line_number), file=self.stream)
self._plan_written = True
return
if self.combined:
combined_file = "testresults.tap"
if self.outdir:
combined_file = os.path.join(self.outdir, combined_file)
with open(combined_file, "w") as out_file:
self._write_tap_version(out_file)
if self.plan is not None:
print("1..{0}".format(self.plan), file=out_file)
for test_case in self.combined_test_cases_seen:
self.generate_tap_report(
test_case, self._test_cases[test_case], out_file
)
if self.plan is None:
print("1..{0}".format(self.combined_line_number), file=out_file)
else:
for test_case, tap_lines in self._test_cases.items():
with open(self._get_tap_file_path(test_case), "w") as out_file:
self._write_tap_version(out_file)
self.generate_tap_report(test_case, tap_lines, out_file) | [
"def",
"generate_tap_reports",
"(",
"self",
")",
":",
"# We're streaming but set_plan wasn't called, so we can only",
"# know the plan now (at the end).",
"if",
"self",
".",
"streaming",
"and",
"not",
"self",
".",
"_plan_written",
":",
"print",
"(",
"\"1..{0}\"",
".",
"format",
"(",
"self",
".",
"combined_line_number",
")",
",",
"file",
"=",
"self",
".",
"stream",
")",
"self",
".",
"_plan_written",
"=",
"True",
"return",
"if",
"self",
".",
"combined",
":",
"combined_file",
"=",
"\"testresults.tap\"",
"if",
"self",
".",
"outdir",
":",
"combined_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"outdir",
",",
"combined_file",
")",
"with",
"open",
"(",
"combined_file",
",",
"\"w\"",
")",
"as",
"out_file",
":",
"self",
".",
"_write_tap_version",
"(",
"out_file",
")",
"if",
"self",
".",
"plan",
"is",
"not",
"None",
":",
"print",
"(",
"\"1..{0}\"",
".",
"format",
"(",
"self",
".",
"plan",
")",
",",
"file",
"=",
"out_file",
")",
"for",
"test_case",
"in",
"self",
".",
"combined_test_cases_seen",
":",
"self",
".",
"generate_tap_report",
"(",
"test_case",
",",
"self",
".",
"_test_cases",
"[",
"test_case",
"]",
",",
"out_file",
")",
"if",
"self",
".",
"plan",
"is",
"None",
":",
"print",
"(",
"\"1..{0}\"",
".",
"format",
"(",
"self",
".",
"combined_line_number",
")",
",",
"file",
"=",
"out_file",
")",
"else",
":",
"for",
"test_case",
",",
"tap_lines",
"in",
"self",
".",
"_test_cases",
".",
"items",
"(",
")",
":",
"with",
"open",
"(",
"self",
".",
"_get_tap_file_path",
"(",
"test_case",
")",
",",
"\"w\"",
")",
"as",
"out_file",
":",
"self",
".",
"_write_tap_version",
"(",
"out_file",
")",
"self",
".",
"generate_tap_report",
"(",
"test_case",
",",
"tap_lines",
",",
"out_file",
")"
] | Generate TAP reports.
The results are either combined into a single output file or
the output file name is generated from the test case. | [
"Generate",
"TAP",
"reports",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/tracker.py#L149-L180 |
python-tap/tappy | tap/tracker.py | Tracker._write_plan | def _write_plan(self, stream):
"""Write the plan line to the stream.
If we have a plan and have not yet written it out, write it to
the given stream.
"""
if self.plan is not None:
if not self._plan_written:
print("1..{0}".format(self.plan), file=stream)
self._plan_written = True | python | def _write_plan(self, stream):
"""Write the plan line to the stream.
If we have a plan and have not yet written it out, write it to
the given stream.
"""
if self.plan is not None:
if not self._plan_written:
print("1..{0}".format(self.plan), file=stream)
self._plan_written = True | [
"def",
"_write_plan",
"(",
"self",
",",
"stream",
")",
":",
"if",
"self",
".",
"plan",
"is",
"not",
"None",
":",
"if",
"not",
"self",
".",
"_plan_written",
":",
"print",
"(",
"\"1..{0}\"",
".",
"format",
"(",
"self",
".",
"plan",
")",
",",
"file",
"=",
"stream",
")",
"self",
".",
"_plan_written",
"=",
"True"
] | Write the plan line to the stream.
If we have a plan and have not yet written it out, write it to
the given stream. | [
"Write",
"the",
"plan",
"line",
"to",
"the",
"stream",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/tracker.py#L201-L210 |
python-tap/tappy | tap/tracker.py | Tracker._get_tap_file_path | def _get_tap_file_path(self, test_case):
"""Get the TAP output file path for the test case."""
sanitized_test_case = test_case.translate(self._sanitized_table)
tap_file = sanitized_test_case + ".tap"
if self.outdir:
return os.path.join(self.outdir, tap_file)
return tap_file | python | def _get_tap_file_path(self, test_case):
"""Get the TAP output file path for the test case."""
sanitized_test_case = test_case.translate(self._sanitized_table)
tap_file = sanitized_test_case + ".tap"
if self.outdir:
return os.path.join(self.outdir, tap_file)
return tap_file | [
"def",
"_get_tap_file_path",
"(",
"self",
",",
"test_case",
")",
":",
"sanitized_test_case",
"=",
"test_case",
".",
"translate",
"(",
"self",
".",
"_sanitized_table",
")",
"tap_file",
"=",
"sanitized_test_case",
"+",
"\".tap\"",
"if",
"self",
".",
"outdir",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"outdir",
",",
"tap_file",
")",
"return",
"tap_file"
] | Get the TAP output file path for the test case. | [
"Get",
"the",
"TAP",
"output",
"file",
"path",
"for",
"the",
"test",
"case",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/tracker.py#L217-L223 |
python-tap/tappy | tap/main.py | main | def main(argv=sys.argv, stream=sys.stderr):
"""Entry point for ``tappy`` command."""
args = parse_args(argv)
suite = build_suite(args)
runner = unittest.TextTestRunner(verbosity=args.verbose, stream=stream)
result = runner.run(suite)
return get_status(result) | python | def main(argv=sys.argv, stream=sys.stderr):
"""Entry point for ``tappy`` command."""
args = parse_args(argv)
suite = build_suite(args)
runner = unittest.TextTestRunner(verbosity=args.verbose, stream=stream)
result = runner.run(suite)
return get_status(result) | [
"def",
"main",
"(",
"argv",
"=",
"sys",
".",
"argv",
",",
"stream",
"=",
"sys",
".",
"stderr",
")",
":",
"args",
"=",
"parse_args",
"(",
"argv",
")",
"suite",
"=",
"build_suite",
"(",
"args",
")",
"runner",
"=",
"unittest",
".",
"TextTestRunner",
"(",
"verbosity",
"=",
"args",
".",
"verbose",
",",
"stream",
"=",
"stream",
")",
"result",
"=",
"runner",
".",
"run",
"(",
"suite",
")",
"return",
"get_status",
"(",
"result",
")"
] | Entry point for ``tappy`` command. | [
"Entry",
"point",
"for",
"tappy",
"command",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/main.py#L11-L18 |
python-tap/tappy | tap/main.py | build_suite | def build_suite(args):
"""Build a test suite by loading TAP files or a TAP stream."""
loader = Loader()
if len(args.files) == 0 or args.files[0] == "-":
suite = loader.load_suite_from_stdin()
else:
suite = loader.load(args.files)
return suite | python | def build_suite(args):
"""Build a test suite by loading TAP files or a TAP stream."""
loader = Loader()
if len(args.files) == 0 or args.files[0] == "-":
suite = loader.load_suite_from_stdin()
else:
suite = loader.load(args.files)
return suite | [
"def",
"build_suite",
"(",
"args",
")",
":",
"loader",
"=",
"Loader",
"(",
")",
"if",
"len",
"(",
"args",
".",
"files",
")",
"==",
"0",
"or",
"args",
".",
"files",
"[",
"0",
"]",
"==",
"\"-\"",
":",
"suite",
"=",
"loader",
".",
"load_suite_from_stdin",
"(",
")",
"else",
":",
"suite",
"=",
"loader",
".",
"load",
"(",
"args",
".",
"files",
")",
"return",
"suite"
] | Build a test suite by loading TAP files or a TAP stream. | [
"Build",
"a",
"test",
"suite",
"by",
"loading",
"TAP",
"files",
"or",
"a",
"TAP",
"stream",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/main.py#L21-L28 |
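main() can be driven programmatically; how argv[0] is consumed depends on parse_args (not shown here), so the invocation below is an assumption modeled on argparse convention. Passing no file arguments, or a single "-", makes build_suite read a TAP stream from stdin instead.

from tap.main import main

# Roughly equivalent to running "tappy results.tap" from a shell.
exit_code = main(argv=["tappy", "results.tap"])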
python-tap/tappy | tap/adapter.py | Adapter.addFailure | def addFailure(self, result):
"""Add a failure to the result."""
result.addFailure(self, (Exception, Exception(), None))
# Since TAP will not provide assertion data, clean up the assertion
# section so it is not so spaced out.
test, err = result.failures[-1]
result.failures[-1] = (test, "") | python | def addFailure(self, result):
"""Add a failure to the result."""
result.addFailure(self, (Exception, Exception(), None))
# Since TAP will not provide assertion data, clean up the assertion
# section so it is not so spaced out.
test, err = result.failures[-1]
result.failures[-1] = (test, "") | [
"def",
"addFailure",
"(",
"self",
",",
"result",
")",
":",
"result",
".",
"addFailure",
"(",
"self",
",",
"(",
"Exception",
",",
"Exception",
"(",
")",
",",
"None",
")",
")",
"# Since TAP will not provide assertion data, clean up the assertion",
"# section so it is not so spaced out.",
"test",
",",
"err",
"=",
"result",
".",
"failures",
"[",
"-",
"1",
"]",
"result",
".",
"failures",
"[",
"-",
"1",
"]",
"=",
"(",
"test",
",",
"\"\"",
")"
] | Add a failure to the result. | [
"Add",
"a",
"failure",
"to",
"the",
"result",
"."
] | train | https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/adapter.py#L43-L49 |
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/events.py | mptt_before_insert | def mptt_before_insert(mapper, connection, instance):
""" Based on example
https://bitbucket.org/zzzeek/sqlalchemy/src/73095b353124/examples/nested_sets/nested_sets.py?at=master
"""
table = _get_tree_table(mapper)
db_pk = instance.get_pk_column()
table_pk = getattr(table.c, db_pk.name)
if instance.parent_id is None:
instance.left = 1
instance.right = 2
instance.level = instance.get_default_level()
tree_id = connection.scalar(
select(
[
func.max(table.c.tree_id) + 1
]
)
) or 1
instance.tree_id = tree_id
else:
(parent_pos_left,
parent_pos_right,
parent_tree_id,
parent_level) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.tree_id,
table.c.level
]
).where(
table_pk == instance.parent_id
)
).fetchone()
# Update key of right side
connection.execute(
table.update(
and_(table.c.rgt >= parent_pos_right,
table.c.tree_id == parent_tree_id)
).values(
lft=case(
[
(
table.c.lft > parent_pos_right,
table.c.lft + 2
)
],
else_=table.c.lft
),
rgt=case(
[
(
table.c.rgt >= parent_pos_right,
table.c.rgt + 2
)
],
else_=table.c.rgt
)
)
)
instance.level = parent_level + 1
instance.tree_id = parent_tree_id
instance.left = parent_pos_right
instance.right = parent_pos_right + 1 | python | def mptt_before_insert(mapper, connection, instance):
""" Based on example
https://bitbucket.org/zzzeek/sqlalchemy/src/73095b353124/examples/nested_sets/nested_sets.py?at=master
"""
table = _get_tree_table(mapper)
db_pk = instance.get_pk_column()
table_pk = getattr(table.c, db_pk.name)
if instance.parent_id is None:
instance.left = 1
instance.right = 2
instance.level = instance.get_default_level()
tree_id = connection.scalar(
select(
[
func.max(table.c.tree_id) + 1
]
)
) or 1
instance.tree_id = tree_id
else:
(parent_pos_left,
parent_pos_right,
parent_tree_id,
parent_level) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.tree_id,
table.c.level
]
).where(
table_pk == instance.parent_id
)
).fetchone()
# Update key of right side
connection.execute(
table.update(
and_(table.c.rgt >= parent_pos_right,
table.c.tree_id == parent_tree_id)
).values(
lft=case(
[
(
table.c.lft > parent_pos_right,
table.c.lft + 2
)
],
else_=table.c.lft
),
rgt=case(
[
(
table.c.rgt >= parent_pos_right,
table.c.rgt + 2
)
],
else_=table.c.rgt
)
)
)
instance.level = parent_level + 1
instance.tree_id = parent_tree_id
instance.left = parent_pos_right
instance.right = parent_pos_right + 1 | [
"def",
"mptt_before_insert",
"(",
"mapper",
",",
"connection",
",",
"instance",
")",
":",
"table",
"=",
"_get_tree_table",
"(",
"mapper",
")",
"db_pk",
"=",
"instance",
".",
"get_pk_column",
"(",
")",
"table_pk",
"=",
"getattr",
"(",
"table",
".",
"c",
",",
"db_pk",
".",
"name",
")",
"if",
"instance",
".",
"parent_id",
"is",
"None",
":",
"instance",
".",
"left",
"=",
"1",
"instance",
".",
"right",
"=",
"2",
"instance",
".",
"level",
"=",
"instance",
".",
"get_default_level",
"(",
")",
"tree_id",
"=",
"connection",
".",
"scalar",
"(",
"select",
"(",
"[",
"func",
".",
"max",
"(",
"table",
".",
"c",
".",
"tree_id",
")",
"+",
"1",
"]",
")",
")",
"or",
"1",
"instance",
".",
"tree_id",
"=",
"tree_id",
"else",
":",
"(",
"parent_pos_left",
",",
"parent_pos_right",
",",
"parent_tree_id",
",",
"parent_level",
")",
"=",
"connection",
".",
"execute",
"(",
"select",
"(",
"[",
"table",
".",
"c",
".",
"lft",
",",
"table",
".",
"c",
".",
"rgt",
",",
"table",
".",
"c",
".",
"tree_id",
",",
"table",
".",
"c",
".",
"level",
"]",
")",
".",
"where",
"(",
"table_pk",
"==",
"instance",
".",
"parent_id",
")",
")",
".",
"fetchone",
"(",
")",
"# Update key of right side",
"connection",
".",
"execute",
"(",
"table",
".",
"update",
"(",
"and_",
"(",
"table",
".",
"c",
".",
"rgt",
">=",
"parent_pos_right",
",",
"table",
".",
"c",
".",
"tree_id",
"==",
"parent_tree_id",
")",
")",
".",
"values",
"(",
"lft",
"=",
"case",
"(",
"[",
"(",
"table",
".",
"c",
".",
"lft",
">",
"parent_pos_right",
",",
"table",
".",
"c",
".",
"lft",
"+",
"2",
")",
"]",
",",
"else_",
"=",
"table",
".",
"c",
".",
"lft",
")",
",",
"rgt",
"=",
"case",
"(",
"[",
"(",
"table",
".",
"c",
".",
"rgt",
">=",
"parent_pos_right",
",",
"table",
".",
"c",
".",
"rgt",
"+",
"2",
")",
"]",
",",
"else_",
"=",
"table",
".",
"c",
".",
"rgt",
")",
")",
")",
"instance",
".",
"level",
"=",
"parent_level",
"+",
"1",
"instance",
".",
"tree_id",
"=",
"parent_tree_id",
"instance",
".",
"left",
"=",
"parent_pos_right",
"instance",
".",
"right",
"=",
"parent_pos_right",
"+",
"1"
] | Based on example
https://bitbucket.org/zzzeek/sqlalchemy/src/73095b353124/examples/nested_sets/nested_sets.py?at=master | [
"Based",
"on",
"example",
"https",
":",
"//",
"bitbucket",
".",
"org",
"/",
"zzzeek",
"/",
"sqlalchemy",
"/",
"src",
"/",
"73095b353124",
"/",
"examples",
"/",
"nested_sets",
"/",
"nested_sets",
".",
"py?at",
"=",
"master"
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/events.py#L83-L150 |
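For illustration, a minimal hypothetical sketch of how this insert listener plays out; the demo model, ids and in-memory engine are assumptions, not part of the source:

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy_mptt.mixins import BaseNestedSets

Base = declarative_base()

class Tree(Base, BaseNestedSets):  # hypothetical demo model
    __tablename__ = 'tree'
    id = Column(Integer, primary_key=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add(Tree(id=1))               # no parent: gets lft=1, rgt=2, a fresh tree_id
session.commit()
session.add(Tree(id=2, parent_id=1))  # child: inserted at the parent's old rgt
session.commit()                      # root widens to (lft, rgt) == (1, 4); child gets (2, 3)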
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/events.py | mptt_before_update | def mptt_before_update(mapper, connection, instance):
""" Based on this example:
http://stackoverflow.com/questions/889527/move-node-in-nested-set
"""
node_id = getattr(instance, instance.get_pk_name())
table = _get_tree_table(mapper)
db_pk = instance.get_pk_column()
default_level = instance.get_default_level()
table_pk = getattr(table.c, db_pk.name)
mptt_move_inside = None
left_sibling = None
left_sibling_tree_id = None
if hasattr(instance, 'mptt_move_inside'):
mptt_move_inside = instance.mptt_move_inside
if hasattr(instance, 'mptt_move_before'):
(
right_sibling_left,
right_sibling_right,
right_sibling_parent,
right_sibling_level,
right_sibling_tree_id
) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.parent_id,
table.c.level,
table.c.tree_id
]
).where(
table_pk == instance.mptt_move_before
)
).fetchone()
current_lvl_nodes = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.parent_id,
table.c.tree_id
]
).where(
and_(
table.c.level == right_sibling_level,
table.c.tree_id == right_sibling_tree_id,
table.c.lft < right_sibling_left
)
)
).fetchall()
if current_lvl_nodes:
(
left_sibling_left,
left_sibling_right,
left_sibling_parent,
left_sibling_tree_id
) = current_lvl_nodes[-1]
instance.parent_id = left_sibling_parent
left_sibling = {
'lft': left_sibling_left,
'rgt': left_sibling_right,
'is_parent': False
}
# if move_before to top level
elif not right_sibling_parent:
left_sibling_tree_id = right_sibling_tree_id - 1
# if placed after a particular node
if hasattr(instance, 'mptt_move_after'):
(
left_sibling_left,
left_sibling_right,
left_sibling_parent,
left_sibling_tree_id
) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.parent_id,
table.c.tree_id
]
).where(
table_pk == instance.mptt_move_after
)
).fetchone()
instance.parent_id = left_sibling_parent
left_sibling = {
'lft': left_sibling_left,
'rgt': left_sibling_right,
'is_parent': False
}
""" Get subtree from node
SELECT id, name, level FROM my_tree
WHERE left_key >= $left_key AND right_key <= $right_key
ORDER BY left_key
"""
subtree = connection.execute(
select([table_pk])
.where(
and_(
table.c.lft >= instance.left,
table.c.rgt <= instance.right,
table.c.tree_id == instance.tree_id
)
).order_by(
table.c.lft
)
).fetchall()
subtree = [x[0] for x in subtree]
""" step 0: Initialize parameters.
Put there left and right position of moving node
"""
(
node_pos_left,
node_pos_right,
node_tree_id,
node_parent_id,
node_level
) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.tree_id,
table.c.parent_id,
table.c.level
]
).where(
table_pk == node_id
)
).fetchone()
# if instance just update w/o move
# XXX why this str() around parent_id comparison?
if not left_sibling \
and str(node_parent_id) == str(instance.parent_id) \
and not mptt_move_inside:
if left_sibling_tree_id is None:
return
# fix tree shortening
if instance.parent_id is not None:
(
parent_id,
parent_pos_right,
parent_pos_left,
parent_tree_id,
parent_level
) = connection.execute(
select(
[
table_pk,
table.c.rgt,
table.c.lft,
table.c.tree_id,
table.c.level
]
).where(
table_pk == instance.parent_id
)
).fetchone()
if node_parent_id is None and node_tree_id == parent_tree_id:
instance.parent_id = None
return
# delete from old tree
mptt_before_delete(mapper, connection, instance, False)
if instance.parent_id is not None:
""" Put there right position of new parent node (there moving node
should be moved)
"""
(
parent_id,
parent_pos_right,
parent_pos_left,
parent_tree_id,
parent_level
) = connection.execute(
select(
[
table_pk,
table.c.rgt,
table.c.lft,
table.c.tree_id,
table.c.level
]
).where(
table_pk == instance.parent_id
)
).fetchone()
# 'size' of moving node (including all its sub nodes)
node_size = node_pos_right - node_pos_left + 1
# left sibling node
if not left_sibling:
left_sibling = {
'lft': parent_pos_left,
'rgt': parent_pos_right,
'is_parent': True
}
# insert subtree in existing tree
instance.tree_id = parent_tree_id
_insert_subtree(
table,
connection,
node_size,
node_pos_left,
node_pos_right,
parent_pos_left,
parent_pos_right,
subtree,
parent_tree_id,
parent_level,
node_level,
left_sibling,
table_pk
)
else:
# if insert after
if left_sibling_tree_id or left_sibling_tree_id == 0:
tree_id = left_sibling_tree_id + 1
connection.execute(
table.update(
table.c.tree_id > left_sibling_tree_id
).values(
tree_id=table.c.tree_id + 1
)
)
# if just insert
else:
tree_id = connection.scalar(
select(
[
func.max(table.c.tree_id) + 1
]
)
)
connection.execute(
table.update(
table_pk.in_(
subtree
)
).values(
lft=table.c.lft - node_pos_left + 1,
rgt=table.c.rgt - node_pos_left + 1,
level=table.c.level - node_level + default_level,
tree_id=tree_id
)
) | python | def mptt_before_update(mapper, connection, instance):
""" Based on this example:
http://stackoverflow.com/questions/889527/move-node-in-nested-set
"""
node_id = getattr(instance, instance.get_pk_name())
table = _get_tree_table(mapper)
db_pk = instance.get_pk_column()
default_level = instance.get_default_level()
table_pk = getattr(table.c, db_pk.name)
mptt_move_inside = None
left_sibling = None
left_sibling_tree_id = None
if hasattr(instance, 'mptt_move_inside'):
mptt_move_inside = instance.mptt_move_inside
if hasattr(instance, 'mptt_move_before'):
(
right_sibling_left,
right_sibling_right,
right_sibling_parent,
right_sibling_level,
right_sibling_tree_id
) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.parent_id,
table.c.level,
table.c.tree_id
]
).where(
table_pk == instance.mptt_move_before
)
).fetchone()
current_lvl_nodes = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.parent_id,
table.c.tree_id
]
).where(
and_(
table.c.level == right_sibling_level,
table.c.tree_id == right_sibling_tree_id,
table.c.lft < right_sibling_left
)
)
).fetchall()
if current_lvl_nodes:
(
left_sibling_left,
left_sibling_right,
left_sibling_parent,
left_sibling_tree_id
) = current_lvl_nodes[-1]
instance.parent_id = left_sibling_parent
left_sibling = {
'lft': left_sibling_left,
'rgt': left_sibling_right,
'is_parent': False
}
# if move_before to top level
elif not right_sibling_parent:
left_sibling_tree_id = right_sibling_tree_id - 1
# if placed after a particular node
if hasattr(instance, 'mptt_move_after'):
(
left_sibling_left,
left_sibling_right,
left_sibling_parent,
left_sibling_tree_id
) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.parent_id,
table.c.tree_id
]
).where(
table_pk == instance.mptt_move_after
)
).fetchone()
instance.parent_id = left_sibling_parent
left_sibling = {
'lft': left_sibling_left,
'rgt': left_sibling_right,
'is_parent': False
}
""" Get subtree from node
SELECT id, name, level FROM my_tree
WHERE left_key >= $left_key AND right_key <= $right_key
ORDER BY left_key
"""
subtree = connection.execute(
select([table_pk])
.where(
and_(
table.c.lft >= instance.left,
table.c.rgt <= instance.right,
table.c.tree_id == instance.tree_id
)
).order_by(
table.c.lft
)
).fetchall()
subtree = [x[0] for x in subtree]
""" step 0: Initialize parameters.
Put there left and right position of moving node
"""
(
node_pos_left,
node_pos_right,
node_tree_id,
node_parent_id,
node_level
) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.tree_id,
table.c.parent_id,
table.c.level
]
).where(
table_pk == node_id
)
).fetchone()
# if instance just update w/o move
# XXX why this str() around parent_id comparison?
if not left_sibling \
and str(node_parent_id) == str(instance.parent_id) \
and not mptt_move_inside:
if left_sibling_tree_id is None:
return
# fix tree shortening
if instance.parent_id is not None:
(
parent_id,
parent_pos_right,
parent_pos_left,
parent_tree_id,
parent_level
) = connection.execute(
select(
[
table_pk,
table.c.rgt,
table.c.lft,
table.c.tree_id,
table.c.level
]
).where(
table_pk == instance.parent_id
)
).fetchone()
if node_parent_id is None and node_tree_id == parent_tree_id:
instance.parent_id = None
return
# delete from old tree
mptt_before_delete(mapper, connection, instance, False)
if instance.parent_id is not None:
""" Put there right position of new parent node (there moving node
should be moved)
"""
(
parent_id,
parent_pos_right,
parent_pos_left,
parent_tree_id,
parent_level
) = connection.execute(
select(
[
table_pk,
table.c.rgt,
table.c.lft,
table.c.tree_id,
table.c.level
]
).where(
table_pk == instance.parent_id
)
).fetchone()
# 'size' of moving node (including all its sub nodes)
node_size = node_pos_right - node_pos_left + 1
# left sibling node
if not left_sibling:
left_sibling = {
'lft': parent_pos_left,
'rgt': parent_pos_right,
'is_parent': True
}
# insert subtree in existing tree
instance.tree_id = parent_tree_id
_insert_subtree(
table,
connection,
node_size,
node_pos_left,
node_pos_right,
parent_pos_left,
parent_pos_right,
subtree,
parent_tree_id,
parent_level,
node_level,
left_sibling,
table_pk
)
else:
# if insert after
if left_sibling_tree_id or left_sibling_tree_id == 0:
tree_id = left_sibling_tree_id + 1
connection.execute(
table.update(
table.c.tree_id > left_sibling_tree_id
).values(
tree_id=table.c.tree_id + 1
)
)
# if just insert
else:
tree_id = connection.scalar(
select(
[
func.max(table.c.tree_id) + 1
]
)
)
connection.execute(
table.update(
table_pk.in_(
subtree
)
).values(
lft=table.c.lft - node_pos_left + 1,
rgt=table.c.rgt - node_pos_left + 1,
level=table.c.level - node_level + default_level,
tree_id=tree_id
)
) | [
"def",
"mptt_before_update",
"(",
"mapper",
",",
"connection",
",",
"instance",
")",
":",
"node_id",
"=",
"getattr",
"(",
"instance",
",",
"instance",
".",
"get_pk_name",
"(",
")",
")",
"table",
"=",
"_get_tree_table",
"(",
"mapper",
")",
"db_pk",
"=",
"instance",
".",
"get_pk_column",
"(",
")",
"default_level",
"=",
"instance",
".",
"get_default_level",
"(",
")",
"table_pk",
"=",
"getattr",
"(",
"table",
".",
"c",
",",
"db_pk",
".",
"name",
")",
"mptt_move_inside",
"=",
"None",
"left_sibling",
"=",
"None",
"left_sibling_tree_id",
"=",
"None",
"if",
"hasattr",
"(",
"instance",
",",
"'mptt_move_inside'",
")",
":",
"mptt_move_inside",
"=",
"instance",
".",
"mptt_move_inside",
"if",
"hasattr",
"(",
"instance",
",",
"'mptt_move_before'",
")",
":",
"(",
"right_sibling_left",
",",
"right_sibling_right",
",",
"right_sibling_parent",
",",
"right_sibling_level",
",",
"right_sibling_tree_id",
")",
"=",
"connection",
".",
"execute",
"(",
"select",
"(",
"[",
"table",
".",
"c",
".",
"lft",
",",
"table",
".",
"c",
".",
"rgt",
",",
"table",
".",
"c",
".",
"parent_id",
",",
"table",
".",
"c",
".",
"level",
",",
"table",
".",
"c",
".",
"tree_id",
"]",
")",
".",
"where",
"(",
"table_pk",
"==",
"instance",
".",
"mptt_move_before",
")",
")",
".",
"fetchone",
"(",
")",
"current_lvl_nodes",
"=",
"connection",
".",
"execute",
"(",
"select",
"(",
"[",
"table",
".",
"c",
".",
"lft",
",",
"table",
".",
"c",
".",
"rgt",
",",
"table",
".",
"c",
".",
"parent_id",
",",
"table",
".",
"c",
".",
"tree_id",
"]",
")",
".",
"where",
"(",
"and_",
"(",
"table",
".",
"c",
".",
"level",
"==",
"right_sibling_level",
",",
"table",
".",
"c",
".",
"tree_id",
"==",
"right_sibling_tree_id",
",",
"table",
".",
"c",
".",
"lft",
"<",
"right_sibling_left",
")",
")",
")",
".",
"fetchall",
"(",
")",
"if",
"current_lvl_nodes",
":",
"(",
"left_sibling_left",
",",
"left_sibling_right",
",",
"left_sibling_parent",
",",
"left_sibling_tree_id",
")",
"=",
"current_lvl_nodes",
"[",
"-",
"1",
"]",
"instance",
".",
"parent_id",
"=",
"left_sibling_parent",
"left_sibling",
"=",
"{",
"'lft'",
":",
"left_sibling_left",
",",
"'rgt'",
":",
"left_sibling_right",
",",
"'is_parent'",
":",
"False",
"}",
"# if move_before to top level",
"elif",
"not",
"right_sibling_parent",
":",
"left_sibling_tree_id",
"=",
"right_sibling_tree_id",
"-",
"1",
"# if placed after a particular node",
"if",
"hasattr",
"(",
"instance",
",",
"'mptt_move_after'",
")",
":",
"(",
"left_sibling_left",
",",
"left_sibling_right",
",",
"left_sibling_parent",
",",
"left_sibling_tree_id",
")",
"=",
"connection",
".",
"execute",
"(",
"select",
"(",
"[",
"table",
".",
"c",
".",
"lft",
",",
"table",
".",
"c",
".",
"rgt",
",",
"table",
".",
"c",
".",
"parent_id",
",",
"table",
".",
"c",
".",
"tree_id",
"]",
")",
".",
"where",
"(",
"table_pk",
"==",
"instance",
".",
"mptt_move_after",
")",
")",
".",
"fetchone",
"(",
")",
"instance",
".",
"parent_id",
"=",
"left_sibling_parent",
"left_sibling",
"=",
"{",
"'lft'",
":",
"left_sibling_left",
",",
"'rgt'",
":",
"left_sibling_right",
",",
"'is_parent'",
":",
"False",
"}",
"\"\"\" Get subtree from node\n\n SELECT id, name, level FROM my_tree\n WHERE left_key >= $left_key AND right_key <= $right_key\n ORDER BY left_key\n \"\"\"",
"subtree",
"=",
"connection",
".",
"execute",
"(",
"select",
"(",
"[",
"table_pk",
"]",
")",
".",
"where",
"(",
"and_",
"(",
"table",
".",
"c",
".",
"lft",
">=",
"instance",
".",
"left",
",",
"table",
".",
"c",
".",
"rgt",
"<=",
"instance",
".",
"right",
",",
"table",
".",
"c",
".",
"tree_id",
"==",
"instance",
".",
"tree_id",
")",
")",
".",
"order_by",
"(",
"table",
".",
"c",
".",
"lft",
")",
")",
".",
"fetchall",
"(",
")",
"subtree",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"subtree",
"]",
"\"\"\" step 0: Initialize parameters.\n\n Put there left and right position of moving node\n \"\"\"",
"(",
"node_pos_left",
",",
"node_pos_right",
",",
"node_tree_id",
",",
"node_parent_id",
",",
"node_level",
")",
"=",
"connection",
".",
"execute",
"(",
"select",
"(",
"[",
"table",
".",
"c",
".",
"lft",
",",
"table",
".",
"c",
".",
"rgt",
",",
"table",
".",
"c",
".",
"tree_id",
",",
"table",
".",
"c",
".",
"parent_id",
",",
"table",
".",
"c",
".",
"level",
"]",
")",
".",
"where",
"(",
"table_pk",
"==",
"node_id",
")",
")",
".",
"fetchone",
"(",
")",
"# if instance just update w/o move",
"# XXX why this str() around parent_id comparison?",
"if",
"not",
"left_sibling",
"and",
"str",
"(",
"node_parent_id",
")",
"==",
"str",
"(",
"instance",
".",
"parent_id",
")",
"and",
"not",
"mptt_move_inside",
":",
"if",
"left_sibling_tree_id",
"is",
"None",
":",
"return",
"# fix tree shorting",
"if",
"instance",
".",
"parent_id",
"is",
"not",
"None",
":",
"(",
"parent_id",
",",
"parent_pos_right",
",",
"parent_pos_left",
",",
"parent_tree_id",
",",
"parent_level",
")",
"=",
"connection",
".",
"execute",
"(",
"select",
"(",
"[",
"table_pk",
",",
"table",
".",
"c",
".",
"rgt",
",",
"table",
".",
"c",
".",
"lft",
",",
"table",
".",
"c",
".",
"tree_id",
",",
"table",
".",
"c",
".",
"level",
"]",
")",
".",
"where",
"(",
"table_pk",
"==",
"instance",
".",
"parent_id",
")",
")",
".",
"fetchone",
"(",
")",
"if",
"node_parent_id",
"is",
"None",
"and",
"node_tree_id",
"==",
"parent_tree_id",
":",
"instance",
".",
"parent_id",
"=",
"None",
"return",
"# delete from old tree",
"mptt_before_delete",
"(",
"mapper",
",",
"connection",
",",
"instance",
",",
"False",
")",
"if",
"instance",
".",
"parent_id",
"is",
"not",
"None",
":",
"\"\"\" Put there right position of new parent node (there moving node\n should be moved)\n \"\"\"",
"(",
"parent_id",
",",
"parent_pos_right",
",",
"parent_pos_left",
",",
"parent_tree_id",
",",
"parent_level",
")",
"=",
"connection",
".",
"execute",
"(",
"select",
"(",
"[",
"table_pk",
",",
"table",
".",
"c",
".",
"rgt",
",",
"table",
".",
"c",
".",
"lft",
",",
"table",
".",
"c",
".",
"tree_id",
",",
"table",
".",
"c",
".",
"level",
"]",
")",
".",
"where",
"(",
"table_pk",
"==",
"instance",
".",
"parent_id",
")",
")",
".",
"fetchone",
"(",
")",
"# 'size' of moving node (including all it's sub nodes)",
"node_size",
"=",
"node_pos_right",
"-",
"node_pos_left",
"+",
"1",
"# left sibling node",
"if",
"not",
"left_sibling",
":",
"left_sibling",
"=",
"{",
"'lft'",
":",
"parent_pos_left",
",",
"'rgt'",
":",
"parent_pos_right",
",",
"'is_parent'",
":",
"True",
"}",
"# insert subtree in exist tree",
"instance",
".",
"tree_id",
"=",
"parent_tree_id",
"_insert_subtree",
"(",
"table",
",",
"connection",
",",
"node_size",
",",
"node_pos_left",
",",
"node_pos_right",
",",
"parent_pos_left",
",",
"parent_pos_right",
",",
"subtree",
",",
"parent_tree_id",
",",
"parent_level",
",",
"node_level",
",",
"left_sibling",
",",
"table_pk",
")",
"else",
":",
"# if insert after",
"if",
"left_sibling_tree_id",
"or",
"left_sibling_tree_id",
"==",
"0",
":",
"tree_id",
"=",
"left_sibling_tree_id",
"+",
"1",
"connection",
".",
"execute",
"(",
"table",
".",
"update",
"(",
"table",
".",
"c",
".",
"tree_id",
">",
"left_sibling_tree_id",
")",
".",
"values",
"(",
"tree_id",
"=",
"table",
".",
"c",
".",
"tree_id",
"+",
"1",
")",
")",
"# if just insert",
"else",
":",
"tree_id",
"=",
"connection",
".",
"scalar",
"(",
"select",
"(",
"[",
"func",
".",
"max",
"(",
"table",
".",
"c",
".",
"tree_id",
")",
"+",
"1",
"]",
")",
")",
"connection",
".",
"execute",
"(",
"table",
".",
"update",
"(",
"table_pk",
".",
"in_",
"(",
"subtree",
")",
")",
".",
"values",
"(",
"lft",
"=",
"table",
".",
"c",
".",
"lft",
"-",
"node_pos_left",
"+",
"1",
",",
"rgt",
"=",
"table",
".",
"c",
".",
"rgt",
"-",
"node_pos_left",
"+",
"1",
",",
"level",
"=",
"table",
".",
"c",
".",
"level",
"-",
"node_level",
"+",
"default_level",
",",
"tree_id",
"=",
"tree_id",
")",
")"
] | Based on this example:
http://stackoverflow.com/questions/889527/move-node-in-nested-set | [
"Based",
"on",
"this",
"example",
":",
"http",
":",
"//",
"stackoverflow",
".",
"com",
"/",
"questions",
"/",
"889527",
"/",
"move",
"-",
"node",
"-",
"in",
"-",
"nested",
"-",
"set"
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/events.py#L221-L479 |
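A short hypothetical sketch of what drives this listener: reassigning parent_id (or using the move_* helpers further down) and flushing; Tree is the assumed demo model from the sketch above.

node = session.query(Tree).get(2)
node.parent_id = 3      # reparent node 2 under node 3
session.add(node)
session.commit()        # lft/rgt/level/tree_id are rewritten by the listener here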
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/events.py | TreesManager.after_flush_postexec | def after_flush_postexec(self, session, context):
"""
Event listener to recursively expire `left` and `right` attributes of the
parents of all modified instances that are part of this flush.
"""
instances = self.instances[session]
while instances:
instance = instances.pop()
if instance not in session:
continue
parent = self.get_parent_value(instance)
while parent != NO_VALUE and parent is not None:
instances.discard(parent)
session.expire(parent, ['left', 'right', 'tree_id', 'level'])
parent = self.get_parent_value(parent)
else:
session.expire(instance, ['left', 'right', 'tree_id', 'level'])
self.expire_session_for_children(session, instance) | python | def after_flush_postexec(self, session, context):
"""
Event listener to recursively expire `left` and `right` attributes of the
parents of all modified instances that are part of this flush.
"""
instances = self.instances[session]
while instances:
instance = instances.pop()
if instance not in session:
continue
parent = self.get_parent_value(instance)
while parent != NO_VALUE and parent is not None:
instances.discard(parent)
session.expire(parent, ['left', 'right', 'tree_id', 'level'])
parent = self.get_parent_value(parent)
else:
session.expire(instance, ['left', 'right', 'tree_id', 'level'])
self.expire_session_for_children(session, instance) | [
"def",
"after_flush_postexec",
"(",
"self",
",",
"session",
",",
"context",
")",
":",
"instances",
"=",
"self",
".",
"instances",
"[",
"session",
"]",
"while",
"instances",
":",
"instance",
"=",
"instances",
".",
"pop",
"(",
")",
"if",
"instance",
"not",
"in",
"session",
":",
"continue",
"parent",
"=",
"self",
".",
"get_parent_value",
"(",
"instance",
")",
"while",
"parent",
"!=",
"NO_VALUE",
"and",
"parent",
"is",
"not",
"None",
":",
"instances",
".",
"discard",
"(",
"parent",
")",
"session",
".",
"expire",
"(",
"parent",
",",
"[",
"'left'",
",",
"'right'",
",",
"'tree_id'",
",",
"'level'",
"]",
")",
"parent",
"=",
"self",
".",
"get_parent_value",
"(",
"parent",
")",
"else",
":",
"session",
".",
"expire",
"(",
"instance",
",",
"[",
"'left'",
",",
"'right'",
",",
"'tree_id'",
",",
"'level'",
"]",
")",
"self",
".",
"expire_session_for_children",
"(",
"session",
",",
"instance",
")"
] | Event listener to recursively expire `left` and `right` attributes of the
parents of all modified instances that are part of this flush. | [
"Event",
"listener",
"to",
"recursively",
"expire",
"left",
"and",
"right",
"attributes",
"the",
"parents",
"of",
"all",
"modified",
"instances",
"part",
"of",
"this",
"flush",
"."
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/events.py#L586-L604 |
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/mixins.py | BaseNestedSets.is_ancestor_of | def is_ancestor_of(self, other, inclusive=False):
""" class or instance level method which returns True if self is
ancestor (closer to root) of other else False. Optional flag
`inclusive` on whether or not to treat self as ancestor of self.
For example see:
* :mod:`sqlalchemy_mptt.tests.cases.integrity.test_hierarchy_structure`
"""
if inclusive:
return (self.tree_id == other.tree_id) \
& (self.left <= other.left) \
& (other.right <= self.right)
return (self.tree_id == other.tree_id) \
& (self.left < other.left) \
& (other.right < self.right) | python | def is_ancestor_of(self, other, inclusive=False):
""" class or instance level method which returns True if self is
ancestor (closer to root) of other else False. Optional flag
`inclusive` on whether or not to treat self as ancestor of self.
For example see:
* :mod:`sqlalchemy_mptt.tests.cases.integrity.test_hierarchy_structure`
"""
if inclusive:
return (self.tree_id == other.tree_id) \
& (self.left <= other.left) \
& (other.right <= self.right)
return (self.tree_id == other.tree_id) \
& (self.left < other.left) \
& (other.right < self.right) | [
"def",
"is_ancestor_of",
"(",
"self",
",",
"other",
",",
"inclusive",
"=",
"False",
")",
":",
"if",
"inclusive",
":",
"return",
"(",
"self",
".",
"tree_id",
"==",
"other",
".",
"tree_id",
")",
"&",
"(",
"self",
".",
"left",
"<=",
"other",
".",
"left",
")",
"&",
"(",
"other",
".",
"right",
"<=",
"self",
".",
"right",
")",
"return",
"(",
"self",
".",
"tree_id",
"==",
"other",
".",
"tree_id",
")",
"&",
"(",
"self",
".",
"left",
"<",
"other",
".",
"left",
")",
"&",
"(",
"other",
".",
"right",
"<",
"self",
".",
"right",
")"
] | class or instance level method which returns True if self is an
ancestor (closer to the root) of other else False. Optional flag
`inclusive` on whether or not to treat self as an ancestor of self.
For example see:
* :mod:`sqlalchemy_mptt.tests.cases.integrity.test_hierarchy_structure` | [
"class",
"or",
"instance",
"level",
"method",
"which",
"returns",
"True",
"if",
"self",
"is",
"ancestor",
"(",
"closer",
"to",
"root",
")",
"of",
"other",
"else",
"False",
".",
"Optional",
"flag",
"inclusive",
"on",
"whether",
"or",
"not",
"to",
"treat",
"self",
"as",
"ancestor",
"of",
"self",
"."
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/mixins.py#L124-L139 |
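Illustrative usage (names assumed), assuming the method is registered as a hybrid method — which the class-level call inside path_to_root further down suggests. At class level it builds a SQL filter expression; at instance level it compares two loaded nodes:

# rows that are ancestors of `node`:
ancestors = session.query(Tree).filter(Tree.is_ancestor_of(node)).all()
# plain boolean check between two loaded instances:
root.is_ancestor_of(node, inclusive=True)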
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/mixins.py | BaseNestedSets.move_inside | def move_inside(self, parent_id):
""" Moving one node of tree inside another
For example see:
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_function`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_to_the_same_parent_function`
""" # noqa
session = Session.object_session(self)
self.parent_id = parent_id
self.mptt_move_inside = parent_id
session.add(self) | python | def move_inside(self, parent_id):
""" Moving one node of tree inside another
For example see:
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_function`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_to_the_same_parent_function`
""" # noqa
session = Session.object_session(self)
self.parent_id = parent_id
self.mptt_move_inside = parent_id
session.add(self) | [
"def",
"move_inside",
"(",
"self",
",",
"parent_id",
")",
":",
"# noqa",
"session",
"=",
"Session",
".",
"object_session",
"(",
"self",
")",
"self",
".",
"parent_id",
"=",
"parent_id",
"self",
".",
"mptt_move_inside",
"=",
"parent_id",
"session",
".",
"add",
"(",
"self",
")"
] | Moving one node of the tree inside another
For example see:
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_function`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_to_the_same_parent_function` | [
"Moving",
"one",
"node",
"of",
"tree",
"inside",
"another"
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/mixins.py#L153-L164 |
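Hypothetical usage (ids assumed); the actual lft/rgt rewrite happens in the before-update listener when the session flushes:

node = session.query(Tree).get(4)
node.move_inside(1)   # node 4 becomes a child of node 1
session.commit()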
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/mixins.py | BaseNestedSets.move_after | def move_after(self, node_id):
""" Moving one node of tree after another
For example see :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_after_function`
""" # noqa
session = Session.object_session(self)
self.parent_id = self.parent_id
self.mptt_move_after = node_id
session.add(self) | python | def move_after(self, node_id):
""" Moving one node of tree after another
For example see :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_after_function`
""" # noqa
session = Session.object_session(self)
self.parent_id = self.parent_id
self.mptt_move_after = node_id
session.add(self) | [
"def",
"move_after",
"(",
"self",
",",
"node_id",
")",
":",
"# noqa",
"session",
"=",
"Session",
".",
"object_session",
"(",
"self",
")",
"self",
".",
"parent_id",
"=",
"self",
".",
"parent_id",
"self",
".",
"mptt_move_after",
"=",
"node_id",
"session",
".",
"add",
"(",
"self",
")"
] | Moving one node of the tree after another
For example see :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_after_function` | [
"Moving",
"one",
"node",
"of",
"tree",
"after",
"another"
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/mixins.py#L166-L174 |
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/mixins.py | BaseNestedSets.move_before | def move_before(self, node_id):
""" Moving one node of tree before another
For example see:
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_function`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_other_tree`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_top_level`
""" # noqa
session = Session.object_session(self)
table = _get_tree_table(self.__mapper__)
pk = getattr(table.c, self.get_pk_column().name)
node = session.query(table).filter(pk == node_id).one()
self.parent_id = node.parent_id
self.mptt_move_before = node_id
session.add(self) | python | def move_before(self, node_id):
""" Moving one node of tree before another
For example see:
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_function`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_other_tree`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_top_level`
""" # noqa
session = Session.object_session(self)
table = _get_tree_table(self.__mapper__)
pk = getattr(table.c, self.get_pk_column().name)
node = session.query(table).filter(pk == node_id).one()
self.parent_id = node.parent_id
self.mptt_move_before = node_id
session.add(self) | [
"def",
"move_before",
"(",
"self",
",",
"node_id",
")",
":",
"# noqa",
"session",
"=",
"Session",
".",
"object_session",
"(",
"self",
")",
"table",
"=",
"_get_tree_table",
"(",
"self",
".",
"__mapper__",
")",
"pk",
"=",
"getattr",
"(",
"table",
".",
"c",
",",
"self",
".",
"get_pk_column",
"(",
")",
".",
"name",
")",
"node",
"=",
"session",
".",
"query",
"(",
"table",
")",
".",
"filter",
"(",
"pk",
"==",
"node_id",
")",
".",
"one",
"(",
")",
"self",
".",
"parent_id",
"=",
"node",
".",
"parent_id",
"self",
".",
"mptt_move_before",
"=",
"node_id",
"session",
".",
"add",
"(",
"self",
")"
] | Moving one node of the tree before another
For example see:
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_function`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_other_tree`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_top_level` | [
"Moving",
"one",
"node",
"of",
"tree",
"before",
"another"
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/mixins.py#L176-L191 |
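Sketch (ids assumed) — note the method looks up the target's parent itself, so only the target node's id is needed:

node = session.query(Tree).get(8)
node.move_before(6)   # place node 8 just before node 6, adopting node 6's parent
session.commit()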
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/mixins.py | BaseNestedSets.leftsibling_in_level | def leftsibling_in_level(self):
""" Node to the left of the current node at the same level
For example see
:mod:`sqlalchemy_mptt.tests.cases.get_tree.test_leftsibling_in_level`
""" # noqa
table = _get_tree_table(self.__mapper__)
session = Session.object_session(self)
current_lvl_nodes = session.query(table) \
.filter_by(level=self.level).filter_by(tree_id=self.tree_id) \
.filter(table.c.lft < self.left).order_by(table.c.lft).all()
if current_lvl_nodes:
return current_lvl_nodes[-1]
return None | python | def leftsibling_in_level(self):
""" Node to the left of the current node at the same level
For example see
:mod:`sqlalchemy_mptt.tests.cases.get_tree.test_leftsibling_in_level`
""" # noqa
table = _get_tree_table(self.__mapper__)
session = Session.object_session(self)
current_lvl_nodes = session.query(table) \
.filter_by(level=self.level).filter_by(tree_id=self.tree_id) \
.filter(table.c.lft < self.left).order_by(table.c.lft).all()
if current_lvl_nodes:
return current_lvl_nodes[-1]
return None | [
"def",
"leftsibling_in_level",
"(",
"self",
")",
":",
"# noqa",
"table",
"=",
"_get_tree_table",
"(",
"self",
".",
"__mapper__",
")",
"session",
"=",
"Session",
".",
"object_session",
"(",
"self",
")",
"current_lvl_nodes",
"=",
"session",
".",
"query",
"(",
"table",
")",
".",
"filter_by",
"(",
"level",
"=",
"self",
".",
"level",
")",
".",
"filter_by",
"(",
"tree_id",
"=",
"self",
".",
"tree_id",
")",
".",
"filter",
"(",
"table",
".",
"c",
".",
"lft",
"<",
"self",
".",
"left",
")",
".",
"order_by",
"(",
"table",
".",
"c",
".",
"lft",
")",
".",
"all",
"(",
")",
"if",
"current_lvl_nodes",
":",
"return",
"current_lvl_nodes",
"[",
"-",
"1",
"]",
"return",
"None"
] | Node to the left of the current node at the same level
For example see
:mod:`sqlalchemy_mptt.tests.cases.get_tree.test_leftsibling_in_level` | [
"Node",
"to",
"the",
"left",
"of",
"the",
"current",
"node",
"at",
"the",
"same",
"level"
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/mixins.py#L193-L206 |
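Illustrative call (names assumed); since the method queries the core table, the return value is a raw result row rather than an ORM instance:

node10 = session.query(Tree).get(10)
row = node10.leftsibling_in_level()   # row for node 8 in the example tree pictured below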
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/mixins.py | BaseNestedSets._node_to_dict | def _node_to_dict(cls, node, json, json_fields):
""" Helper method for ``get_tree``.
"""
if json:
pk_name = node.get_pk_name()
# jqTree or jsTree format
result = {'id': getattr(node, pk_name), 'label': node.__repr__()}
if json_fields:
result.update(json_fields(node))
else:
result = {'node': node}
return result | python | def _node_to_dict(cls, node, json, json_fields):
""" Helper method for ``get_tree``.
"""
if json:
pk_name = node.get_pk_name()
# jqTree or jsTree format
result = {'id': getattr(node, pk_name), 'label': node.__repr__()}
if json_fields:
result.update(json_fields(node))
else:
result = {'node': node}
return result | [
"def",
"_node_to_dict",
"(",
"cls",
",",
"node",
",",
"json",
",",
"json_fields",
")",
":",
"if",
"json",
":",
"pk_name",
"=",
"node",
".",
"get_pk_name",
"(",
")",
"# jqTree or jsTree format",
"result",
"=",
"{",
"'id'",
":",
"getattr",
"(",
"node",
",",
"pk_name",
")",
",",
"'label'",
":",
"node",
".",
"__repr__",
"(",
")",
"}",
"if",
"json_fields",
":",
"result",
".",
"update",
"(",
"json_fields",
"(",
"node",
")",
")",
"else",
":",
"result",
"=",
"{",
"'node'",
":",
"node",
"}",
"return",
"result"
] | Helper method for ``get_tree``. | [
"Helper",
"method",
"for",
"get_tree",
"."
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/mixins.py#L209-L220 |
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/mixins.py | BaseNestedSets.get_tree | def get_tree(cls, session=None, json=False, json_fields=None, query=None):
""" This method generate tree of current node table in dict or json
format. You can make custom query with attribute ``query``. By default
it return all nodes in table.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
Kwargs:
json (bool): if True return JSON jqTree format
json_fields (function): append custom fields in JSON
query (function): it takes :class:`sqlalchemy.orm.query.Query`
object as an argument, and returns in a modified form
::
def query(nodes):
return nodes.filter(node.__class__.tree_id.is_(node.tree_id))
node.get_tree(session=DBSession, json=True, query=query)
Example:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_tree`
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree`
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree_with_custom_field`
""" # noqa
tree = []
nodes_of_level = {}
# handle custom query
nodes = cls._base_query(session)
if query:
nodes = query(nodes)
nodes = cls._base_order(nodes).all()
# search minimal level of nodes.
min_level = min([node.level for node in nodes] or [None])
def get_node_id(node):
return getattr(node, node.get_pk_name())
for node in nodes:
result = cls._node_to_dict(node, json, json_fields)
parent_id = node.parent_id
if node.level != min_level: # for children
# Find parent in the tree
if parent_id not in nodes_of_level.keys():
continue
if 'children' not in nodes_of_level[parent_id]:
nodes_of_level[parent_id]['children'] = []
# Append node to parent
nl = nodes_of_level[parent_id]['children']
nl.append(result)
nodes_of_level[get_node_id(node)] = nl[-1]
else: # for top level nodes
tree.append(result)
nodes_of_level[get_node_id(node)] = tree[-1]
return tree | python | def get_tree(cls, session=None, json=False, json_fields=None, query=None):
""" This method generate tree of current node table in dict or json
format. You can make custom query with attribute ``query``. By default
it return all nodes in table.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
Kwargs:
json (bool): if True return JSON jqTree format
json_fields (function): append custom fields in JSON
query (function): it takes :class:`sqlalchemy.orm.query.Query`
object as an argument, and returns in a modified form
::
def query(nodes):
return nodes.filter(node.__class__.tree_id.is_(node.tree_id))
node.get_tree(session=DBSession, json=True, query=query)
Example:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_tree`
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree`
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree_with_custom_field`
""" # noqa
tree = []
nodes_of_level = {}
# handle custom query
nodes = cls._base_query(session)
if query:
nodes = query(nodes)
nodes = cls._base_order(nodes).all()
# search minimal level of nodes.
min_level = min([node.level for node in nodes] or [None])
def get_node_id(node):
return getattr(node, node.get_pk_name())
for node in nodes:
result = cls._node_to_dict(node, json, json_fields)
parent_id = node.parent_id
if node.level != min_level: # for children
# Find parent in the tree
if parent_id not in nodes_of_level.keys():
continue
if 'children' not in nodes_of_level[parent_id]:
nodes_of_level[parent_id]['children'] = []
# Append node to parent
nl = nodes_of_level[parent_id]['children']
nl.append(result)
nodes_of_level[get_node_id(node)] = nl[-1]
else: # for top level nodes
tree.append(result)
nodes_of_level[get_node_id(node)] = tree[-1]
return tree | [
"def",
"get_tree",
"(",
"cls",
",",
"session",
"=",
"None",
",",
"json",
"=",
"False",
",",
"json_fields",
"=",
"None",
",",
"query",
"=",
"None",
")",
":",
"# noqa",
"tree",
"=",
"[",
"]",
"nodes_of_level",
"=",
"{",
"}",
"# handle custom query",
"nodes",
"=",
"cls",
".",
"_base_query",
"(",
"session",
")",
"if",
"query",
":",
"nodes",
"=",
"query",
"(",
"nodes",
")",
"nodes",
"=",
"cls",
".",
"_base_order",
"(",
"nodes",
")",
".",
"all",
"(",
")",
"# search minimal level of nodes.",
"min_level",
"=",
"min",
"(",
"[",
"node",
".",
"level",
"for",
"node",
"in",
"nodes",
"]",
"or",
"[",
"None",
"]",
")",
"def",
"get_node_id",
"(",
"node",
")",
":",
"return",
"getattr",
"(",
"node",
",",
"node",
".",
"get_pk_name",
"(",
")",
")",
"for",
"node",
"in",
"nodes",
":",
"result",
"=",
"cls",
".",
"_node_to_dict",
"(",
"node",
",",
"json",
",",
"json_fields",
")",
"parent_id",
"=",
"node",
".",
"parent_id",
"if",
"node",
".",
"level",
"!=",
"min_level",
":",
"# for cildren",
"# Find parent in the tree",
"if",
"parent_id",
"not",
"in",
"nodes_of_level",
".",
"keys",
"(",
")",
":",
"continue",
"if",
"'children'",
"not",
"in",
"nodes_of_level",
"[",
"parent_id",
"]",
":",
"nodes_of_level",
"[",
"parent_id",
"]",
"[",
"'children'",
"]",
"=",
"[",
"]",
"# Append node to parent",
"nl",
"=",
"nodes_of_level",
"[",
"parent_id",
"]",
"[",
"'children'",
"]",
"nl",
".",
"append",
"(",
"result",
")",
"nodes_of_level",
"[",
"get_node_id",
"(",
"node",
")",
"]",
"=",
"nl",
"[",
"-",
"1",
"]",
"else",
":",
"# for top level nodes",
"tree",
".",
"append",
"(",
"result",
")",
"nodes_of_level",
"[",
"get_node_id",
"(",
"node",
")",
"]",
"=",
"tree",
"[",
"-",
"1",
"]",
"return",
"tree"
] | This method generates a tree of the current node table in dict or json
format. You can make a custom query with the attribute ``query``. By default
it returns all nodes in the table.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
Kwargs:
json (bool): if True return JSON jqTree format
json_fields (function): append custom fields in JSON
query (function): it takes :class:`sqlalchemy.orm.query.Query`
object as an argument, and returns in a modified form
::
def query(nodes):
return nodes.filter(node.__class__.tree_id.is_(node.tree_id))
node.get_tree(session=DBSession, json=True, query=query)
Example:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_tree`
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree`
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree_with_custom_field` | [
"This",
"method",
"generate",
"tree",
"of",
"current",
"node",
"table",
"in",
"dict",
"or",
"json",
"format",
".",
"You",
"can",
"make",
"custom",
"query",
"with",
"attribute",
"query",
".",
"By",
"default",
"it",
"return",
"all",
"nodes",
"in",
"table",
"."
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/mixins.py#L238-L296 |
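A hedged usage sketch combining the documented kwargs; the extra field and the filter are assumptions:

def extra(node):                 # merged into every JSON node
    return {'lvl': node.level}

def only_first_tree(nodes):      # narrows the base query
    return nodes.filter(Tree.tree_id == 1)

tree = Tree.get_tree(session, json=True,
                     json_fields=extra, query=only_first_tree)
# e.g. [{'id': 1, 'label': '<Tree 1>', 'lvl': 1, 'children': [...]}]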
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/mixins.py | BaseNestedSets.drilldown_tree | def drilldown_tree(self, session=None, json=False, json_fields=None):
""" This method generate a branch from a tree, begining with current
node.
For example:
node7.drilldown_tree()
.. code::
level Nested sets example
1 1(1)22 ---------------------
_______________|_________|_________ |
| | | | |
2 2(2)5 6(4)11 | 12(7)21 |
| ^ | ^ |
3 3(3)4 7(5)8 9(6)10 | 13(8)16 17(10)20 |
| | | |
4 | 14(9)15 18(11)19 |
| |
---------------------
Example in tests:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_drilldown_tree`
"""
if not session:
session = object_session(self)
return self.get_tree(
session,
json=json,
json_fields=json_fields,
query=self._drilldown_query
) | python | def drilldown_tree(self, session=None, json=False, json_fields=None):
""" This method generate a branch from a tree, begining with current
node.
For example:
node7.drilldown_tree()
.. code::
level Nested sets example
1 1(1)22 ---------------------
_______________|_________|_________ |
| | | | |
2 2(2)5 6(4)11 | 12(7)21 |
| ^ | ^ |
3 3(3)4 7(5)8 9(6)10 | 13(8)16 17(10)20 |
| | | |
4 | 14(9)15 18(11)19 |
| |
---------------------
Example in tests:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_drilldown_tree`
"""
if not session:
session = object_session(self)
return self.get_tree(
session,
json=json,
json_fields=json_fields,
query=self._drilldown_query
) | [
"def",
"drilldown_tree",
"(",
"self",
",",
"session",
"=",
"None",
",",
"json",
"=",
"False",
",",
"json_fields",
"=",
"None",
")",
":",
"if",
"not",
"session",
":",
"session",
"=",
"object_session",
"(",
"self",
")",
"return",
"self",
".",
"get_tree",
"(",
"session",
",",
"json",
"=",
"json",
",",
"json_fields",
"=",
"json_fields",
",",
"query",
"=",
"self",
".",
"_drilldown_query",
")"
] | This method generates a branch from a tree, beginning with the current
node.
For example:
node7.drilldown_tree()
.. code::
level Nested sets example
1 1(1)22 ---------------------
_______________|_________|_________ |
| | | | |
2 2(2)5 6(4)11 | 12(7)21 |
| ^ | ^ |
3 3(3)4 7(5)8 9(6)10 | 13(8)16 17(10)20 |
| | | |
4 | 14(9)15 18(11)19 |
| |
---------------------
Example in tests:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_drilldown_tree` | [
"This",
"method",
"generate",
"a",
"branch",
"from",
"a",
"tree",
"begining",
"with",
"current",
"node",
"."
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/mixins.py#L304-L337 |
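Hypothetical call for the tree pictured in the docstring above:

node7 = session.query(Tree).get(7)
branch = node7.drilldown_tree(session)   # node 7 and everything under it, as nested dicts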
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/mixins.py | BaseNestedSets.path_to_root | def path_to_root(self, session=None, order=desc):
"""Generate path from a leaf or intermediate node to the root.
For example:
node11.path_to_root()
.. code::
level Nested sets example
-----------------------------------------
1 | 1(1)22 |
________|______|_____________________ |
| | | | |
| ------+--------- | |
2 2(2)5 6(4)11 | -- 12(7)21 |
| ^ | / \ |
3 3(3)4 7(5)8 9(6)10 ---/---- \ |
13(8)16 | 17(10)20 |
| | | |
4 14(9)15 | 18(11)19 |
| |
-------------
"""
table = self.__class__
query = self._base_query_obj(session=session)
query = query.filter(table.is_ancestor_of(self, inclusive=True))
return self._base_order(query, order=order) | python | def path_to_root(self, session=None, order=desc):
"""Generate path from a leaf or intermediate node to the root.
For example:
node11.path_to_root()
.. code::
level Nested sets example
-----------------------------------------
1 | 1(1)22 |
________|______|_____________________ |
| | | | |
| ------+--------- | |
2 2(2)5 6(4)11 | -- 12(7)21 |
| ^ | / \ |
3 3(3)4 7(5)8 9(6)10 ---/---- \ |
13(8)16 | 17(10)20 |
| | | |
4 14(9)15 | 18(11)19 |
| |
-------------
"""
table = self.__class__
query = self._base_query_obj(session=session)
query = query.filter(table.is_ancestor_of(self, inclusive=True))
return self._base_order(query, order=order) | [
"def",
"path_to_root",
"(",
"self",
",",
"session",
"=",
"None",
",",
"order",
"=",
"desc",
")",
":",
"table",
"=",
"self",
".",
"__class__",
"query",
"=",
"self",
".",
"_base_query_obj",
"(",
"session",
"=",
"session",
")",
"query",
"=",
"query",
".",
"filter",
"(",
"table",
".",
"is_ancestor_of",
"(",
"self",
",",
"inclusive",
"=",
"True",
")",
")",
"return",
"self",
".",
"_base_order",
"(",
"query",
",",
"order",
"=",
"order",
")"
] | Generate path from a leaf or intermediate node to the root.
For example:
node11.path_to_root()
.. code::
level Nested sets example
-----------------------------------------
1 | 1(1)22 |
________|______|_____________________ |
| | | | |
| ------+--------- | |
2 2(2)5 6(4)11 | -- 12(7)21 |
| ^ | / \ |
3 3(3)4 7(5)8 9(6)10 ---/---- \ |
13(8)16 | 17(10)20 |
| | | |
4 14(9)15 | 18(11)19 |
| |
------------- | [
"Generate",
"path",
"from",
"a",
"leaf",
"or",
"intermediate",
"node",
"to",
"the",
"root",
"."
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/mixins.py#L339-L367 |
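Hypothetical call; with the default order=desc the query yields the deepest node first:

node11 = session.query(Tree).get(11)
for n in node11.path_to_root(session):
    print(n.id)   # 11, 10, 7, 1 for the tree pictured above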
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/mixins.py | BaseNestedSets.rebuild_tree | def rebuild_tree(cls, session, tree_id):
""" This method rebuid tree.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
tree_id (int or str): id of tree
Example:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_rebuild`
"""
session.query(cls).filter_by(tree_id=tree_id)\
.update({cls.left: 0, cls.right: 0, cls.level: 0})
top = session.query(cls).filter_by(parent_id=None)\
.filter_by(tree_id=tree_id).one()
top.left = left = 1
top.right = right = 2
top.level = level = cls.get_default_level()
def recursive(children, left, right, level):
level = level + 1
for i, node in enumerate(children):
same_level_right = children[i - 1].right
left = left + 1
if i > 0:
left = left + 1
if same_level_right:
left = same_level_right + 1
right = left + 1
node.left = left
node.right = right
parent = node.parent
j = 0
while parent:
parent.right = right + 1 + j
parent = parent.parent
j += 1
node.level = level
recursive(node.children, left, right, level)
recursive(top.children, left, right, level) | python | def rebuild_tree(cls, session, tree_id):
""" This method rebuid tree.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
tree_id (int or str): id of tree
Example:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_rebuild`
"""
session.query(cls).filter_by(tree_id=tree_id)\
.update({cls.left: 0, cls.right: 0, cls.level: 0})
top = session.query(cls).filter_by(parent_id=None)\
.filter_by(tree_id=tree_id).one()
top.left = left = 1
top.right = right = 2
top.level = level = cls.get_default_level()
def recursive(children, left, right, level):
level = level + 1
for i, node in enumerate(children):
same_level_right = children[i - 1].right
left = left + 1
if i > 0:
left = left + 1
if same_level_right:
left = same_level_right + 1
right = left + 1
node.left = left
node.right = right
parent = node.parent
j = 0
while parent:
parent.right = right + 1 + j
parent = parent.parent
j += 1
node.level = level
recursive(node.children, left, right, level)
recursive(top.children, left, right, level) | [
"def",
"rebuild_tree",
"(",
"cls",
",",
"session",
",",
"tree_id",
")",
":",
"session",
".",
"query",
"(",
"cls",
")",
".",
"filter_by",
"(",
"tree_id",
"=",
"tree_id",
")",
".",
"update",
"(",
"{",
"cls",
".",
"left",
":",
"0",
",",
"cls",
".",
"right",
":",
"0",
",",
"cls",
".",
"level",
":",
"0",
"}",
")",
"top",
"=",
"session",
".",
"query",
"(",
"cls",
")",
".",
"filter_by",
"(",
"parent_id",
"=",
"None",
")",
".",
"filter_by",
"(",
"tree_id",
"=",
"tree_id",
")",
".",
"one",
"(",
")",
"top",
".",
"left",
"=",
"left",
"=",
"1",
"top",
".",
"right",
"=",
"right",
"=",
"2",
"top",
".",
"level",
"=",
"level",
"=",
"cls",
".",
"get_default_level",
"(",
")",
"def",
"recursive",
"(",
"children",
",",
"left",
",",
"right",
",",
"level",
")",
":",
"level",
"=",
"level",
"+",
"1",
"for",
"i",
",",
"node",
"in",
"enumerate",
"(",
"children",
")",
":",
"same_level_right",
"=",
"children",
"[",
"i",
"-",
"1",
"]",
".",
"right",
"left",
"=",
"left",
"+",
"1",
"if",
"i",
">",
"0",
":",
"left",
"=",
"left",
"+",
"1",
"if",
"same_level_right",
":",
"left",
"=",
"same_level_right",
"+",
"1",
"right",
"=",
"left",
"+",
"1",
"node",
".",
"left",
"=",
"left",
"node",
".",
"right",
"=",
"right",
"parent",
"=",
"node",
".",
"parent",
"j",
"=",
"0",
"while",
"parent",
":",
"parent",
".",
"right",
"=",
"right",
"+",
"1",
"+",
"j",
"parent",
"=",
"parent",
".",
"parent",
"j",
"+=",
"1",
"node",
".",
"level",
"=",
"level",
"recursive",
"(",
"node",
".",
"children",
",",
"left",
",",
"right",
",",
"level",
")",
"recursive",
"(",
"top",
".",
"children",
",",
"left",
",",
"right",
",",
"level",
")"
] | This method rebuilds the tree.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
tree_id (int or str): id of tree
Example:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_rebuild` | [
"This",
"method",
"rebuid",
"tree",
"."
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/mixins.py#L370-L414 |
uralbash/sqlalchemy_mptt | sqlalchemy_mptt/mixins.py | BaseNestedSets.rebuild | def rebuild(cls, session, tree_id=None):
""" This function rebuid tree.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
Kwargs:
tree_id (int or str): id of tree, default None
Example:
* :mod:`sqlalchemy_mptt.tests.TestTree.test_rebuild`
"""
trees = session.query(cls).filter_by(parent_id=None)
if tree_id:
trees = trees.filter_by(tree_id=tree_id)
for tree in trees:
cls.rebuild_tree(session, tree.tree_id) | python | def rebuild(cls, session, tree_id=None):
""" This function rebuid tree.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
Kwargs:
tree_id (int or str): id of tree, default None
Example:
* :mod:`sqlalchemy_mptt.tests.TestTree.test_rebuild`
"""
trees = session.query(cls).filter_by(parent_id=None)
if tree_id:
trees = trees.filter_by(tree_id=tree_id)
for tree in trees:
cls.rebuild_tree(session, tree.tree_id) | [
"def",
"rebuild",
"(",
"cls",
",",
"session",
",",
"tree_id",
"=",
"None",
")",
":",
"trees",
"=",
"session",
".",
"query",
"(",
"cls",
")",
".",
"filter_by",
"(",
"parent_id",
"=",
"None",
")",
"if",
"tree_id",
":",
"trees",
"=",
"trees",
".",
"filter_by",
"(",
"tree_id",
"=",
"tree_id",
")",
"for",
"tree",
"in",
"trees",
":",
"cls",
".",
"rebuild_tree",
"(",
"session",
",",
"tree",
".",
"tree_id",
")"
] | This function rebuilds the tree.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
Kwargs:
tree_id (int or str): id of tree, default None
Example:
* :mod:`sqlalchemy_mptt.tests.TestTree.test_rebuild` | [
"This",
"function",
"rebuid",
"tree",
"."
] | train | https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/mixins.py#L417-L435 |
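Hypothetical invocation, e.g. after bulk-loading rows whose lft/rgt were left at 0:

Tree.rebuild(session)             # recompute lft/rgt/level for every tree
Tree.rebuild(session, tree_id=1)  # or restrict the repair to a single tree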
franciscogarate/pyliferisk | pyliferisk/__init__.py | qx | def qx(mt, x):
""" qx: Returns the probability that a life aged x dies before 1 year
With the convention: the true probability is qx/1000
Args:
mt: the mortality table
x: the age as integer number.
"""
if x < len(mt.qx):
return mt.qx[x]
else:
return 0 | python | def qx(mt, x):
""" qx: Returns the probability that a life aged x dies before 1 year
With the convention: the true probability is qx/1000
Args:
mt: the mortality table
x: the age as integer number.
"""
if x < len(mt.qx):
return mt.qx[x]
else:
return 0 | [
"def",
"qx",
"(",
"mt",
",",
"x",
")",
":",
"if",
"x",
"<",
"len",
"(",
"mt",
".",
"qx",
")",
":",
"return",
"mt",
".",
"qx",
"[",
"x",
"]",
"else",
":",
"return",
"0"
] | qx: Returns the probability that a life aged x dies before 1 year.
With the convention: the true probability is qx/1000
Args:
mt: the mortality table
x: the age as integer number. | [
"qx",
":",
"Returns",
"the",
"probability",
"that",
"a",
"life",
"aged",
"x",
"dies",
"before",
"1",
"year",
"With",
"the",
"convention",
":",
"the",
"true",
"probability",
"is",
"qx",
"/",
"1000",
"Args",
":",
"mt",
":",
"the",
"mortality",
"table",
"x",
":",
"the",
"age",
"as",
"integer",
"number",
"."
] | train | https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L180-L190 |
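A hedged end-to-end sketch; the concrete table name is an assumption about what pyliferisk ships:

from pyliferisk import MortalityTable, qx
from pyliferisk.mortalitytables import SPAININE2004  # assumed to be available

mt = MortalityTable(nt=SPAININE2004)
p = qx(mt, 40) / 1000.0   # true one-year death probability at age 40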
franciscogarate/pyliferisk | pyliferisk/__init__.py | lx | def lx(mt, x):
""" lx : Returns the number of survivors at begining of age x """
if x < len(mt.lx):
return mt.lx[x]
else:
return 0 | python | def lx(mt, x):
""" lx : Returns the number of survivors at begining of age x """
if x < len(mt.lx):
return mt.lx[x]
else:
return 0 | [
"def",
"lx",
"(",
"mt",
",",
"x",
")",
":",
"if",
"x",
"<",
"len",
"(",
"mt",
".",
"lx",
")",
":",
"return",
"mt",
".",
"lx",
"[",
"x",
"]",
"else",
":",
"return",
"0"
] | lx : Returns the number of survivors at the beginning of age x | [
"lx",
":",
"Returns",
"the",
"number",
"of",
"survivors",
"at",
"begining",
"of",
"age",
"x"
] | train | https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L192-L197 |
franciscogarate/pyliferisk | pyliferisk/__init__.py | dx | def dx(mt, x):
""" Returns the number of dying at begining of age x """
end_x_val = mt.lx.index(0)
if x < end_x_val:
return mt.lx[x] - mt.lx[x + 1]
else:
return 0.0 | python | def dx(mt, x):
""" Returns the number of dying at begining of age x """
end_x_val = mt.lx.index(0)
if x < end_x_val:
return mt.lx[x] - mt.lx[x + 1]
else:
return 0.0 | [
"def",
"dx",
"(",
"mt",
",",
"x",
")",
":",
"end_x_val",
"=",
"mt",
".",
"lx",
".",
"index",
"(",
"0",
")",
"if",
"x",
"<",
"end_x_val",
":",
"return",
"mt",
".",
"lx",
"[",
"x",
"]",
"-",
"mt",
".",
"lx",
"[",
"x",
"+",
"1",
"]",
"else",
":",
"return",
"0.0"
] | Returns the number dying at the beginning of age x | [
"Returns",
"the",
"number",
"of",
"dying",
"at",
"begining",
"of",
"age",
"x"
] | train | https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L203-L209 |
franciscogarate/pyliferisk | pyliferisk/__init__.py | tpx | def tpx(mt, x, t):
""" tpx : Returns the probability that x will survive within t years """
""" npx : Returns n years survival probability at age x """
return mt.lx[x + t] / mt.lx[x] | python | def tpx(mt, x, t):
""" tpx : Returns the probability that x will survive within t years """
""" npx : Returns n years survival probability at age x """
return mt.lx[x + t] / mt.lx[x] | [
"def",
"tpx",
"(",
"mt",
",",
"x",
",",
"t",
")",
":",
"\"\"\" npx : Returns n years survival probability at age x \"\"\"",
"return",
"mt",
".",
"lx",
"[",
"x",
"+",
"t",
"]",
"/",
"mt",
".",
"lx",
"[",
"x",
"]"
] | tpx : Returns the probability that x will survive within t years | [
"tpx",
":",
"Returns",
"the",
"probability",
"that",
"x",
"will",
"survive",
"within",
"t",
"years"
] | train | https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L215-L218 |
franciscogarate/pyliferisk | pyliferisk/__init__.py | tqx | def tqx(mt, x, t):
""" nqx : Returns the probability to die within n years at age x """
return (mt.lx[x] - mt.lx[x + t]) / mt.lx[x] | python | def tqx(mt, x, t):
""" nqx : Returns the probability to die within n years at age x """
return (mt.lx[x] - mt.lx[x + t]) / mt.lx[x] | [
"def",
"tqx",
"(",
"mt",
",",
"x",
",",
"t",
")",
":",
"return",
"(",
"mt",
".",
"lx",
"[",
"x",
"]",
"-",
"mt",
".",
"lx",
"[",
"x",
"+",
"t",
"]",
")",
"/",
"mt",
".",
"lx",
"[",
"x",
"]"
] | nqx : Returns the probability to die within n years at age x | [
"nqx",
":",
"Returns",
"the",
"probability",
"to",
"die",
"within",
"n",
"years",
"at",
"age",
"x"
] | train | https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L220-L222 |
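Because both functions are built from the same lx column, the survival and death probabilities over the same horizon must sum to one; a quick sanity check with mt from the sketch above:

from pyliferisk import tpx, tqx
assert abs(tpx(mt, 40, 10) + tqx(mt, 40, 10) - 1.0) < 1e-9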
franciscogarate/pyliferisk | pyliferisk/__init__.py | tqxn | def tqxn(mt, x, n, t):
""" n/qx : Probability to die in n years being alive at age x.
Probability that x survives n years, and then dies in the subsequent t years """
return tpx(mt, x, n) * tqx(mt, x + n, t) | python | def tqxn(mt, x, n, t):
""" n/qx : Probability to die in n years being alive at age x.
Probability that x survives n years, and then dies in the subsequent t years """
return tpx(mt, x, n) * tqx(mt, x + n, t) | [
"def",
"tqxn",
"(",
"mt",
",",
"x",
",",
"n",
",",
"t",
")",
":",
"return",
"tpx",
"(",
"mt",
",",
"x",
",",
"t",
")",
"*",
"qx",
"(",
"mt",
",",
"x",
"+",
"n",
")"
] | n/qx : Probability to die in n years being alive at age x.
Probability that x survives n years, and then dies in the subsequent t years | [
"n",
"/",
"qx",
":",
"Probability",
"to",
"die",
"in",
"n",
"years",
"being",
"alive",
"at",
"age",
"x",
".",
"Probability",
"that",
"x",
"survives",
"n",
"year",
"and",
"then",
"dies",
"in",
"th",
"subsequent",
"t",
"years"
] | train | https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L224-L227 |
franciscogarate/pyliferisk | pyliferisk/__init__.py | ex | def ex(mt, x):
""" ex : Returns the curtate expectation of life. Life expectancy """
sum1 = 0
for j in mt.lx[x + 1:-1]:
sum1 += j
#print sum1
try:
return sum1 / mt.lx[x] + 0.5
except:
return 0 | python | def ex(mt, x):
""" ex : Returns the curtate expectation of life. Life expectancy """
sum1 = 0
for j in mt.lx[x + 1:-1]:
sum1 += j
#print sum1
try:
return sum1 / mt.lx[x] + 0.5
except:
return 0 | [
"def",
"ex",
"(",
"mt",
",",
"x",
")",
":",
"sum1",
"=",
"0",
"for",
"j",
"in",
"mt",
".",
"lx",
"[",
"x",
"+",
"1",
":",
"-",
"1",
"]",
":",
"sum1",
"+=",
"j",
"#print sum1",
"try",
":",
"return",
"sum1",
"/",
"mt",
".",
"lx",
"[",
"x",
"]",
"+",
"0.5",
"except",
":",
"return",
"0"
] | ex : Returns the curtate expectation of life. Life expectancy | [
"ex",
":",
"Returns",
"the",
"curtate",
"expectation",
"of",
"life",
".",
"Life",
"expectancy"
] | train | https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L229-L238 |
franciscogarate/pyliferisk | pyliferisk/__init__.py | Sx | def Sx(mt, x):
""" Return the Sx """
n = len(mt.Nx)
sum1 = 0
for j in range(x, n):
k = mt.Nx[j]
sum1 += k
return sum1 | python | def Sx(mt, x):
""" Return the Sx """
n = len(mt.Nx)
sum1 = 0
for j in range(x, n):
k = mt.Nx[j]
sum1 += k
return sum1 | [
"def",
"Sx",
"(",
"mt",
",",
"x",
")",
":",
"n",
"=",
"len",
"(",
"mt",
".",
"Nx",
")",
"sum1",
"=",
"0",
"for",
"j",
"in",
"range",
"(",
"x",
",",
"n",
")",
":",
"k",
"=",
"mt",
".",
"Nx",
"[",
"j",
"]",
"sum1",
"+=",
"k",
"return",
"sum1"
] | Return the Sx | [
"Return",
"the",
"Sx"
] | train | https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L259-L266 |
franciscogarate/pyliferisk | pyliferisk/__init__.py | Cx | def Cx(mt, x):
""" Return the Cx """
return ((1 / (1 + mt.i)) ** (x + 1)) * mt.dx[x] * ((1 + mt.i) ** 0.5) | python | def Cx(mt, x):
""" Return the Cx """
return ((1 / (1 + mt.i)) ** (x + 1)) * mt.dx[x] * ((1 + mt.i) ** 0.5) | [
"def",
"Cx",
"(",
"mt",
",",
"x",
")",
":",
"return",
"(",
"(",
"1",
"/",
"(",
"1",
"+",
"mt",
".",
"i",
")",
")",
"**",
"(",
"x",
"+",
"1",
")",
")",
"*",
"mt",
".",
"dx",
"[",
"x",
"]",
"*",
"(",
"(",
"1",
"+",
"mt",
".",
"i",
")",
"**",
"0.5",
")"
] | Return the Cx | [
"Return",
"the",
"Cx"
] | train | https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L268-L270 |
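Commutation functions need a discount rate, so the sketch below uses pyliferisk's Actuarial class; the signature and table name are assumptions:

from pyliferisk import Actuarial, Cx
tar = Actuarial(nt=SPAININE2004, i=0.02)  # 2% technical interest rate, assumed table
Cx(tar, 40)   # (1/(1+i))**41 * d_40 * (1+i)**0.5: discounted deaths at age 40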