Dataset columns (type and observed value sizes):

| column | type | values |
|---|---|---|
| repository_name | string | 7–55 chars |
| func_path_in_repository | string | 4–223 chars |
| func_name | string | 1–134 chars |
| whole_func_string | string | 75–104k chars |
| language | string (1 class) | `python` |
| func_code_string | string | 75–104k chars |
| func_code_tokens | sequence | 19–28.4k items |
| func_documentation_string | string | 1–46.9k chars |
| func_documentation_tokens | sequence | 1–1.97k items |
| split_name | string (1 class) | `train` |
| func_code_url | string | 87–315 chars |

Each sample below is rendered as its metadata cells (repository_name | func_path_in_repository | func_name | language | split_name | func_code_url) followed by the function source.
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.prepare | python | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L410-L434

```python
def prepare(self, connection_id, sql, max_rows_total=None):
    """Prepares a statement.

    :param connection_id:
        ID of the current connection.
    :param sql:
        SQL query.
    :param max_rows_total:
        The maximum number of rows that will be allowed for this query.
    :returns:
        Signature of the prepared statement.
    """
    request = requests_pb2.PrepareRequest()
    request.connection_id = connection_id
    request.sql = sql
    if max_rows_total is not None:
        request.max_rows_total = max_rows_total
    response_data = self._apply(request)
    response = responses_pb2.PrepareResponse()
    response.ParseFromString(response_data)
    return response.statement
```
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement.
"""
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement | [
"def",
"prepare",
"(",
"self",
",",
"connection_id",
",",
"sql",
",",
"max_rows_total",
"=",
"None",
")",
":",
"request",
"=",
"requests_pb2",
".",
"PrepareRequest",
"(",
")",
"request",
".",
"connection_id",
"=",
"connection_id",
"request",
".",
"sql",
"=",
"sql",
"if",
"max_rows_total",
"is",
"not",
"None",
":",
"request",
".",
"max_rows_total",
"=",
"max_rows_total",
"response_data",
"=",
"self",
".",
"_apply",
"(",
"request",
")",
"response",
"=",
"responses_pb2",
".",
"PrepareResponse",
"(",
")",
"response",
".",
"ParseFromString",
"(",
"response_data",
")",
"return",
"response",
".",
"statement"
] | Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement. | [
"Prepares",
"a",
"statement",
"."
] | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L410-L434 |
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.execute | python | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L436-L475

```python
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
    """Returns a frame of rows.

    The frame describes whether there may be another frame. If there is
    not another frame, the current iteration is done when we have
    finished the rows in this frame.

    :param connection_id:
        ID of the current connection.
    :param statement_id:
        ID of the statement to fetch rows from.
    :param signature:
        common_pb2.Signature object
    :param parameter_values:
        A list of parameter values, if statement is to be executed; otherwise ``None``.
    :param first_frame_max_size:
        The maximum number of rows that will be returned in the first Frame returned for this query.
    :returns:
        Frame data, or ``None`` if there are no more.
    """
    request = requests_pb2.ExecuteRequest()
    request.statementHandle.id = statement_id
    request.statementHandle.connection_id = connection_id
    request.statementHandle.signature.CopyFrom(signature)
    if parameter_values is not None:
        request.parameter_values.extend(parameter_values)
        request.has_parameter_values = True
    if first_frame_max_size is not None:
        request.deprecated_first_frame_max_size = first_frame_max_size
        request.first_frame_max_size = first_frame_max_size
    response_data = self._apply(request)
    response = responses_pb2.ExecuteResponse()
    response.ParseFromString(response_data)
    return response.results
```
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.ExecuteRequest()
request.statementHandle.id = statement_id
request.statementHandle.connection_id = connection_id
request.statementHandle.signature.CopyFrom(signature)
if parameter_values is not None:
request.parameter_values.extend(parameter_values)
request.has_parameter_values = True
if first_frame_max_size is not None:
request.deprecated_first_frame_max_size = first_frame_max_size
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request)
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results | [
"def",
"execute",
"(",
"self",
",",
"connection_id",
",",
"statement_id",
",",
"signature",
",",
"parameter_values",
"=",
"None",
",",
"first_frame_max_size",
"=",
"None",
")",
":",
"request",
"=",
"requests_pb2",
".",
"ExecuteRequest",
"(",
")",
"request",
".",
"statementHandle",
".",
"id",
"=",
"statement_id",
"request",
".",
"statementHandle",
".",
"connection_id",
"=",
"connection_id",
"request",
".",
"statementHandle",
".",
"signature",
".",
"CopyFrom",
"(",
"signature",
")",
"if",
"parameter_values",
"is",
"not",
"None",
":",
"request",
".",
"parameter_values",
".",
"extend",
"(",
"parameter_values",
")",
"request",
".",
"has_parameter_values",
"=",
"True",
"if",
"first_frame_max_size",
"is",
"not",
"None",
":",
"request",
".",
"deprecated_first_frame_max_size",
"=",
"first_frame_max_size",
"request",
".",
"first_frame_max_size",
"=",
"first_frame_max_size",
"response_data",
"=",
"self",
".",
"_apply",
"(",
"request",
")",
"response",
"=",
"responses_pb2",
".",
"ExecuteResponse",
"(",
")",
"response",
".",
"ParseFromString",
"(",
"response_data",
")",
"return",
"response",
".",
"results"
] | Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more. | [
"Returns",
"a",
"frame",
"of",
"rows",
"."
] | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L436-L475 |
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.fetch | python | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L477-L509

```python
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
    """Returns a frame of rows.

    The frame describes whether there may be another frame. If there is
    not another frame, the current iteration is done when we have
    finished the rows in this frame.

    :param connection_id:
        ID of the current connection.
    :param statement_id:
        ID of the statement to fetch rows from.
    :param offset:
        Zero-based offset of first row in the requested frame.
    :param frame_max_size:
        Maximum number of rows to return; negative means no limit.
    :returns:
        Frame data, or ``None`` if there are no more.
    """
    request = requests_pb2.FetchRequest()
    request.connection_id = connection_id
    request.statement_id = statement_id
    request.offset = offset
    if frame_max_size is not None:
        request.frame_max_size = frame_max_size
    response_data = self._apply(request)
    response = responses_pb2.FetchResponse()
    response.ParseFromString(response_data)
    return response.frame
```
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.FetchRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.offset = offset
if frame_max_size is not None:
request.frame_max_size = frame_max_size
response_data = self._apply(request)
response = responses_pb2.FetchResponse()
response.ParseFromString(response_data)
return response.frame | [
"def",
"fetch",
"(",
"self",
",",
"connection_id",
",",
"statement_id",
",",
"offset",
"=",
"0",
",",
"frame_max_size",
"=",
"None",
")",
":",
"request",
"=",
"requests_pb2",
".",
"FetchRequest",
"(",
")",
"request",
".",
"connection_id",
"=",
"connection_id",
"request",
".",
"statement_id",
"=",
"statement_id",
"request",
".",
"offset",
"=",
"offset",
"if",
"frame_max_size",
"is",
"not",
"None",
":",
"request",
".",
"frame_max_size",
"=",
"frame_max_size",
"response_data",
"=",
"self",
".",
"_apply",
"(",
"request",
")",
"response",
"=",
"responses_pb2",
".",
"FetchResponse",
"(",
")",
"response",
".",
"ParseFromString",
"(",
"response_data",
")",
"return",
"response",
".",
"frame"
] | Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more. | [
"Returns",
"a",
"frame",
"of",
"rows",
"."
] | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L477-L509 |
lalinsky/python-phoenixdb | phoenixdb/cursor.py | Cursor.close | python | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/cursor.py#L86-L102

```python
def close(self):
    """Closes the cursor.

    No further operations are allowed once the cursor is closed.

    If the cursor is used in a ``with`` statement, this method will
    be automatically called at the end of the ``with`` block.
    """
    if self._closed:
        raise ProgrammingError('the cursor is already closed')
    if self._id is not None:
        self._connection._client.close_statement(self._connection._id, self._id)
        self._id = None
    self._signature = None
    self._column_data_types = []
    self._frame = None
    self._pos = None
    self._closed = True
```
"""Closes the cursor.
No further operations are allowed once the cursor is closed.
If the cursor is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block.
"""
if self._closed:
raise ProgrammingError('the cursor is already closed')
if self._id is not None:
self._connection._client.close_statement(self._connection._id, self._id)
self._id = None
self._signature = None
self._column_data_types = []
self._frame = None
self._pos = None
self._closed = True | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_closed",
":",
"raise",
"ProgrammingError",
"(",
"'the cursor is already closed'",
")",
"if",
"self",
".",
"_id",
"is",
"not",
"None",
":",
"self",
".",
"_connection",
".",
"_client",
".",
"close_statement",
"(",
"self",
".",
"_connection",
".",
"_id",
",",
"self",
".",
"_id",
")",
"self",
".",
"_id",
"=",
"None",
"self",
".",
"_signature",
"=",
"None",
"self",
".",
"_column_data_types",
"=",
"[",
"]",
"self",
".",
"_frame",
"=",
"None",
"self",
".",
"_pos",
"=",
"None",
"self",
".",
"_closed",
"=",
"True"
] | Closes the cursor.
No further operations are allowed once the cursor is closed.
If the cursor is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block. | [
"Closes",
"the",
"cursor",
".",
"No",
"further",
"operations",
"are",
"allowed",
"once",
"the",
"cursor",
"is",
"closed",
"."
] | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/cursor.py#L86-L102 |
lalinsky/python-phoenixdb | phoenixdb/cursor.py | Cursor._transform_row | python | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/cursor.py#L233-L263

```python
def _transform_row(self, row):
    """Transforms a Row into Python values.

    :param row:
        A ``common_pb2.Row`` object.
    :returns:
        A list of values cast into the correct Python types.
    :raises:
        NotImplementedError
    """
    tmp_row = []
    for i, column in enumerate(row.value):
        if column.has_array_value:
            raise NotImplementedError('array types are not supported')
        elif column.scalar_value.null:
            tmp_row.append(None)
        else:
            field_name, rep, mutate_to, cast_from = self._column_data_types[i]
            # get the value from the field_name
            value = getattr(column.scalar_value, field_name)
            # cast the value
            if cast_from is not None:
                value = cast_from(value)
            tmp_row.append(value)
    return tmp_row
```
"""Transforms a Row into Python values.
:param row:
A ``common_pb2.Row`` object.
:returns:
A list of values casted into the correct Python types.
:raises:
NotImplementedError
"""
tmp_row = []
for i, column in enumerate(row.value):
if column.has_array_value:
raise NotImplementedError('array types are not supported')
elif column.scalar_value.null:
tmp_row.append(None)
else:
field_name, rep, mutate_to, cast_from = self._column_data_types[i]
# get the value from the field_name
value = getattr(column.scalar_value, field_name)
# cast the value
if cast_from is not None:
value = cast_from(value)
tmp_row.append(value)
return tmp_row | [
"def",
"_transform_row",
"(",
"self",
",",
"row",
")",
":",
"tmp_row",
"=",
"[",
"]",
"for",
"i",
",",
"column",
"in",
"enumerate",
"(",
"row",
".",
"value",
")",
":",
"if",
"column",
".",
"has_array_value",
":",
"raise",
"NotImplementedError",
"(",
"'array types are not supported'",
")",
"elif",
"column",
".",
"scalar_value",
".",
"null",
":",
"tmp_row",
".",
"append",
"(",
"None",
")",
"else",
":",
"field_name",
",",
"rep",
",",
"mutate_to",
",",
"cast_from",
"=",
"self",
".",
"_column_data_types",
"[",
"i",
"]",
"# get the value from the field_name",
"value",
"=",
"getattr",
"(",
"column",
".",
"scalar_value",
",",
"field_name",
")",
"# cast the value",
"if",
"cast_from",
"is",
"not",
"None",
":",
"value",
"=",
"cast_from",
"(",
"value",
")",
"tmp_row",
".",
"append",
"(",
"value",
")",
"return",
"tmp_row"
] | Transforms a Row into Python values.
:param row:
A ``common_pb2.Row`` object.
:returns:
A list of values casted into the correct Python types.
:raises:
NotImplementedError | [
"Transforms",
"a",
"Row",
"into",
"Python",
"values",
"."
] | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/cursor.py#L233-L263 |
lalinsky/python-phoenixdb | phoenixdb/cursor.py | Cursor.rownumber | python | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/cursor.py#L324-L335

```python
def rownumber(self):
    """Read-only attribute providing the current 0-based index of the
    cursor in the result set, or ``None`` if the index cannot be
    determined.

    The index can be seen as the index of the cursor in a sequence
    (the result set). The next fetch operation will fetch the
    row indexed by :attr:`rownumber` in that sequence.
    """
    if self._frame is not None and self._pos is not None:
        return self._frame.offset + self._pos
    return self._pos
```
"""Read-only attribute providing the current 0-based index of the
cursor in the result set or ``None`` if the index cannot be
determined.
The index can be seen as index of the cursor in a sequence
(the result set). The next fetch operation will fetch the
row indexed by :attr:`rownumber` in that sequence.
"""
if self._frame is not None and self._pos is not None:
return self._frame.offset + self._pos
return self._pos | [
"def",
"rownumber",
"(",
"self",
")",
":",
"if",
"self",
".",
"_frame",
"is",
"not",
"None",
"and",
"self",
".",
"_pos",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_frame",
".",
"offset",
"+",
"self",
".",
"_pos",
"return",
"self",
".",
"_pos"
] | Read-only attribute providing the current 0-based index of the
cursor in the result set or ``None`` if the index cannot be
determined.
The index can be seen as index of the cursor in a sequence
(the result set). The next fetch operation will fetch the
row indexed by :attr:`rownumber` in that sequence. | [
"Read",
"-",
"only",
"attribute",
"providing",
"the",
"current",
"0",
"-",
"based",
"index",
"of",
"the",
"cursor",
"in",
"the",
"result",
"set",
"or",
"None",
"if",
"the",
"index",
"cannot",
"be",
"determined",
"."
] | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/cursor.py#L324-L335 |
lalinsky/python-phoenixdb | phoenixdb/types.py | Timestamp | python | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/types.py#L38-L40

```python
def Timestamp(year, month, day, hour, minute, second):
    """Constructs an object holding a datetime/timestamp value."""
    return datetime.datetime(year, month, day, hour, minute, second)
```
"""Constructs an object holding a datetime/timestamp value."""
return datetime.datetime(year, month, day, hour, minute, second) | [
"def",
"Timestamp",
"(",
"year",
",",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"second",
")",
":",
"return",
"datetime",
".",
"datetime",
"(",
"year",
",",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"second",
")"
] | Constructs an object holding a datetime/timestamp value. | [
"Constructs",
"an",
"object",
"holding",
"a",
"datetime",
"/",
"timestamp",
"value",
"."
] | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/types.py#L38-L40 |
lalinsky/python-phoenixdb | phoenixdb/connection.py | Connection.open | python | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L70-L73

```python
def open(self):
    """Opens the connection."""
    self._id = str(uuid.uuid4())
    self._client.open_connection(self._id, info=self._connection_args)
```
"""Opens the connection."""
self._id = str(uuid.uuid4())
self._client.open_connection(self._id, info=self._connection_args) | [
"def",
"open",
"(",
"self",
")",
":",
"self",
".",
"_id",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"self",
".",
"_client",
".",
"open_connection",
"(",
"self",
".",
"_id",
",",
"info",
"=",
"self",
".",
"_connection_args",
")"
] | Opens the connection. | [
"Opens",
"the",
"connection",
"."
] | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L70-L73 |
lalinsky/python-phoenixdb | phoenixdb/connection.py | Connection.close | python | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L75-L91

```python
def close(self):
    """Closes the connection.

    No further operations are allowed, either on the connection or any
    of its cursors, once the connection is closed.

    If the connection is used in a ``with`` statement, this method will
    be automatically called at the end of the ``with`` block.
    """
    if self._closed:
        raise ProgrammingError('the connection is already closed')
    for cursor_ref in self._cursors:
        cursor = cursor_ref()
        if cursor is not None and not cursor._closed:
            cursor.close()
    self._client.close_connection(self._id)
    self._client.close()
    self._closed = True
```
"""Closes the connection.
No further operations are allowed, either on the connection or any
of its cursors, once the connection is closed.
If the connection is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block.
"""
if self._closed:
raise ProgrammingError('the connection is already closed')
for cursor_ref in self._cursors:
cursor = cursor_ref()
if cursor is not None and not cursor._closed:
cursor.close()
self._client.close_connection(self._id)
self._client.close()
self._closed = True | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_closed",
":",
"raise",
"ProgrammingError",
"(",
"'the connection is already closed'",
")",
"for",
"cursor_ref",
"in",
"self",
".",
"_cursors",
":",
"cursor",
"=",
"cursor_ref",
"(",
")",
"if",
"cursor",
"is",
"not",
"None",
"and",
"not",
"cursor",
".",
"_closed",
":",
"cursor",
".",
"close",
"(",
")",
"self",
".",
"_client",
".",
"close_connection",
"(",
"self",
".",
"_id",
")",
"self",
".",
"_client",
".",
"close",
"(",
")",
"self",
".",
"_closed",
"=",
"True"
] | Closes the connection.
No further operations are allowed, either on the connection or any
of its cursors, once the connection is closed.
If the connection is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block. | [
"Closes",
"the",
"connection",
".",
"No",
"further",
"operations",
"are",
"allowed",
"either",
"on",
"the",
"connection",
"or",
"any",
"of",
"its",
"cursors",
"once",
"the",
"connection",
"is",
"closed",
"."
] | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L75-L91 |
lalinsky/python-phoenixdb | phoenixdb/connection.py | Connection.cursor | python | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L109-L126

```python
def cursor(self, cursor_factory=None):
    """Creates a new cursor.

    :param cursor_factory:
        This argument can be used to create non-standard cursors.
        The class returned must be a subclass of
        :class:`~phoenixdb.cursor.Cursor` (for example :class:`~phoenixdb.cursor.DictCursor`).
        A default factory for the connection can also be specified using the
        :attr:`cursor_factory` attribute.
    :returns:
        A :class:`~phoenixdb.cursor.Cursor` object.
    """
    if self._closed:
        raise ProgrammingError('the connection is already closed')
    cursor = (cursor_factory or self.cursor_factory)(self)
    self._cursors.append(weakref.ref(cursor, self._cursors.remove))
    return cursor
```
"""Creates a new cursor.
:param cursor_factory:
This argument can be used to create non-standard cursors.
The class returned must be a subclass of
:class:`~phoenixdb.cursor.Cursor` (for example :class:`~phoenixdb.cursor.DictCursor`).
A default factory for the connection can also be specified using the
:attr:`cursor_factory` attribute.
:returns:
A :class:`~phoenixdb.cursor.Cursor` object.
"""
if self._closed:
raise ProgrammingError('the connection is already closed')
cursor = (cursor_factory or self.cursor_factory)(self)
self._cursors.append(weakref.ref(cursor, self._cursors.remove))
return cursor | [
"def",
"cursor",
"(",
"self",
",",
"cursor_factory",
"=",
"None",
")",
":",
"if",
"self",
".",
"_closed",
":",
"raise",
"ProgrammingError",
"(",
"'the connection is already closed'",
")",
"cursor",
"=",
"(",
"cursor_factory",
"or",
"self",
".",
"cursor_factory",
")",
"(",
"self",
")",
"self",
".",
"_cursors",
".",
"append",
"(",
"weakref",
".",
"ref",
"(",
"cursor",
",",
"self",
".",
"_cursors",
".",
"remove",
")",
")",
"return",
"cursor"
] | Creates a new cursor.
:param cursor_factory:
This argument can be used to create non-standard cursors.
The class returned must be a subclass of
:class:`~phoenixdb.cursor.Cursor` (for example :class:`~phoenixdb.cursor.DictCursor`).
A default factory for the connection can also be specified using the
:attr:`cursor_factory` attribute.
:returns:
A :class:`~phoenixdb.cursor.Cursor` object. | [
"Creates",
"a",
"new",
"cursor",
"."
] | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L109-L126 |
lalinsky/python-phoenixdb | phoenixdb/connection.py | Connection.set_session | python | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L128-L147

```python
def set_session(self, autocommit=None, readonly=None):
    """Sets one or more parameters in the current connection.

    :param autocommit:
        Switch the connection to autocommit mode. With the current
        version, you need to always enable this, because
        :meth:`commit` is not implemented.
    :param readonly:
        Switch the connection to read-only mode.
    """
    props = {}
    if autocommit is not None:
        props['autoCommit'] = bool(autocommit)
    if readonly is not None:
        props['readOnly'] = bool(readonly)
    props = self._client.connection_sync(self._id, props)
    self._autocommit = props.auto_commit
    self._readonly = props.read_only
    self._transactionisolation = props.transaction_isolation
```
"""Sets one or more parameters in the current connection.
:param autocommit:
Switch the connection to autocommit mode. With the current
version, you need to always enable this, because
:meth:`commit` is not implemented.
:param readonly:
Switch the connection to read-only mode.
"""
props = {}
if autocommit is not None:
props['autoCommit'] = bool(autocommit)
if readonly is not None:
props['readOnly'] = bool(readonly)
props = self._client.connection_sync(self._id, props)
self._autocommit = props.auto_commit
self._readonly = props.read_only
self._transactionisolation = props.transaction_isolation | [
"def",
"set_session",
"(",
"self",
",",
"autocommit",
"=",
"None",
",",
"readonly",
"=",
"None",
")",
":",
"props",
"=",
"{",
"}",
"if",
"autocommit",
"is",
"not",
"None",
":",
"props",
"[",
"'autoCommit'",
"]",
"=",
"bool",
"(",
"autocommit",
")",
"if",
"readonly",
"is",
"not",
"None",
":",
"props",
"[",
"'readOnly'",
"]",
"=",
"bool",
"(",
"readonly",
")",
"props",
"=",
"self",
".",
"_client",
".",
"connection_sync",
"(",
"self",
".",
"_id",
",",
"props",
")",
"self",
".",
"_autocommit",
"=",
"props",
".",
"auto_commit",
"self",
".",
"_readonly",
"=",
"props",
".",
"read_only",
"self",
".",
"_transactionisolation",
"=",
"props",
".",
"transaction_isolation"
] | Sets one or more parameters in the current connection.
:param autocommit:
Switch the connection to autocommit mode. With the current
version, you need to always enable this, because
:meth:`commit` is not implemented.
:param readonly:
Switch the connection to read-only mode. | [
"Sets",
"one",
"or",
"more",
"parameters",
"in",
"the",
"current",
"connection",
"."
] | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L128-L147 |
wdm0006/sklearn-extensions | sklearn_extensions/kernel_regression/kr.py | KernelRegression.fit | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/kernel_regression/kr.py#L46-L68

```python
def fit(self, X, y):
    """Fit the model

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The training input samples.
    y : array-like, shape = [n_samples]
        The target values

    Returns
    -------
    self : object
        Returns self.
    """
    self.X = X
    self.y = y

    if hasattr(self.gamma, "__iter__"):
        self.gamma = self._optimize_gamma(self.gamma)

    return self
```
"""Fit the model
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values
Returns
-------
self : object
Returns self.
"""
self.X = X
self.y = y
if hasattr(self.gamma, "__iter__"):
self.gamma = self._optimize_gamma(self.gamma)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"self",
".",
"X",
"=",
"X",
"self",
".",
"y",
"=",
"y",
"if",
"hasattr",
"(",
"self",
".",
"gamma",
",",
"\"__iter__\"",
")",
":",
"self",
".",
"gamma",
"=",
"self",
".",
"_optimize_gamma",
"(",
"self",
".",
"gamma",
")",
"return",
"self"
] | Fit the model
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values
Returns
-------
self : object
Returns self. | [
"Fit",
"the",
"model"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/kernel_regression/kr.py#L46-L68 |
wdm0006/sklearn-extensions | sklearn_extensions/kernel_regression/kr.py | KernelRegression.predict | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/kernel_regression/kr.py#L70-L84

```python
def predict(self, X):
    """Predict target values for X.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    y : array of shape = [n_samples]
        The predicted target value.
    """
    K = pairwise_kernels(self.X, X, metric=self.kernel, gamma=self.gamma)
    return (K * self.y[:, None]).sum(axis=0) / K.sum(axis=0)
```
"""Predict target values for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted target value.
"""
K = pairwise_kernels(self.X, X, metric=self.kernel, gamma=self.gamma)
return (K * self.y[:, None]).sum(axis=0) / K.sum(axis=0) | [
"def",
"predict",
"(",
"self",
",",
"X",
")",
":",
"K",
"=",
"pairwise_kernels",
"(",
"self",
".",
"X",
",",
"X",
",",
"metric",
"=",
"self",
".",
"kernel",
",",
"gamma",
"=",
"self",
".",
"gamma",
")",
"return",
"(",
"K",
"*",
"self",
".",
"y",
"[",
":",
",",
"None",
"]",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"/",
"K",
".",
"sum",
"(",
"axis",
"=",
"0",
")"
] | Predict target values for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted target value. | [
"Predict",
"target",
"values",
"for",
"X",
"."
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/kernel_regression/kr.py#L70-L84 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/random_layer.py | BaseRandomLayer._compute_hidden_activations | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L72-L88

```python
def _compute_hidden_activations(self, X):
    """Compute hidden activations given X"""
    self._compute_input_activations(X)

    acts = self.input_activations_

    if callable(self.activation_func):
        args_dict = self.activation_args if self.activation_args else {}
        X_new = self.activation_func(acts, **args_dict)
    else:
        func_name = self.activation_func
        func = self._internal_activation_funcs[func_name]
        X_new = func(acts, **self._extra_args)

    return X_new
```
"""Compute hidden activations given X"""
self._compute_input_activations(X)
acts = self.input_activations_
if (callable(self.activation_func)):
args_dict = self.activation_args if (self.activation_args) else {}
X_new = self.activation_func(acts, **args_dict)
else:
func_name = self.activation_func
func = self._internal_activation_funcs[func_name]
X_new = func(acts, **self._extra_args)
return X_new | [
"def",
"_compute_hidden_activations",
"(",
"self",
",",
"X",
")",
":",
"self",
".",
"_compute_input_activations",
"(",
"X",
")",
"acts",
"=",
"self",
".",
"input_activations_",
"if",
"(",
"callable",
"(",
"self",
".",
"activation_func",
")",
")",
":",
"args_dict",
"=",
"self",
".",
"activation_args",
"if",
"(",
"self",
".",
"activation_args",
")",
"else",
"{",
"}",
"X_new",
"=",
"self",
".",
"activation_func",
"(",
"acts",
",",
"*",
"*",
"args_dict",
")",
"else",
":",
"func_name",
"=",
"self",
".",
"activation_func",
"func",
"=",
"self",
".",
"_internal_activation_funcs",
"[",
"func_name",
"]",
"X_new",
"=",
"func",
"(",
"acts",
",",
"*",
"*",
"self",
".",
"_extra_args",
")",
"return",
"X_new"
] | Compute hidden activations given X | [
"Compute",
"hidden",
"activations",
"given",
"X"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L72-L88 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/random_layer.py | BaseRandomLayer.transform | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L114-L132

```python
def transform(self, X, y=None):
    """Generate the random hidden layer's activations given X as input.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Data to transform
    y : is not used: placeholder to allow for usage in a Pipeline.

    Returns
    -------
    X_new : numpy array of shape [n_samples, n_components]
    """
    if self.components_ is None:
        raise ValueError('No components initialized')

    return self._compute_hidden_activations(X)
```
"""Generate the random hidden layer's activations given X as input.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data to transform
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
X_new : numpy array of shape [n_samples, n_components]
"""
if (self.components_ is None):
raise ValueError('No components initialized')
return self._compute_hidden_activations(X) | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"if",
"(",
"self",
".",
"components_",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"'No components initialized'",
")",
"return",
"self",
".",
"_compute_hidden_activations",
"(",
"X",
")"
] | Generate the random hidden layer's activations given X as input.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data to transform
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
X_new : numpy array of shape [n_samples, n_components] | [
"Generate",
"the",
"random",
"hidden",
"layer",
"s",
"activations",
"given",
"X",
"as",
"input",
"."
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L114-L132 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/random_layer.py | RandomLayer._compute_radii | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L280-L294

```python
def _compute_radii(self):
    """Generate RBF radii"""
    # use supplied radii if present
    radii = self._get_user_components('radii')

    # compute radii
    if radii is None:
        centers = self.components_['centers']

        n_centers = centers.shape[0]
        max_dist = np.max(pairwise_distances(centers))
        radii = np.ones(n_centers) * max_dist / sqrt(2.0 * n_centers)

    self.components_['radii'] = radii
```
"""Generate RBF radii"""
# use supplied radii if present
radii = self._get_user_components('radii')
# compute radii
if (radii is None):
centers = self.components_['centers']
n_centers = centers.shape[0]
max_dist = np.max(pairwise_distances(centers))
radii = np.ones(n_centers) * max_dist/sqrt(2.0 * n_centers)
self.components_['radii'] = radii | [
"def",
"_compute_radii",
"(",
"self",
")",
":",
"# use supplied radii if present",
"radii",
"=",
"self",
".",
"_get_user_components",
"(",
"'radii'",
")",
"# compute radii",
"if",
"(",
"radii",
"is",
"None",
")",
":",
"centers",
"=",
"self",
".",
"components_",
"[",
"'centers'",
"]",
"n_centers",
"=",
"centers",
".",
"shape",
"[",
"0",
"]",
"max_dist",
"=",
"np",
".",
"max",
"(",
"pairwise_distances",
"(",
"centers",
")",
")",
"radii",
"=",
"np",
".",
"ones",
"(",
"n_centers",
")",
"*",
"max_dist",
"/",
"sqrt",
"(",
"2.0",
"*",
"n_centers",
")",
"self",
".",
"components_",
"[",
"'radii'",
"]",
"=",
"radii"
] | Generate RBF radii | [
"Generate",
"RBF",
"radii"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L280-L294 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/random_layer.py | RandomLayer._compute_centers | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L296-L326

```python
def _compute_centers(self, X, sparse, rs):
    """Generate RBF centers"""
    # use supplied centers if present
    centers = self._get_user_components('centers')

    # use points taken uniformly from the bounding
    # hyperrectangle
    if centers is None:
        n_features = X.shape[1]

        if sparse:
            fxr = range(n_features)
            cols = [X.getcol(i) for i in fxr]

            min_dtype = X.dtype.type(1.0e10)
            sp_min = lambda col: np.minimum(min_dtype, np.min(col.data))
            # wrap map() in list() so this also works on Python 3,
            # where map() returns a lazy iterator
            min_Xs = np.array(list(map(sp_min, cols)))

            max_dtype = X.dtype.type(-1.0e10)
            sp_max = lambda col: np.maximum(max_dtype, np.max(col.data))
            max_Xs = np.array(list(map(sp_max, cols)))
        else:
            min_Xs = X.min(axis=0)
            max_Xs = X.max(axis=0)

        spans = max_Xs - min_Xs
        ctrs_size = (self.n_hidden, n_features)
        centers = min_Xs + spans * rs.uniform(0.0, 1.0, ctrs_size)

    self.components_['centers'] = centers
```
"""Generate RBF centers"""
# use supplied centers if present
centers = self._get_user_components('centers')
# use points taken uniformly from the bounding
# hyperrectangle
if (centers is None):
n_features = X.shape[1]
if (sparse):
fxr = range(n_features)
cols = [X.getcol(i) for i in fxr]
min_dtype = X.dtype.type(1.0e10)
sp_min = lambda col: np.minimum(min_dtype, np.min(col.data))
min_Xs = np.array(map(sp_min, cols))
max_dtype = X.dtype.type(-1.0e10)
sp_max = lambda col: np.maximum(max_dtype, np.max(col.data))
max_Xs = np.array(map(sp_max, cols))
else:
min_Xs = X.min(axis=0)
max_Xs = X.max(axis=0)
spans = max_Xs - min_Xs
ctrs_size = (self.n_hidden, n_features)
centers = min_Xs + spans * rs.uniform(0.0, 1.0, ctrs_size)
self.components_['centers'] = centers | [
"def",
"_compute_centers",
"(",
"self",
",",
"X",
",",
"sparse",
",",
"rs",
")",
":",
"# use supplied centers if present",
"centers",
"=",
"self",
".",
"_get_user_components",
"(",
"'centers'",
")",
"# use points taken uniformly from the bounding",
"# hyperrectangle",
"if",
"(",
"centers",
"is",
"None",
")",
":",
"n_features",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"if",
"(",
"sparse",
")",
":",
"fxr",
"=",
"range",
"(",
"n_features",
")",
"cols",
"=",
"[",
"X",
".",
"getcol",
"(",
"i",
")",
"for",
"i",
"in",
"fxr",
"]",
"min_dtype",
"=",
"X",
".",
"dtype",
".",
"type",
"(",
"1.0e10",
")",
"sp_min",
"=",
"lambda",
"col",
":",
"np",
".",
"minimum",
"(",
"min_dtype",
",",
"np",
".",
"min",
"(",
"col",
".",
"data",
")",
")",
"min_Xs",
"=",
"np",
".",
"array",
"(",
"map",
"(",
"sp_min",
",",
"cols",
")",
")",
"max_dtype",
"=",
"X",
".",
"dtype",
".",
"type",
"(",
"-",
"1.0e10",
")",
"sp_max",
"=",
"lambda",
"col",
":",
"np",
".",
"maximum",
"(",
"max_dtype",
",",
"np",
".",
"max",
"(",
"col",
".",
"data",
")",
")",
"max_Xs",
"=",
"np",
".",
"array",
"(",
"map",
"(",
"sp_max",
",",
"cols",
")",
")",
"else",
":",
"min_Xs",
"=",
"X",
".",
"min",
"(",
"axis",
"=",
"0",
")",
"max_Xs",
"=",
"X",
".",
"max",
"(",
"axis",
"=",
"0",
")",
"spans",
"=",
"max_Xs",
"-",
"min_Xs",
"ctrs_size",
"=",
"(",
"self",
".",
"n_hidden",
",",
"n_features",
")",
"centers",
"=",
"min_Xs",
"+",
"spans",
"*",
"rs",
".",
"uniform",
"(",
"0.0",
",",
"1.0",
",",
"ctrs_size",
")",
"self",
".",
"components_",
"[",
"'centers'",
"]",
"=",
"centers"
] | Generate RBF centers | [
"Generate",
"RBF",
"centers"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L296-L326 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/random_layer.py | RandomLayer._compute_biases | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L328-L337

```python
def _compute_biases(self, rs):
    """Generate MLP biases"""
    # use supplied biases if present
    biases = self._get_user_components('biases')
    if biases is None:
        b_size = self.n_hidden
        biases = rs.normal(size=b_size)

    self.components_['biases'] = biases
```
"""Generate MLP biases"""
# use supplied biases if present
biases = self._get_user_components('biases')
if (biases is None):
b_size = self.n_hidden
biases = rs.normal(size=b_size)
self.components_['biases'] = biases | [
"def",
"_compute_biases",
"(",
"self",
",",
"rs",
")",
":",
"# use supplied biases if present",
"biases",
"=",
"self",
".",
"_get_user_components",
"(",
"'biases'",
")",
"if",
"(",
"biases",
"is",
"None",
")",
":",
"b_size",
"=",
"self",
".",
"n_hidden",
"biases",
"=",
"rs",
".",
"normal",
"(",
"size",
"=",
"b_size",
")",
"self",
".",
"components_",
"[",
"'biases'",
"]",
"=",
"biases"
] | Generate MLP biases | [
"Generate",
"MLP",
"biases"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L328-L337 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/random_layer.py | RandomLayer._compute_weights | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L339-L349

```python
def _compute_weights(self, X, rs):
    """Generate MLP weights"""
    # use supplied weights if present
    weights = self._get_user_components('weights')
    if weights is None:
        n_features = X.shape[1]
        hw_size = (n_features, self.n_hidden)
        weights = rs.normal(size=hw_size)

    self.components_['weights'] = weights
```
"""Generate MLP weights"""
# use supplied weights if present
weights = self._get_user_components('weights')
if (weights is None):
n_features = X.shape[1]
hw_size = (n_features, self.n_hidden)
weights = rs.normal(size=hw_size)
self.components_['weights'] = weights | [
"def",
"_compute_weights",
"(",
"self",
",",
"X",
",",
"rs",
")",
":",
"# use supplied weights if present",
"weights",
"=",
"self",
".",
"_get_user_components",
"(",
"'weights'",
")",
"if",
"(",
"weights",
"is",
"None",
")",
":",
"n_features",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"hw_size",
"=",
"(",
"n_features",
",",
"self",
".",
"n_hidden",
")",
"weights",
"=",
"rs",
".",
"normal",
"(",
"size",
"=",
"hw_size",
")",
"self",
".",
"components_",
"[",
"'weights'",
"]",
"=",
"weights"
] | Generate MLP weights | [
"Generate",
"MLP",
"weights"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L339-L349 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/random_layer.py | RandomLayer._generate_components | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L351-L361

```python
def _generate_components(self, X):
    """Generate components of hidden layer given X"""
    rs = check_random_state(self.random_state)
    if self._use_mlp_input:
        self._compute_biases(rs)
        self._compute_weights(X, rs)

    if self._use_rbf_input:
        self._compute_centers(X, sp.issparse(X), rs)
        self._compute_radii()
```
"""Generate components of hidden layer given X"""
rs = check_random_state(self.random_state)
if (self._use_mlp_input):
self._compute_biases(rs)
self._compute_weights(X, rs)
if (self._use_rbf_input):
self._compute_centers(X, sp.issparse(X), rs)
self._compute_radii() | [
"def",
"_generate_components",
"(",
"self",
",",
"X",
")",
":",
"rs",
"=",
"check_random_state",
"(",
"self",
".",
"random_state",
")",
"if",
"(",
"self",
".",
"_use_mlp_input",
")",
":",
"self",
".",
"_compute_biases",
"(",
"rs",
")",
"self",
".",
"_compute_weights",
"(",
"X",
",",
"rs",
")",
"if",
"(",
"self",
".",
"_use_rbf_input",
")",
":",
"self",
".",
"_compute_centers",
"(",
"X",
",",
"sp",
".",
"issparse",
"(",
"X",
")",
",",
"rs",
")",
"self",
".",
"_compute_radii",
"(",
")"
] | Generate components of hidden layer given X | [
"Generate",
"components",
"of",
"hidden",
"layer",
"given",
"X"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L351-L361 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/random_layer.py | RandomLayer._compute_input_activations | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L363-L381

```python
def _compute_input_activations(self, X):
    """Compute input activations given X"""
    n_samples = X.shape[0]

    mlp_acts = np.zeros((n_samples, self.n_hidden))
    if self._use_mlp_input:
        b = self.components_['biases']
        w = self.components_['weights']
        mlp_acts = self.alpha * (safe_sparse_dot(X, w) + b)

    rbf_acts = np.zeros((n_samples, self.n_hidden))
    if self._use_rbf_input:
        radii = self.components_['radii']
        centers = self.components_['centers']
        scale = self.rbf_width * (1.0 - self.alpha)
        rbf_acts = scale * cdist(X, centers) / radii

    self.input_activations_ = mlp_acts + rbf_acts
```
"""Compute input activations given X"""
n_samples = X.shape[0]
mlp_acts = np.zeros((n_samples, self.n_hidden))
if (self._use_mlp_input):
b = self.components_['biases']
w = self.components_['weights']
mlp_acts = self.alpha * (safe_sparse_dot(X, w) + b)
rbf_acts = np.zeros((n_samples, self.n_hidden))
if (self._use_rbf_input):
radii = self.components_['radii']
centers = self.components_['centers']
scale = self.rbf_width * (1.0 - self.alpha)
rbf_acts = scale * cdist(X, centers)/radii
self.input_activations_ = mlp_acts + rbf_acts | [
"def",
"_compute_input_activations",
"(",
"self",
",",
"X",
")",
":",
"n_samples",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"mlp_acts",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_samples",
",",
"self",
".",
"n_hidden",
")",
")",
"if",
"(",
"self",
".",
"_use_mlp_input",
")",
":",
"b",
"=",
"self",
".",
"components_",
"[",
"'biases'",
"]",
"w",
"=",
"self",
".",
"components_",
"[",
"'weights'",
"]",
"mlp_acts",
"=",
"self",
".",
"alpha",
"*",
"(",
"safe_sparse_dot",
"(",
"X",
",",
"w",
")",
"+",
"b",
")",
"rbf_acts",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_samples",
",",
"self",
".",
"n_hidden",
")",
")",
"if",
"(",
"self",
".",
"_use_rbf_input",
")",
":",
"radii",
"=",
"self",
".",
"components_",
"[",
"'radii'",
"]",
"centers",
"=",
"self",
".",
"components_",
"[",
"'centers'",
"]",
"scale",
"=",
"self",
".",
"rbf_width",
"*",
"(",
"1.0",
"-",
"self",
".",
"alpha",
")",
"rbf_acts",
"=",
"scale",
"*",
"cdist",
"(",
"X",
",",
"centers",
")",
"/",
"radii",
"self",
".",
"input_activations_",
"=",
"mlp_acts",
"+",
"rbf_acts"
] | Compute input activations given X | [
"Compute",
"input",
"activations",
"given",
"X"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L363-L381 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/random_layer.py | GRBFRandomLayer._compute_centers | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L500-L518

```python
def _compute_centers(self, X, sparse, rs):
    """Generate centers, then compute tau, dF and dN vals"""
    super(GRBFRandomLayer, self)._compute_centers(X, sparse, rs)

    centers = self.components_['centers']
    sorted_distances = np.sort(squareform(pdist(centers)))

    self.dF_vals = sorted_distances[:, -1]
    self.dN_vals = sorted_distances[:, 1] / 100.0
    # self.dN_vals = 0.0002 * np.ones(self.dF_vals.shape)

    tauNum = np.log(np.log(self.grbf_lambda) /
                    np.log(1.0 - self.grbf_lambda))
    tauDenom = np.log(self.dF_vals / self.dN_vals)
    self.tau_vals = tauNum / tauDenom

    self._extra_args['taus'] = self.tau_vals
```
"""Generate centers, then compute tau, dF and dN vals"""
super(GRBFRandomLayer, self)._compute_centers(X, sparse, rs)
centers = self.components_['centers']
sorted_distances = np.sort(squareform(pdist(centers)))
self.dF_vals = sorted_distances[:, -1]
self.dN_vals = sorted_distances[:, 1]/100.0
#self.dN_vals = 0.0002 * np.ones(self.dF_vals.shape)
tauNum = np.log(np.log(self.grbf_lambda) /
np.log(1.0 - self.grbf_lambda))
tauDenom = np.log(self.dF_vals/self.dN_vals)
self.tau_vals = tauNum/tauDenom
self._extra_args['taus'] = self.tau_vals | [
"def",
"_compute_centers",
"(",
"self",
",",
"X",
",",
"sparse",
",",
"rs",
")",
":",
"super",
"(",
"GRBFRandomLayer",
",",
"self",
")",
".",
"_compute_centers",
"(",
"X",
",",
"sparse",
",",
"rs",
")",
"centers",
"=",
"self",
".",
"components_",
"[",
"'centers'",
"]",
"sorted_distances",
"=",
"np",
".",
"sort",
"(",
"squareform",
"(",
"pdist",
"(",
"centers",
")",
")",
")",
"self",
".",
"dF_vals",
"=",
"sorted_distances",
"[",
":",
",",
"-",
"1",
"]",
"self",
".",
"dN_vals",
"=",
"sorted_distances",
"[",
":",
",",
"1",
"]",
"/",
"100.0",
"#self.dN_vals = 0.0002 * np.ones(self.dF_vals.shape)",
"tauNum",
"=",
"np",
".",
"log",
"(",
"np",
".",
"log",
"(",
"self",
".",
"grbf_lambda",
")",
"/",
"np",
".",
"log",
"(",
"1.0",
"-",
"self",
".",
"grbf_lambda",
")",
")",
"tauDenom",
"=",
"np",
".",
"log",
"(",
"self",
".",
"dF_vals",
"/",
"self",
".",
"dN_vals",
")",
"self",
".",
"tau_vals",
"=",
"tauNum",
"/",
"tauDenom",
"self",
".",
"_extra_args",
"[",
"'taus'",
"]",
"=",
"self",
".",
"tau_vals"
] | Generate centers, then compute tau, dF and dN vals | [
"Generate",
"centers",
"then",
"compute",
"tau",
"dF",
"and",
"dN",
"vals"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L500-L518 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/random_layer.py | GRBFRandomLayer._compute_radii | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L521-L525

```python
def _compute_radii(self):
    """Generate radii"""
    denom = pow(-np.log(self.grbf_lambda), 1.0 / self.tau_vals)
    self.components_['radii'] = self.dF_vals / denom
```
"""Generate radii"""
denom = pow(-np.log(self.grbf_lambda), 1.0/self.tau_vals)
self.components_['radii'] = self.dF_vals/denom | [
"def",
"_compute_radii",
"(",
"self",
")",
":",
"denom",
"=",
"pow",
"(",
"-",
"np",
".",
"log",
"(",
"self",
".",
"grbf_lambda",
")",
",",
"1.0",
"/",
"self",
".",
"tau_vals",
")",
"self",
".",
"components_",
"[",
"'radii'",
"]",
"=",
"self",
".",
"dF_vals",
"/",
"denom"
] | Generate radii | [
"Generate",
"radii"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/random_layer.py#L521-L525 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/elm.py | GenELMRegressor._fit_regression | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L149-L160

```python
def _fit_regression(self, y):
    """
    fit regression using pseudo-inverse
    or supplied regressor
    """
    if self.regressor is None:
        self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
    else:
        self.regressor.fit(self.hidden_activations_, y)

    self.fitted_ = True
```
"""
fit regression using pseudo-inverse
or supplied regressor
"""
if self.regressor is None:
self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
else:
self.regressor.fit(self.hidden_activations_, y)
self.fitted_ = True | [
"def",
"_fit_regression",
"(",
"self",
",",
"y",
")",
":",
"if",
"self",
".",
"regressor",
"is",
"None",
":",
"self",
".",
"coefs_",
"=",
"safe_sparse_dot",
"(",
"pinv2",
"(",
"self",
".",
"hidden_activations_",
")",
",",
"y",
")",
"else",
":",
"self",
".",
"regressor",
".",
"fit",
"(",
"self",
".",
"hidden_activations_",
",",
"y",
")",
"self",
".",
"fitted_",
"=",
"True"
] | fit regression using pseudo-inverse
or supplied regressor | [
"fit",
"regression",
"using",
"pseudo",
"-",
"inverse",
"or",
"supplied",
"regressor"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L149-L160 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/elm.py | GenELMRegressor.fit | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L162-L188

```python
def fit(self, X, y):
    """
    Fit the model using X, y as training data.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape [n_samples, n_features]
        Training vectors, where n_samples is the number of samples
        and n_features is the number of features.
    y : array-like of shape [n_samples, n_outputs]
        Target values (class labels in classification, real numbers in
        regression)

    Returns
    -------
    self : object
        Returns an instance of self.
    """
    # fit random hidden layer and compute the hidden layer activations
    self.hidden_activations_ = self.hidden_layer.fit_transform(X)

    # solve the regression from hidden activations to outputs
    self._fit_regression(as_float_array(y, copy=True))

    return self
```
"""
Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self.
"""
# fit random hidden layer and compute the hidden layer activations
self.hidden_activations_ = self.hidden_layer.fit_transform(X)
# solve the regression from hidden activations to outputs
self._fit_regression(as_float_array(y, copy=True))
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"# fit random hidden layer and compute the hidden layer activations",
"self",
".",
"hidden_activations_",
"=",
"self",
".",
"hidden_layer",
".",
"fit_transform",
"(",
"X",
")",
"# solve the regression from hidden activations to outputs",
"self",
".",
"_fit_regression",
"(",
"as_float_array",
"(",
"y",
",",
"copy",
"=",
"True",
")",
")",
"return",
"self"
] | Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self. | [
"Fit",
"the",
"model",
"using",
"X",
"y",
"as",
"training",
"data",
"."
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L162-L188 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/elm.py | GenELMRegressor._get_predictions | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L190-L199

```python
def _get_predictions(self):
    """
    get predictions using internal least squares/supplied regressor
    """
    if self.regressor is None:
        preds = safe_sparse_dot(self.hidden_activations_, self.coefs_)
    else:
        preds = self.regressor.predict(self.hidden_activations_)

    return preds
```
"""
get predictions using internal least squares/supplied regressor
"""
if self.regressor is None:
preds = safe_sparse_dot(self.hidden_activations_, self.coefs_)
else:
preds = self.regressor.predict(self.hidden_activations_)
return preds | [
"def",
"_get_predictions",
"(",
"self",
")",
":",
"if",
"self",
".",
"regressor",
"is",
"None",
":",
"preds",
"=",
"safe_sparse_dot",
"(",
"self",
".",
"hidden_activations_",
",",
"self",
".",
"coefs_",
")",
"else",
":",
"preds",
"=",
"self",
".",
"regressor",
".",
"predict",
"(",
"self",
".",
"hidden_activations_",
")",
"return",
"preds"
] | get predictions using internal least squares/supplied regressor | [
"get",
"predictions",
"using",
"internal",
"least",
"squares",
"/",
"supplied",
"regressor"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L190-L199 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/elm.py | GenELMRegressor.predict | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L201-L223

```python
def predict(self, X):
    """
    Predict values using the model

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape [n_samples, n_features]

    Returns
    -------
    C : numpy array of shape [n_samples, n_outputs]
        Predicted values.
    """
    if not self.fitted_:
        raise ValueError("ELMRegressor not fitted")

    # compute hidden layer activations
    self.hidden_activations_ = self.hidden_layer.transform(X)

    # compute output predictions for new hidden activations
    predictions = self._get_predictions()

    return predictions
```
"""
Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values.
"""
if not self.fitted_:
raise ValueError("ELMRegressor not fitted")
# compute hidden layer activations
self.hidden_activations_ = self.hidden_layer.transform(X)
# compute output predictions for new hidden activations
predictions = self._get_predictions()
return predictions | [
"def",
"predict",
"(",
"self",
",",
"X",
")",
":",
"if",
"not",
"self",
".",
"fitted_",
":",
"raise",
"ValueError",
"(",
"\"ELMRegressor not fitted\"",
")",
"# compute hidden layer activations",
"self",
".",
"hidden_activations_",
"=",
"self",
".",
"hidden_layer",
".",
"transform",
"(",
"X",
")",
"# compute output predictions for new hidden activations",
"predictions",
"=",
"self",
".",
"_get_predictions",
"(",
")",
"return",
"predictions"
] | Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values. | [
"Predict",
"values",
"using",
"the",
"model"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L201-L223 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/elm.py | GenELMClassifier.fit | python | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L299-L324

```python
def fit(self, X, y):
    """
    Fit the model using X, y as training data.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape [n_samples, n_features]
        Training vectors, where n_samples is the number of samples
        and n_features is the number of features.
    y : array-like of shape [n_samples, n_outputs]
        Target values (class labels in classification, real numbers in
        regression)

    Returns
    -------
    self : object
        Returns an instance of self.
    """
    self.classes_ = np.unique(y)

    y_bin = self.binarizer.fit_transform(y)

    self.genelm_regressor_.fit(X, y_bin)

    return self
```
"""
Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self.
"""
self.classes_ = np.unique(y)
y_bin = self.binarizer.fit_transform(y)
self.genelm_regressor_.fit(X, y_bin)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"self",
".",
"classes_",
"=",
"np",
".",
"unique",
"(",
"y",
")",
"y_bin",
"=",
"self",
".",
"binarizer",
".",
"fit_transform",
"(",
"y",
")",
"self",
".",
"genelm_regressor_",
".",
"fit",
"(",
"X",
",",
"y_bin",
")",
"return",
"self"
] | Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self. | [
"Fit",
"the",
"model",
"using",
"X",
"y",
"as",
"training",
"data",
"."
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L299-L324 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/elm.py | GenELMClassifier.predict | def predict(self, X):
"""Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values.
"""
raw_predictions = self.decision_function(X)
class_predictions = self.binarizer.inverse_transform(raw_predictions)
return class_predictions | python | def predict(self, X):
"""Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values.
"""
raw_predictions = self.decision_function(X)
class_predictions = self.binarizer.inverse_transform(raw_predictions)
return class_predictions | [
"def",
"predict",
"(",
"self",
",",
"X",
")",
":",
"raw_predictions",
"=",
"self",
".",
"decision_function",
"(",
"X",
")",
"class_predictions",
"=",
"self",
".",
"binarizer",
".",
"inverse_transform",
"(",
"raw_predictions",
")",
"return",
"class_predictions"
] | Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values. | [
"Predict",
"values",
"using",
"the",
"model"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L326-L341 |
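A hedged sketch of the GenELMClassifier flow documented in the two records above: fit() binarizes the labels, and predict() runs decision_function() and un-binarizes the result. The constructor arguments are assumptions, inferred by analogy with GenELMRegressor.

```python
# Hypothetical sketch; constructor kwargs and RandomLayer's path are assumed.
import numpy as np
from sklearn_extensions.extreme_learning_machines.elm import GenELMClassifier
from sklearn_extensions.extreme_learning_machines.random_layer import RandomLayer

X = np.random.rand(60, 4)
y = np.array([0, 1, 2] * 20)

clf = GenELMClassifier(hidden_layer=RandomLayer(n_hidden=30, random_state=0))
clf.fit(X, y)            # stores classes_, fits on binarizer.fit_transform(y)
labels = clf.predict(X)  # binarizer.inverse_transform(decision_function(X))
```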
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/elm.py | ELMRegressor._create_random_layer | def _create_random_layer(self):
"""Pass init params to RandomLayer"""
return RandomLayer(n_hidden=self.n_hidden,
alpha=self.alpha,
random_state=self.random_state,
activation_func=self.activation_func,
activation_args=self.activation_args,
user_components=self.user_components,
rbf_width=self.rbf_width) | python | def _create_random_layer(self):
"""Pass init params to RandomLayer"""
return RandomLayer(n_hidden=self.n_hidden,
alpha=self.alpha,
random_state=self.random_state,
activation_func=self.activation_func,
activation_args=self.activation_args,
user_components=self.user_components,
rbf_width=self.rbf_width) | [
"def",
"_create_random_layer",
"(",
"self",
")",
":",
"return",
"RandomLayer",
"(",
"n_hidden",
"=",
"self",
".",
"n_hidden",
",",
"alpha",
"=",
"self",
".",
"alpha",
",",
"random_state",
"=",
"self",
".",
"random_state",
",",
"activation_func",
"=",
"self",
".",
"activation_func",
",",
"activation_args",
"=",
"self",
".",
"activation_args",
",",
"user_components",
"=",
"self",
".",
"user_components",
",",
"rbf_width",
"=",
"self",
".",
"rbf_width",
")"
] | Pass init params to RandomLayer | [
"Pass",
"init",
"params",
"to",
"RandomLayer"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L430-L439 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/elm.py | ELMRegressor.fit | def fit(self, X, y):
"""
Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self.
"""
rhl = self._create_random_layer()
self._genelm_regressor = GenELMRegressor(hidden_layer=rhl,
regressor=self.regressor)
self._genelm_regressor.fit(X, y)
return self | python | def fit(self, X, y):
"""
Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self.
"""
rhl = self._create_random_layer()
self._genelm_regressor = GenELMRegressor(hidden_layer=rhl,
regressor=self.regressor)
self._genelm_regressor.fit(X, y)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"rhl",
"=",
"self",
".",
"_create_random_layer",
"(",
")",
"self",
".",
"_genelm_regressor",
"=",
"GenELMRegressor",
"(",
"hidden_layer",
"=",
"rhl",
",",
"regressor",
"=",
"self",
".",
"regressor",
")",
"self",
".",
"_genelm_regressor",
".",
"fit",
"(",
"X",
",",
"y",
")",
"return",
"self"
] | Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self. | [
"Fit",
"the",
"model",
"using",
"X",
"y",
"as",
"training",
"data",
"."
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L441-L465 |
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/elm.py | ELMRegressor.predict | def predict(self, X):
"""
Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values.
"""
if self._genelm_regressor is None:
raise ValueError("SimpleELMRegressor not fitted")
return self._genelm_regressor.predict(X) | python | def predict(self, X):
"""
Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values.
"""
if self._genelm_regressor is None:
raise ValueError("SimpleELMRegressor not fitted")
return self._genelm_regressor.predict(X) | [
"def",
"predict",
"(",
"self",
",",
"X",
")",
":",
"if",
"self",
".",
"_genelm_regressor",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"SimpleELMRegressor not fitted\"",
")",
"return",
"self",
".",
"_genelm_regressor",
".",
"predict",
"(",
"X",
")"
] | Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values. | [
"Predict",
"values",
"using",
"the",
"model"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L467-L483 |
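ELMRegressor is the convenience wrapper: fit() builds a RandomLayer from the instance attributes and delegates to GenELMRegressor. A hedged sketch, assuming the constructor accepts the same keyword arguments that _create_random_layer() forwards:

```python
# Sketch only; the exact ELMRegressor constructor signature is assumed from
# the attributes _create_random_layer() reads (n_hidden, alpha, rbf_width, ...).
import numpy as np
from sklearn_extensions.extreme_learning_machines.elm import ELMRegressor

X = np.random.rand(100, 5)
y = X.sum(axis=1)

elm = ELMRegressor(n_hidden=50, random_state=0)
elm.fit(X, y)           # RandomLayer -> GenELMRegressor under the hood
preds = elm.predict(X)  # raises ValueError if fit() was never called
```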
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/elm.py | ELMClassifier.fit | def fit(self, X, y):
"""
Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self.
"""
self.classes_ = np.unique(y)
y_bin = self.binarizer.fit_transform(y)
super(ELMClassifier, self).fit(X, y_bin)
return self | python | def fit(self, X, y):
"""
Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self.
"""
self.classes_ = np.unique(y)
y_bin = self.binarizer.fit_transform(y)
super(ELMClassifier, self).fit(X, y_bin)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"self",
".",
"classes_",
"=",
"np",
".",
"unique",
"(",
"y",
")",
"y_bin",
"=",
"self",
".",
"binarizer",
".",
"fit_transform",
"(",
"y",
")",
"super",
"(",
"ELMClassifier",
",",
"self",
")",
".",
"fit",
"(",
"X",
",",
"y_bin",
")",
"return",
"self"
] | Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self. | [
"Fit",
"the",
"model",
"using",
"X",
"y",
"as",
"training",
"data",
"."
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L572-L598 |
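A hedged sketch of ELMClassifier.fit(): it binarizes the labels and reuses the ELMRegressor machinery on the binarized targets. Constructor keyword arguments are assumptions; scoring is covered by the next record.

```python
# Illustrative only; constructor kwargs are assumed.
import numpy as np
from sklearn_extensions.extreme_learning_machines.elm import ELMClassifier

X = np.random.rand(60, 4)
y = np.array(['spam', 'ham'] * 30)

clf = ELMClassifier(n_hidden=100, random_state=0)
clf.fit(X, y)           # y -> binarizer.fit_transform(y), then super().fit
print(clf.score(X, y))  # accuracy_score; see the score() record below
```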
wdm0006/sklearn-extensions | sklearn_extensions/extreme_learning_machines/elm.py | ELMClassifier.score | def score(self, X, y):
"""Force use of accuracy score since we don't inherit
from ClassifierMixin"""
from sklearn.metrics import accuracy_score
return accuracy_score(y, self.predict(X)) | python | def score(self, X, y):
"""Force use of accuracy score since we don't inherit
from ClassifierMixin"""
from sklearn.metrics import accuracy_score
return accuracy_score(y, self.predict(X)) | [
"def",
"score",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"from",
"sklearn",
".",
"metrics",
"import",
"accuracy_score",
"return",
"accuracy_score",
"(",
"y",
",",
"self",
".",
"predict",
"(",
"X",
")",
")"
] | Force use of accuracy score since we don't inherit
from ClassifierMixin | [
"Force",
"use",
"of",
"accuracy",
"score",
"since",
"we",
"don",
"t",
"inherit",
"from",
"ClassifierMixin"
] | train | https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L618-L623 |
raphaelgyory/django-rest-messaging | rest_messaging/compat.py | compat_serializer_check_is_valid | def compat_serializer_check_is_valid(serializer):
""" http://www.django-rest-framework.org/topics/3.0-announcement/#using-is_validraise_exceptiontrue """
if DRFVLIST[0] >= 3:
serializer.is_valid(raise_exception=True)
else:
if not serializer.is_valid():
raise serializers.ValidationError('The serializer raises a validation error') | python | def compat_serializer_check_is_valid(serializer):
""" http://www.django-rest-framework.org/topics/3.0-announcement/#using-is_validraise_exceptiontrue """
if DRFVLIST[0] >= 3:
serializer.is_valid(raise_exception=True)
else:
if not serializer.is_valid():
raise serializers.ValidationError('The serializer raises a validation error') | [
"def",
"compat_serializer_check_is_valid",
"(",
"serializer",
")",
":",
"if",
"DRFVLIST",
"[",
"0",
"]",
">=",
"3",
":",
"serializer",
".",
"is_valid",
"(",
"raise_exception",
"=",
"True",
")",
"else",
":",
"if",
"not",
"serializer",
".",
"is_valid",
"(",
")",
":",
"raise",
"serializers",
".",
"ValidationError",
"(",
"'The serializer raises a validation error'",
")"
] | http://www.django-rest-framework.org/topics/3.0-announcement/#using-is_validraise_exceptiontrue | [
"http",
":",
"//",
"www",
".",
"django",
"-",
"rest",
"-",
"framework",
".",
"org",
"/",
"topics",
"/",
"3",
".",
"0",
"-",
"announcement",
"/",
"#using",
"-",
"is_validraise_exceptiontrue"
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/compat.py#L23-L29 |
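A hypothetical call site for the shim above; `MySerializer` and `request` are placeholders, not names from this repository.

```python
# Hypothetical call site; MySerializer and request are placeholders.
serializer = MySerializer(data=request.data)
compat_serializer_check_is_valid(serializer)
# DRF >= 3: serializer.is_valid(raise_exception=True)
# DRF 2:    manual is_valid() check that raises ValidationError
# Either way, execution only continues when the payload validated.
```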
raphaelgyory/django-rest-messaging | rest_messaging/compat.py | compat_serializer_attr | def compat_serializer_attr(serializer, obj):
"""
Required only for DRF 3.1, which does not make dynamically added attributes available in obj in serializer.
This is a quick solution but works without breaking anything.
"""
if DRFVLIST[0] == 3 and DRFVLIST[1] == 1:
for i in serializer.instance:
if i.id == obj.id:
return i
else:
return obj | python | def compat_serializer_attr(serializer, obj):
"""
Required only for DRF 3.1, which does not make dynamically added attributes available in obj in serializer.
This is a quick solution but works without breaking anything.
"""
if DRFVLIST[0] == 3 and DRFVLIST[1] == 1:
for i in serializer.instance:
if i.id == obj.id:
return i
else:
return obj | [
"def",
"compat_serializer_attr",
"(",
"serializer",
",",
"obj",
")",
":",
"if",
"DRFVLIST",
"[",
"0",
"]",
"==",
"3",
"and",
"DRFVLIST",
"[",
"1",
"]",
"==",
"1",
":",
"for",
"i",
"in",
"serializer",
".",
"instance",
":",
"if",
"i",
".",
"id",
"==",
"obj",
".",
"id",
":",
"return",
"i",
"else",
":",
"return",
"obj"
] | Required only for DRF 3.1, which does not make dynamically added attributes available in obj in serializer.
This is a quick solution but works without breaking anything. | [
"Required",
"only",
"for",
"DRF",
"3",
".",
"1",
"which",
"does",
"not",
"make",
"dynamically",
"added",
"attributes",
"available",
"in",
"obj",
"in",
"serializer",
".",
"This",
"is",
"a",
"quick",
"solution",
"but",
"works",
"without",
"breaking",
"anything",
"."
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/compat.py#L40-L50 |
raphaelgyory/django-rest-messaging | rest_messaging/compat.py | compat_get_paginated_response | def compat_get_paginated_response(view, page):
""" get_paginated_response is unknown to DRF 3.0 """
if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
from rest_messaging.serializers import ComplexMessageSerializer # circular import
serializer = ComplexMessageSerializer(page, many=True)
return view.get_paginated_response(serializer.data)
else:
serializer = view.get_pagination_serializer(page)
return Response(serializer.data) | python | def compat_get_paginated_response(view, page):
""" get_paginated_response is unknown to DRF 3.0 """
if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
from rest_messaging.serializers import ComplexMessageSerializer # circular import
serializer = ComplexMessageSerializer(page, many=True)
return view.get_paginated_response(serializer.data)
else:
serializer = view.get_pagination_serializer(page)
return Response(serializer.data) | [
"def",
"compat_get_paginated_response",
"(",
"view",
",",
"page",
")",
":",
"if",
"DRFVLIST",
"[",
"0",
"]",
"==",
"3",
"and",
"DRFVLIST",
"[",
"1",
"]",
">=",
"1",
":",
"from",
"rest_messaging",
".",
"serializers",
"import",
"ComplexMessageSerializer",
"# circular import",
"serializer",
"=",
"ComplexMessageSerializer",
"(",
"page",
",",
"many",
"=",
"True",
")",
"return",
"view",
".",
"get_paginated_response",
"(",
"serializer",
".",
"data",
")",
"else",
":",
"serializer",
"=",
"view",
".",
"get_pagination_serializer",
"(",
"page",
")",
"return",
"Response",
"(",
"serializer",
".",
"data",
")"
] | get_paginated_response is unknown to DRF 3.0 | [
"get_paginated_response",
"is",
"unknown",
"to",
"DRF",
"3",
".",
"0"
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/compat.py#L67-L75 |
raphaelgyory/django-rest-messaging | rest_messaging/compat.py | compat_pagination_messages | def compat_pagination_messages(cls):
"""
For DRF 3.1 and higher, pagination is defined at the paginator level (see http://www.django-rest-framework.org/topics/3.2-announcement/).
For DRF 3.0 and lower, it can be handled at the view level.
"""
if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
setattr(cls, "pagination_class", MessagePagination)
return cls
else:
# DRF 2 pagination
setattr(cls, "paginate_by", getattr(settings, "DJANGO_REST_MESSAGING_MESSAGES_PAGE_SIZE", 30))
return cls | python | def compat_pagination_messages(cls):
"""
For DRF 3.1 and higher, pagination is defined at the paginator level (see http://www.django-rest-framework.org/topics/3.2-announcement/).
For DRF 3.0 and lower, it can be handled at the view level.
"""
if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
setattr(cls, "pagination_class", MessagePagination)
return cls
else:
# DRF 2 pagination
setattr(cls, "paginate_by", getattr(settings, "DJANGO_REST_MESSAGING_MESSAGES_PAGE_SIZE", 30))
return cls | [
"def",
"compat_pagination_messages",
"(",
"cls",
")",
":",
"if",
"DRFVLIST",
"[",
"0",
"]",
"==",
"3",
"and",
"DRFVLIST",
"[",
"1",
"]",
">=",
"1",
":",
"setattr",
"(",
"cls",
",",
"\"pagination_class\"",
",",
"MessagePagination",
")",
"return",
"cls",
"else",
":",
"# DRF 2 pagination",
"setattr",
"(",
"cls",
",",
"\"paginate_by\"",
",",
"getattr",
"(",
"settings",
",",
"\"DJANGO_REST_MESSAGING_MESSAGES_PAGE_SIZE\"",
",",
"30",
")",
")",
"return",
"cls"
] | For DRF 3.1 and higher, pagination is defined at the paginator level (see http://www.django-rest-framework.org/topics/3.2-announcement/).
For DRF 3.0 and lower, it can be handled at the view level. | [
"For",
"DRF",
"3",
".",
"1",
"and",
"higher",
"pagination",
"is",
"defined",
"at",
"the",
"paginator",
"level",
"(",
"see",
"http",
":",
"//",
"www",
".",
"django",
"-",
"rest",
"-",
"framework",
".",
"org",
"/",
"topics",
"/",
"3",
".",
"2",
"-",
"announcement",
"/",
")",
".",
"For",
"DRF",
"3",
".",
"0",
"and",
"lower",
"it",
"can",
"be",
"handled",
"at",
"the",
"view",
"level",
"."
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/compat.py#L78-L89 |
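A hedged sketch of applying the decorator above to a viewset; the viewset body is a placeholder.

```python
# Illustrative decorator application; the viewset itself is a placeholder.
from rest_framework import viewsets
from rest_messaging.compat import compat_pagination_messages

@compat_pagination_messages
class MessageViewSet(viewsets.ModelViewSet):
    pass

# DRF >= 3.1: the class gains pagination_class = MessagePagination.
# Older DRF:  it gains paginate_by, read from the
# DJANGO_REST_MESSAGING_MESSAGES_PAGE_SIZE setting (default 30).
```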
raphaelgyory/django-rest-messaging | rest_messaging/serializers.py | ThreadSerializer.get_participants | def get_participants(self, obj):
""" Allows defining a callback for serializing information about the user. """
# we set the many to many serialization to False, because we only want it with retrieve requests
if self.callback is None:
return [participant.id for participant in obj.participants.all()]
else:
# we do not want user information
return self.callback(obj) | python | def get_participants(self, obj):
""" Allows defining a callback for serializing information about the user. """
# we set the many to many serialization to False, because we only want it with retrieve requests
if self.callback is None:
return [participant.id for participant in obj.participants.all()]
else:
# we do not want user information
return self.callback(obj) | [
"def",
"get_participants",
"(",
"self",
",",
"obj",
")",
":",
"# we set the many to many serialization to False, because we only want it with retrieve requests",
"if",
"self",
".",
"callback",
"is",
"None",
":",
"return",
"[",
"participant",
".",
"id",
"for",
"participant",
"in",
"obj",
".",
"participants",
".",
"all",
"(",
")",
"]",
"else",
":",
"# we do not want user information",
"return",
"self",
".",
"callback",
"(",
"obj",
")"
] | Allows defining a callback for serializing information about the user. | [
"Allows",
"defining",
"a",
"callback",
"for",
"serializing",
"information",
"about",
"the",
"user",
"."
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/serializers.py#L26-L33 |
raphaelgyory/django-rest-messaging | rest_messaging/serializers.py | ComplexMessageSerializer.get_is_notification | def get_is_notification(self, obj):
""" We say if the message should trigger a notification """
try:
o = compat_serializer_attr(self, obj)
return o.is_notification
except Exception:
return False | python | def get_is_notification(self, obj):
""" We say if the message should trigger a notification """
try:
o = compat_serializer_attr(self, obj)
return o.is_notification
except Exception:
return False | [
"def",
"get_is_notification",
"(",
"self",
",",
"obj",
")",
":",
"try",
":",
"o",
"=",
"compat_serializer_attr",
"(",
"self",
",",
"obj",
")",
"return",
"o",
".",
"is_notification",
"except",
"Exception",
":",
"return",
"False"
] | We say if the message should trigger a notification | [
"We",
"say",
"if",
"the",
"message",
"should",
"trigger",
"a",
"notification"
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/serializers.py#L57-L63 |
raphaelgyory/django-rest-messaging | rest_messaging/serializers.py | ComplexMessageSerializer.get_readers | def get_readers(self, obj):
""" Return the ids of the people who read the message instance. """
try:
o = compat_serializer_attr(self, obj)
return o.readers
except Exception:
return [] | python | def get_readers(self, obj):
""" Return the ids of the people who read the message instance. """
try:
o = compat_serializer_attr(self, obj)
return o.readers
except Exception:
return [] | [
"def",
"get_readers",
"(",
"self",
",",
"obj",
")",
":",
"try",
":",
"o",
"=",
"compat_serializer_attr",
"(",
"self",
",",
"obj",
")",
"return",
"o",
".",
"readers",
"except",
"Exception",
":",
"return",
"[",
"]"
] | Return the ids of the people who read the message instance. | [
"Return",
"the",
"ids",
"of",
"the",
"people",
"who",
"read",
"the",
"message",
"instance",
"."
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/serializers.py#L65-L71 |
raphaelgyory/django-rest-messaging | rest_messaging/models.py | ThreadManager.get_threads_where_participant_is_active | def get_threads_where_participant_is_active(self, participant_id):
""" Gets all the threads in which the current participant is involved. The method excludes threads where the participant has left. """
participations = Participation.objects.\
filter(participant__id=participant_id).\
exclude(date_left__lte=now()).\
distinct().\
select_related('thread')
return Thread.objects.\
filter(id__in=[p.thread.id for p in participations]).\
distinct() | python | def get_threads_where_participant_is_active(self, participant_id):
""" Gets all the threads in which the current participant is involved. The method excludes threads where the participant has left. """
participations = Participation.objects.\
filter(participant__id=participant_id).\
exclude(date_left__lte=now()).\
distinct().\
select_related('thread')
return Thread.objects.\
filter(id__in=[p.thread.id for p in participations]).\
distinct() | [
"def",
"get_threads_where_participant_is_active",
"(",
"self",
",",
"participant_id",
")",
":",
"participations",
"=",
"Participation",
".",
"objects",
".",
"filter",
"(",
"participant__id",
"=",
"participant_id",
")",
".",
"exclude",
"(",
"date_left__lte",
"=",
"now",
"(",
")",
")",
".",
"distinct",
"(",
")",
".",
"select_related",
"(",
"'thread'",
")",
"return",
"Thread",
".",
"objects",
".",
"filter",
"(",
"id__in",
"=",
"[",
"p",
".",
"thread",
".",
"id",
"for",
"p",
"in",
"participations",
"]",
")",
".",
"distinct",
"(",
")"
] | Gets all the threads in which the current participant is involved. The method excludes threads where the participant has left. | [
"Gets",
"all",
"the",
"threads",
"in",
"which",
"the",
"current",
"participant",
"is",
"involved",
".",
"The",
"method",
"excludes",
"threads",
"where",
"the",
"participant",
"has",
"left",
"."
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L34-L44 |
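A hedged call sketch; the participant id is illustrative. The `Thread.managers` attribute name is taken from the get_lasts_messages_of_threads record further down.

```python
# Illustrative call; id 42 is a placeholder.
from rest_messaging.models import Thread

active = Thread.managers.get_threads_where_participant_is_active(42)
# Participations with date_left in the past are excluded, so `active`
# only holds threads the participant has not left.
```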
raphaelgyory/django-rest-messaging | rest_messaging/models.py | ThreadManager.get_active_threads_involving_all_participants | def get_active_threads_involving_all_participants(self, *participant_ids):
""" Gets the threads where the specified participants are active and no one has left. """
query = Thread.objects.\
exclude(participation__date_left__lte=now()).\
annotate(count_participants=Count('participants')).\
filter(count_participants=len(participant_ids))
for participant_id in participant_ids:
query = query.filter(participants__id=participant_id)
return query.distinct() | python | def get_active_threads_involving_all_participants(self, *participant_ids):
""" Gets the threads where the specified participants are active and no one has left. """
query = Thread.objects.\
exclude(participation__date_left__lte=now()).\
annotate(count_participants=Count('participants')).\
filter(count_participants=len(participant_ids))
for participant_id in participant_ids:
query = query.filter(participants__id=participant_id)
return query.distinct() | [
"def",
"get_active_threads_involving_all_participants",
"(",
"self",
",",
"*",
"participant_ids",
")",
":",
"query",
"=",
"Thread",
".",
"objects",
".",
"exclude",
"(",
"participation__date_left__lte",
"=",
"now",
"(",
")",
")",
".",
"annotate",
"(",
"count_participants",
"=",
"Count",
"(",
"'participants'",
")",
")",
".",
"filter",
"(",
"count_participants",
"=",
"len",
"(",
"participant_ids",
")",
")",
"for",
"participant_id",
"in",
"participant_ids",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"participants__id",
"=",
"participant_id",
")",
"return",
"query",
".",
"distinct",
"(",
")"
] | Gets the threads where the specified participants are active and no one has left. | [
"Gets",
"the",
"threads",
"where",
"the",
"specified",
"participants",
"are",
"active",
"and",
"no",
"one",
"has",
"left",
"."
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L46-L57 |
raphaelgyory/django-rest-messaging | rest_messaging/models.py | ThreadManager.get_or_create_thread | def get_or_create_thread(self, request, name=None, *participant_ids):
"""
When a Participant posts a message to other participants without specifying an existing Thread,
we must
1. Create a new Thread if they have not yet opened the discussion.
2. If they have already opened the discussion and multiple Threads are not allowed for the same users, we must
re-attach this message to the existing thread.
3. If they have already opened the discussion and multiple Threads are allowed, we simply create a new one.
"""
# we get the current participant
# or create him if he does not exist
participant_ids = list(participant_ids)
if request.rest_messaging_participant.id not in participant_ids:
participant_ids.append(request.rest_messaging_participant.id)
# we need at least one other participant
if len(participant_ids) < 2:
raise Exception('At least two participants are required.')
if getattr(settings, "REST_MESSAGING_THREAD_UNIQUE_FOR_ACTIVE_RECIPIENTS", True) is True:
# if we limit the number of threads by active participants
# we ensure a thread is not already running
existing_threads = self.get_active_threads_involving_all_participants(*participant_ids)
if len(list(existing_threads)) > 0:
return existing_threads[0]
# we have no existing Thread or multiple Thread instances are allowed
thread = Thread.objects.create(name=name)
# we add the participants
thread.add_participants(request, *participant_ids)
# we send a signal to say the thread with participants is created
post_save.send(Thread, instance=thread, created=True, created_and_add_participants=True, request_participant_id=request.rest_messaging_participant.id)
return thread | python | def get_or_create_thread(self, request, name=None, *participant_ids):
"""
When a Participant posts a message to other participants without specifying an existing Thread,
we must
1. Create a new Thread if they have not yet opened the discussion.
2. If they have already opened the discussion and multiple Threads are not allowed for the same users, we must
re-attach this message to the existing thread.
3. If they have already opened the discussion and multiple Threads are allowed, we simply create a new one.
"""
# we get the current participant
# or create him if he does not exist
participant_ids = list(participant_ids)
if request.rest_messaging_participant.id not in participant_ids:
participant_ids.append(request.rest_messaging_participant.id)
# we need at least one other participant
if len(participant_ids) < 2:
raise Exception('At least two participants are required.')
if getattr(settings, "REST_MESSAGING_THREAD_UNIQUE_FOR_ACTIVE_RECIPIENTS", True) is True:
# if we limit the number of threads by active participants
# we ensure a thread is not already running
existing_threads = self.get_active_threads_involving_all_participants(*participant_ids)
if len(list(existing_threads)) > 0:
return existing_threads[0]
# we have no existing Thread or multiple Thread instances are allowed
thread = Thread.objects.create(name=name)
# we add the participants
thread.add_participants(request, *participant_ids)
# we send a signal to say the thread with participants is created
post_save.send(Thread, instance=thread, created=True, created_and_add_participants=True, request_participant_id=request.rest_messaging_participant.id)
return thread | [
"def",
"get_or_create_thread",
"(",
"self",
",",
"request",
",",
"name",
"=",
"None",
",",
"*",
"participant_ids",
")",
":",
"# we get the current participant",
"# or create him if he does not exist",
"participant_ids",
"=",
"list",
"(",
"participant_ids",
")",
"if",
"request",
".",
"rest_messaging_participant",
".",
"id",
"not",
"in",
"participant_ids",
":",
"participant_ids",
".",
"append",
"(",
"request",
".",
"rest_messaging_participant",
".",
"id",
")",
"# we need at least one other participant",
"if",
"len",
"(",
"participant_ids",
")",
"<",
"2",
":",
"raise",
"Exception",
"(",
"'At least two participants are required.'",
")",
"if",
"getattr",
"(",
"settings",
",",
"\"REST_MESSAGING_THREAD_UNIQUE_FOR_ACTIVE_RECIPIENTS\"",
",",
"True",
")",
"is",
"True",
":",
"# if we limit the number of threads by active participants",
"# we ensure a thread is not already running",
"existing_threads",
"=",
"self",
".",
"get_active_threads_involving_all_participants",
"(",
"*",
"participant_ids",
")",
"if",
"len",
"(",
"list",
"(",
"existing_threads",
")",
")",
">",
"0",
":",
"return",
"existing_threads",
"[",
"0",
"]",
"# we have no existing Thread or multiple Thread instances are allowed",
"thread",
"=",
"Thread",
".",
"objects",
".",
"create",
"(",
"name",
"=",
"name",
")",
"# we add the participants",
"thread",
".",
"add_participants",
"(",
"request",
",",
"*",
"participant_ids",
")",
"# we send a signal to say the thread with participants is created",
"post_save",
".",
"send",
"(",
"Thread",
",",
"instance",
"=",
"thread",
",",
"created",
"=",
"True",
",",
"created_and_add_participants",
"=",
"True",
",",
"request_participant_id",
"=",
"request",
".",
"rest_messaging_participant",
".",
"id",
")",
"return",
"thread"
] | When a Participant posts a message to other participants without specifying an existing Thread,
we must
1. Create a new Thread if they have not yet opened the discussion.
2. If they have already opened the discussion and multiple Threads are not allowed for the same users, we must
re-attach this message to the existing thread.
3. If they have already opened the discussion and multiple Threads are allowed, we simply create a new one. | [
"When",
"a",
"Participant",
"posts",
"a",
"message",
"to",
"other",
"participants",
"without",
"specifying",
"an",
"existing",
"Thread",
"we",
"must",
"1",
".",
"Create",
"a",
"new",
"Thread",
"if",
"they",
"have",
"not",
"yet",
"opened",
"the",
"discussion",
".",
"2",
".",
"If",
"they",
"have",
"already",
"opened",
"the",
"discussion",
"and",
"multiple",
"Threads",
"are",
"not",
"allowed",
"for",
"the",
"same",
"users",
"we",
"must",
"re",
"-",
"attach",
"this",
"message",
"to",
"the",
"existing",
"thread",
".",
"3",
".",
"If",
"they",
"have",
"already",
"opened",
"the",
"discussion",
"and",
"multiple",
"Threads",
"are",
"allowed",
"we",
"simply",
"create",
"a",
"new",
"one",
"."
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L59-L96 |
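A hedged sketch of the three documented cases; the participant ids are illustrative, and `request` must already carry `rest_messaging_participant` (attached elsewhere in the app, not shown in this record).

```python
# Illustrative call; 7 and 12 are placeholder participant ids.
thread = Thread.managers.get_or_create_thread(request, None, 7, 12)
# 1. the requester's id is appended if missing (>= 2 participants required)
# 2. with REST_MESSAGING_THREAD_UNIQUE_FOR_ACTIVE_RECIPIENTS (default True),
#    an existing active thread between exactly these participants is reused
# 3. otherwise a new Thread is created and a post_save signal is emitted
```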
raphaelgyory/django-rest-messaging | rest_messaging/models.py | MessageManager.return_daily_messages_count | def return_daily_messages_count(self, sender):
""" Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits """
h24 = now() - timedelta(days=1)
return Message.objects.filter(sender=sender, sent_at__gte=h24).count() | python | def return_daily_messages_count(self, sender):
""" Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits """
h24 = now() - timedelta(days=1)
return Message.objects.filter(sender=sender, sent_at__gte=h24).count() | [
"def",
"return_daily_messages_count",
"(",
"self",
",",
"sender",
")",
":",
"h24",
"=",
"now",
"(",
")",
"-",
"timedelta",
"(",
"days",
"=",
"1",
")",
"return",
"Message",
".",
"objects",
".",
"filter",
"(",
"sender",
"=",
"sender",
",",
"sent_at__gte",
"=",
"h24",
")",
".",
"count",
"(",
")"
] | Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits | [
"Returns",
"the",
"number",
"of",
"messages",
"sent",
"in",
"the",
"last",
"24",
"hours",
"so",
"we",
"can",
"ensure",
"the",
"user",
"does",
"not",
"exceed",
"his",
"messaging",
"limits"
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L191-L194 |
raphaelgyory/django-rest-messaging | rest_messaging/models.py | MessageManager.check_who_read | def check_who_read(self, messages):
""" Check who read each message. """
# we get the corresponding Participation objects
for m in messages:
readers = []
for p in m.thread.participation_set.all():
if p.date_last_check is None:
pass
elif p.date_last_check > m.sent_at:
# the message has been read
readers.append(p.participant.id)
setattr(m, "readers", readers)
return messages | python | def check_who_read(self, messages):
""" Check who read each message. """
# we get the corresponding Participation objects
for m in messages:
readers = []
for p in m.thread.participation_set.all():
if p.date_last_check is None:
pass
elif p.date_last_check > m.sent_at:
# the message has been read
readers.append(p.participant.id)
setattr(m, "readers", readers)
return messages | [
"def",
"check_who_read",
"(",
"self",
",",
"messages",
")",
":",
"# we get the corresponding Participation objects",
"for",
"m",
"in",
"messages",
":",
"readers",
"=",
"[",
"]",
"for",
"p",
"in",
"m",
".",
"thread",
".",
"participation_set",
".",
"all",
"(",
")",
":",
"if",
"p",
".",
"date_last_check",
"is",
"None",
":",
"pass",
"elif",
"p",
".",
"date_last_check",
">",
"m",
".",
"sent_at",
":",
"# the message has been read",
"readers",
".",
"append",
"(",
"p",
".",
"participant",
".",
"id",
")",
"setattr",
"(",
"m",
",",
"\"readers\"",
",",
"readers",
")",
"return",
"messages"
] | Check who read each message. | [
"Check",
"who",
"read",
"each",
"message",
"."
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L196-L209 |
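A sketch of the `readers` annotation added above, assuming MessageManager is installed as `Message.objects` (the manager methods in the records below are called that way).

```python
# Illustrative; thread id 3 is a placeholder.
from rest_messaging.models import Message

msgs = Message.objects.filter(thread__id=3).select_related('thread')
for m in Message.objects.check_who_read(msgs):
    # m.readers holds the participant ids whose Participation.date_last_check
    # is later than m.sent_at
    print(m.id, m.readers)
```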
raphaelgyory/django-rest-messaging | rest_messaging/models.py | MessageManager.check_is_notification | def check_is_notification(self, participant_id, messages):
""" Check if each message requires a notification for the specified participant. """
try:
# we get the last check
last_check = NotificationCheck.objects.filter(participant__id=participant_id).latest('id').date_check
except Exception:
# we have no notification check
# all the messages are considered as new
for m in messages:
m.is_notification = True
return messages
for m in messages:
if m.sent_at > last_check and m.sender.id != participant_id:
setattr(m, "is_notification", True)
else:
setattr(m, "is_notification", False)
return messages | python | def check_is_notification(self, participant_id, messages):
""" Check if each message requires a notification for the specified participant. """
try:
# we get the last check
last_check = NotificationCheck.objects.filter(participant__id=participant_id).latest('id').date_check
except Exception:
# we have no notification check
# all the messages are considered as new
for m in messages:
m.is_notification = True
return messages
for m in messages:
if m.sent_at > last_check and m.sender.id != participant_id:
setattr(m, "is_notification", True)
else:
setattr(m, "is_notification", False)
return messages | [
"def",
"check_is_notification",
"(",
"self",
",",
"participant_id",
",",
"messages",
")",
":",
"try",
":",
"# we get the last check",
"last_check",
"=",
"NotificationCheck",
".",
"objects",
".",
"filter",
"(",
"participant__id",
"=",
"participant_id",
")",
".",
"latest",
"(",
"'id'",
")",
".",
"date_check",
"except",
"Exception",
":",
"# we have no notification check",
"# all the messages are considered as new",
"for",
"m",
"in",
"messages",
":",
"m",
".",
"is_notification",
"=",
"True",
"return",
"messages",
"for",
"m",
"in",
"messages",
":",
"if",
"m",
".",
"sent_at",
">",
"last_check",
"and",
"m",
".",
"sender",
".",
"id",
"!=",
"participant_id",
":",
"setattr",
"(",
"m",
",",
"\"is_notification\"",
",",
"True",
")",
"else",
":",
"setattr",
"(",
"m",
",",
"\"is_notification\"",
",",
"False",
")",
"return",
"messages"
] | Check if each message requires a notification for the specified participant. | [
"Check",
"if",
"each",
"message",
"requires",
"a",
"notification",
"for",
"the",
"specified",
"participant",
"."
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L211-L228 |
raphaelgyory/django-rest-messaging | rest_messaging/models.py | MessageManager.get_lasts_messages_of_threads | def get_lasts_messages_of_threads(self, participant_id, check_who_read=True, check_is_notification=True):
""" Returns the last message in each thread """
# we get the last message for each thread
# we must query the messages using two queries because only Postgres supports .order_by('thread', '-sent_at').distinct('thread')
threads = Thread.managers.\
get_threads_where_participant_is_active(participant_id).\
annotate(last_message_id=Max('message__id'))
messages = Message.objects.filter(id__in=[thread.last_message_id for thread in threads]).\
order_by('-id').\
distinct().\
select_related('thread', 'sender')
if check_who_read is True:
messages = messages.prefetch_related('thread__participation_set', 'thread__participation_set__participant')
messages = self.check_who_read(messages)
else:
messages = messages.prefetch_related('thread__participants')
if check_is_notification is True:
messages = self.check_is_notification(participant_id, messages)
return messages | python | def get_lasts_messages_of_threads(self, participant_id, check_who_read=True, check_is_notification=True):
""" Returns the last message in each thread """
# we get the last message for each thread
# we must query the messages using two queries because only Postgres supports .order_by('thread', '-sent_at').distinct('thread')
threads = Thread.managers.\
get_threads_where_participant_is_active(participant_id).\
annotate(last_message_id=Max('message__id'))
messages = Message.objects.filter(id__in=[thread.last_message_id for thread in threads]).\
order_by('-id').\
distinct().\
select_related('thread', 'sender')
if check_who_read is True:
messages = messages.prefetch_related('thread__participation_set', 'thread__participation_set__participant')
messages = self.check_who_read(messages)
else:
messages = messages.prefetch_related('thread__participants')
if check_is_notification is True:
messages = self.check_is_notification(participant_id, messages)
return messages | [
"def",
"get_lasts_messages_of_threads",
"(",
"self",
",",
"participant_id",
",",
"check_who_read",
"=",
"True",
",",
"check_is_notification",
"=",
"True",
")",
":",
"# we get the last message for each thread",
"# we must query the messages using two queries because only Postgres supports .order_by('thread', '-sent_at').distinct('thread')",
"threads",
"=",
"Thread",
".",
"managers",
".",
"get_threads_where_participant_is_active",
"(",
"participant_id",
")",
".",
"annotate",
"(",
"last_message_id",
"=",
"Max",
"(",
"'message__id'",
")",
")",
"messages",
"=",
"Message",
".",
"objects",
".",
"filter",
"(",
"id__in",
"=",
"[",
"thread",
".",
"last_message_id",
"for",
"thread",
"in",
"threads",
"]",
")",
".",
"order_by",
"(",
"'-id'",
")",
".",
"distinct",
"(",
")",
".",
"select_related",
"(",
"'thread'",
",",
"'sender'",
")",
"if",
"check_who_read",
"is",
"True",
":",
"messages",
"=",
"messages",
".",
"prefetch_related",
"(",
"'thread__participation_set'",
",",
"'thread__participation_set__participant'",
")",
"messages",
"=",
"self",
".",
"check_who_read",
"(",
"messages",
")",
"else",
":",
"messages",
"=",
"messages",
".",
"prefetch_related",
"(",
"'thread__participants'",
")",
"if",
"check_is_notification",
"is",
"True",
":",
"messages",
"=",
"self",
".",
"check_is_notification",
"(",
"participant_id",
",",
"messages",
")",
"return",
"messages"
] | Returns the last message in each thread | [
"Returns",
"the",
"last",
"message",
"in",
"each",
"thread"
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L230-L250 |
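The comment above explains the two-query shape: `.order_by('thread', '-sent_at').distinct('thread')` is Postgres-only, so the manager first annotates each active thread with its newest message id and then fetches those messages. An equivalent hedged sketch:

```python
# Sketch of the same two-step pattern; participant id 42 is a placeholder.
from django.db.models import Max  # already imported by the source module
from rest_messaging.models import Thread, Message

threads = Thread.managers.get_threads_where_participant_is_active(42) \
                         .annotate(last_message_id=Max('message__id'))
lasts = Message.objects.filter(
    id__in=[t.last_message_id for t in threads]).order_by('-id')
```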
raphaelgyory/django-rest-messaging | rest_messaging/models.py | MessageManager.get_all_messages_in_thread | def get_all_messages_in_thread(self, participant_id, thread_id, check_who_read=True):
""" Returns all the messages in a thread. """
try:
messages = Message.objects.filter(thread__id=thread_id).\
order_by('-id').\
select_related('thread').\
prefetch_related('thread__participation_set', 'thread__participation_set__participant')
except Exception:
return Message.objects.none()
messages = self.check_who_read(messages)
return messages | python | def get_all_messages_in_thread(self, participant_id, thread_id, check_who_read=True):
""" Returns all the messages in a thread. """
try:
messages = Message.objects.filter(thread__id=thread_id).\
order_by('-id').\
select_related('thread').\
prefetch_related('thread__participation_set', 'thread__participation_set__participant')
except Exception:
return Message.objects.none()
messages = self.check_who_read(messages)
return messages | [
"def",
"get_all_messages_in_thread",
"(",
"self",
",",
"participant_id",
",",
"thread_id",
",",
"check_who_read",
"=",
"True",
")",
":",
"try",
":",
"messages",
"=",
"Message",
".",
"objects",
".",
"filter",
"(",
"thread__id",
"=",
"thread_id",
")",
".",
"order_by",
"(",
"'-id'",
")",
".",
"select_related",
"(",
"'thread'",
")",
".",
"prefetch_related",
"(",
"'thread__participation_set'",
",",
"'thread__participation_set__participant'",
")",
"except",
"Exception",
":",
"return",
"Message",
".",
"objects",
".",
"none",
"(",
")",
"messages",
"=",
"self",
".",
"check_who_read",
"(",
"messages",
")",
"return",
"messages"
] | Returns all the messages in a thread. | [
"Returns",
"all",
"the",
"messages",
"in",
"a",
"thread",
"."
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L252-L263 |
raphaelgyory/django-rest-messaging | rest_messaging/views.py | ThreadView.create | def create(self, request, *args, **kwargs):
""" We ensure the Thread only involves eligible participants. """
serializer = self.get_serializer(data=compat_get_request_data(request))
compat_serializer_check_is_valid(serializer)
self.perform_create(request, serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) | python | def create(self, request, *args, **kwargs):
""" We ensure the Thread only involves eligible participants. """
serializer = self.get_serializer(data=compat_get_request_data(request))
compat_serializer_check_is_valid(serializer)
self.perform_create(request, serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) | [
"def",
"create",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"serializer",
"=",
"self",
".",
"get_serializer",
"(",
"data",
"=",
"compat_get_request_data",
"(",
"request",
")",
")",
"compat_serializer_check_is_valid",
"(",
"serializer",
")",
"self",
".",
"perform_create",
"(",
"request",
",",
"serializer",
")",
"headers",
"=",
"self",
".",
"get_success_headers",
"(",
"serializer",
".",
"data",
")",
"return",
"Response",
"(",
"serializer",
".",
"data",
",",
"status",
"=",
"status",
".",
"HTTP_201_CREATED",
",",
"headers",
"=",
"headers",
")"
] | We ensure the Thread only involves eligible participants. | [
"We",
"ensure",
"the",
"Thread",
"only",
"involves",
"eligible",
"participants",
"."
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/views.py#L36-L42 |
raphaelgyory/django-rest-messaging | rest_messaging/views.py | ThreadView.mark_thread_as_read | def mark_thread_as_read(self, request, pk=None):
""" Pk is the pk of the Thread to which the messages belong. """
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we save the date
try:
participation = Participation.objects.get(thread=thread, participant=request.rest_messaging_participant)
participation.date_last_check = now()
participation.save()
# we return the thread
serializer = self.get_serializer(thread)
return Response(serializer.data)
except Exception:
return Response(status=status.HTTP_400_BAD_REQUEST) | python | def mark_thread_as_read(self, request, pk=None):
""" Pk is the pk of the Thread to which the messages belong. """
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we save the date
try:
participation = Participation.objects.get(thread=thread, participant=request.rest_messaging_participant)
participation.date_last_check = now()
participation.save()
# we return the thread
serializer = self.get_serializer(thread)
return Response(serializer.data)
except Exception:
return Response(status=status.HTTP_400_BAD_REQUEST) | [
"def",
"mark_thread_as_read",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"# we get the thread and check for permission",
"thread",
"=",
"Thread",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"pk",
")",
"self",
".",
"check_object_permissions",
"(",
"request",
",",
"thread",
")",
"# we save the date",
"try",
":",
"participation",
"=",
"Participation",
".",
"objects",
".",
"get",
"(",
"thread",
"=",
"thread",
",",
"participant",
"=",
"request",
".",
"rest_messaging_participant",
")",
"participation",
".",
"date_last_check",
"=",
"now",
"(",
")",
"participation",
".",
"save",
"(",
")",
"# we return the thread",
"serializer",
"=",
"self",
".",
"get_serializer",
"(",
"thread",
")",
"return",
"Response",
"(",
"serializer",
".",
"data",
")",
"except",
"Exception",
":",
"return",
"Response",
"(",
"status",
"=",
"status",
".",
"HTTP_400_BAD_REQUEST",
")"
] | Pk is the pk of the Thread to which the messages belong. | [
"Pk",
"is",
"the",
"pk",
"of",
"the",
"Thread",
"to",
"which",
"the",
"messages",
"belong",
"."
] | train | https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/views.py#L107-L121 |
letuananh/chirptext | chirptext/chirpnet.py | SmartURL.process | def process(self, quoted=False):
''' Parse a URL '''
self.p = urlparse(self.raw)
self.scheme = self.p.scheme
self.netloc = self.p.netloc
self.opath = self.p.path if not quoted else quote(self.p.path)
self.path = [x for x in self.opath.split('/') if x]
self.params = self.p.params
self.query = parse_qs(self.p.query, keep_blank_values=True)
self.fragment = self.p.fragment | python | def process(self, quoted=False):
''' Parse a URL '''
self.p = urlparse(self.raw)
self.scheme = self.p.scheme
self.netloc = self.p.netloc
self.opath = self.p.path if not quoted else quote(self.p.path)
self.path = [x for x in self.opath.split('/') if x]
self.params = self.p.params
self.query = parse_qs(self.p.query, keep_blank_values=True)
self.fragment = self.p.fragment | [
"def",
"process",
"(",
"self",
",",
"quoted",
"=",
"False",
")",
":",
"self",
".",
"p",
"=",
"urlparse",
"(",
"self",
".",
"raw",
")",
"self",
".",
"scheme",
"=",
"self",
".",
"p",
".",
"scheme",
"self",
".",
"netloc",
"=",
"self",
".",
"p",
".",
"netloc",
"self",
".",
"opath",
"=",
"self",
".",
"p",
".",
"path",
"if",
"not",
"quoted",
"else",
"quote",
"(",
"self",
".",
"p",
".",
"path",
")",
"self",
".",
"path",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"opath",
".",
"split",
"(",
"'/'",
")",
"if",
"x",
"]",
"self",
".",
"params",
"=",
"self",
".",
"p",
".",
"params",
"self",
".",
"query",
"=",
"parse_qs",
"(",
"self",
".",
"p",
".",
"query",
",",
"keep_blank_values",
"=",
"True",
")",
"self",
".",
"fragment",
"=",
"self",
".",
"p",
".",
"fragment"
] | Parse a URL | [
"Parse",
"a",
"URL"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chirpnet.py#L61-L70 |
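A minimal sketch of the fields process() fills in, assuming the constructor stores the raw URL in `self.raw` and that process() must be called explicitly:

```python
# Hedged example; the SmartURL constructor signature is assumed.
from chirptext.chirpnet import SmartURL

u = SmartURL('https://example.com/a/b?q=1&empty=#frag')
u.process()
assert u.scheme == 'https' and u.netloc == 'example.com'
assert u.path == ['a', 'b']                    # empty segments are dropped
assert u.query == {'q': ['1'], 'empty': ['']}  # keep_blank_values=True
assert u.fragment == 'frag'
```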
letuananh/chirptext | chirptext/chirpnet.py | WebHelper.fetch | def fetch(self, url, encoding=None, force_refetch=False, nocache=False, quiet=True):
''' Fetch an HTML file as binary'''
try:
if not force_refetch and self.cache is not None and url in self.cache:
# try to look for content in cache
logging.debug('Retrieving content from cache for {}'.format(url))
return self.cache.retrieve_blob(url, encoding)
encoded_url = WebHelper.encode_url(url)
req = Request(encoded_url, headers={'User-Agent': 'Mozilla/5.0'})
# support gzip
req.add_header('Accept-encoding', 'gzip, deflate')
# Open URL
getLogger().info("Fetching: {url} |".format(url=url))
response = urlopen(req)
content = response.read()
# unzip if required
if 'Content-Encoding' in response.info() and response.info().get('Content-Encoding') == 'gzip':
# unzip
with gzip.open(BytesIO(content)) as gzfile:
content = gzfile.read()
# update cache if required
if self.cache is not None and not nocache:
if url not in self.cache:
self.cache.insert_blob(url, content)
return content.decode(encoding) if content and encoding else content
except URLError as e:
if hasattr(e, 'reason'):
getLogger().exception('We failed to reach {}. Reason: {}'.format(url, e.reason))
elif hasattr(e, 'code'):
getLogger().exception('The server couldn\'t fulfill the request. Error code: {}'.format(e.code))
else:
# Other exception ...
getLogger().exception("Fetching error")
if not quiet:
raise
return None | python | def fetch(self, url, encoding=None, force_refetch=False, nocache=False, quiet=True):
''' Fetch an HTML file as binary'''
try:
if not force_refetch and self.cache is not None and url in self.cache:
# try to look for content in cache
logging.debug('Retrieving content from cache for {}'.format(url))
return self.cache.retrieve_blob(url, encoding)
encoded_url = WebHelper.encode_url(url)
req = Request(encoded_url, headers={'User-Agent': 'Mozilla/5.0'})
# support gzip
req.add_header('Accept-encoding', 'gzip, deflate')
# Open URL
getLogger().info("Fetching: {url} |".format(url=url))
response = urlopen(req)
content = response.read()
# unzip if required
if 'Content-Encoding' in response.info() and response.info().get('Content-Encoding') == 'gzip':
# unzip
with gzip.open(BytesIO(content)) as gzfile:
content = gzfile.read()
# update cache if required
if self.cache is not None and not nocache:
if url not in self.cache:
self.cache.insert_blob(url, content)
return content.decode(encoding) if content and encoding else content
except URLError as e:
if hasattr(e, 'reason'):
getLogger().exception('We failed to reach {}. Reason: {}'.format(url, e.reason))
elif hasattr(e, 'code'):
getLogger().exception('The server couldn\'t fulfill the request. Error code: {}'.format(e.code))
else:
# Other exception ...
getLogger().exception("Fetching error")
if not quiet:
raise
return None | [
"def",
"fetch",
"(",
"self",
",",
"url",
",",
"encoding",
"=",
"None",
",",
"force_refetch",
"=",
"False",
",",
"nocache",
"=",
"False",
",",
"quiet",
"=",
"True",
")",
":",
"try",
":",
"if",
"not",
"force_refetch",
"and",
"self",
".",
"cache",
"is",
"not",
"None",
"and",
"url",
"in",
"self",
".",
"cache",
":",
"# try to look for content in cache",
"logging",
".",
"debug",
"(",
"'Retrieving content from cache for {}'",
".",
"format",
"(",
"url",
")",
")",
"return",
"self",
".",
"cache",
".",
"retrieve_blob",
"(",
"url",
",",
"encoding",
")",
"encoded_url",
"=",
"WebHelper",
".",
"encode_url",
"(",
"url",
")",
"req",
"=",
"Request",
"(",
"encoded_url",
",",
"headers",
"=",
"{",
"'User-Agent'",
":",
"'Mozilla/5.0'",
"}",
")",
"# support gzip",
"req",
".",
"add_header",
"(",
"'Accept-encoding'",
",",
"'gzip, deflate'",
")",
"# Open URL",
"getLogger",
"(",
")",
".",
"info",
"(",
"\"Fetching: {url} |\"",
".",
"format",
"(",
"url",
"=",
"url",
")",
")",
"response",
"=",
"urlopen",
"(",
"req",
")",
"content",
"=",
"response",
".",
"read",
"(",
")",
"# unzip if required",
"if",
"'Content-Encoding'",
"in",
"response",
".",
"info",
"(",
")",
"and",
"response",
".",
"info",
"(",
")",
".",
"get",
"(",
"'Content-Encoding'",
")",
"==",
"'gzip'",
":",
"# unzip",
"with",
"gzip",
".",
"open",
"(",
"BytesIO",
"(",
"content",
")",
")",
"as",
"gzfile",
":",
"content",
"=",
"gzfile",
".",
"read",
"(",
")",
"# update cache if required",
"if",
"self",
".",
"cache",
"is",
"not",
"None",
"and",
"not",
"nocache",
":",
"if",
"url",
"not",
"in",
"self",
".",
"cache",
":",
"self",
".",
"cache",
".",
"insert_blob",
"(",
"url",
",",
"content",
")",
"return",
"content",
".",
"decode",
"(",
"encoding",
")",
"if",
"content",
"and",
"encoding",
"else",
"content",
"except",
"URLError",
"as",
"e",
":",
"if",
"hasattr",
"(",
"e",
",",
"'reason'",
")",
":",
"getLogger",
"(",
")",
".",
"exception",
"(",
"'We failed to reach {}. Reason: {}'",
".",
"format",
"(",
"url",
",",
"e",
".",
"reason",
")",
")",
"elif",
"hasattr",
"(",
"e",
",",
"'code'",
")",
":",
"getLogger",
"(",
")",
".",
"exception",
"(",
"'The server couldn\\'t fulfill the request. Error code: {}'",
".",
"format",
"(",
"e",
".",
"code",
")",
")",
"else",
":",
"# Other exception ...",
"getLogger",
"(",
")",
".",
"exception",
"(",
"\"Fetching error\"",
")",
"if",
"not",
"quiet",
":",
"raise",
"return",
"None"
] | Fetch an HTML file as binary | [
"Fetch",
"an",
"HTML",
"file",
"as",
"binary"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chirpnet.py#L90-L125 |
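A hedged usage sketch for fetch(); the WebHelper constructor signature, in particular how a cache backend is supplied, is an assumption not shown in this record.

```python
# Illustrative calls; example.com stands in for any URL.
from chirptext.chirpnet import WebHelper

web = WebHelper()  # assumed constructor; a cache backend may be configurable
raw = web.fetch('https://example.com/')                     # bytes, or None on error
html = web.fetch('https://example.com/', encoding='utf-8')  # decoded to str
page = web.fetch('https://example.com/', quiet=False)       # re-raises URLError
```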
letuananh/chirptext | chirptext/chirpnet.py | WebHelper.download | def download(self, url, path, force_refetch=False, nocache=False):
''' Download a file at $url and save it to $path
'''
# Enable cache
if os.path.isfile(path):
getLogger().info("File exists, download task skipped -> {path}".format(path=path))
return True
try:
# Open URL
getLogger().info("Downloading: {url} -> {path}".format(url=url, path=path))
response = self.fetch(url, force_refetch=force_refetch, nocache=nocache)
if response is not None:
# Download file
local_file = open(path, "wb")
local_file.write(response)
local_file.close()
# Finished
return True
else:
return False
except Exception as e:
if hasattr(e, 'reason'):
getLogger().exception('We failed to reach a server. Reason: %s' % (e.reason,))
elif hasattr(e, 'code'):
getLogger().exception("The server couldn't fulfill the request. Error code: {code}".format(code=e.code))
else:
# unknown error
getLogger().exception("Unknown error: %s" % (e,))
return False | python | def download(self, url, path, force_refetch=False, nocache=False):
''' Download a file at $url and save it to $path
'''
# Enable cache
if os.path.isfile(path):
getLogger().info("File exists, download task skipped -> {path}".format(path=path))
return True
try:
# Open URL
getLogger().info("Downloading: {url} -> {path}".format(url=url, path=path))
response = self.fetch(url, force_refetch=force_refetch, nocache=nocache)
if response is not None:
# Download file
local_file = open(path, "wb")
local_file.write(response)
local_file.close()
# Finished
return True
else:
return False
except Exception as e:
if hasattr(e, 'reason'):
getLogger().exception('We failed to reach a server. Reason: %s' % (e.reason,))
elif hasattr(e, 'code'):
getLogger().exception("The server couldn't fulfill the request. Error code: {code}".format(code=e.code))
else:
# unknown error
getLogger().exception("Unknown error: %s" % (e,))
return False | [
"def",
"download",
"(",
"self",
",",
"url",
",",
"path",
",",
"force_refetch",
"=",
"False",
",",
"nocache",
"=",
"False",
")",
":",
"# Enable cache",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"getLogger",
"(",
")",
".",
"info",
"(",
"\"File exists, download task skipped -> {path}\"",
".",
"format",
"(",
"path",
"=",
"path",
")",
")",
"return",
"True",
"try",
":",
"# Open URL",
"getLogger",
"(",
")",
".",
"info",
"(",
"\"Downloading: {url} -> {path}\"",
".",
"format",
"(",
"url",
"=",
"url",
",",
"path",
"=",
"path",
")",
")",
"response",
"=",
"self",
".",
"fetch",
"(",
"url",
",",
"force_refetch",
"=",
"force_refetch",
",",
"nocache",
"=",
"nocache",
")",
"if",
"response",
"is",
"not",
"None",
":",
"# Download file",
"local_file",
"=",
"open",
"(",
"path",
",",
"\"wb\"",
")",
"local_file",
".",
"write",
"(",
"response",
")",
"local_file",
".",
"close",
"(",
")",
"# Finished",
"return",
"True",
"else",
":",
"return",
"False",
"except",
"Exception",
"as",
"e",
":",
"if",
"hasattr",
"(",
"e",
",",
"'reason'",
")",
":",
"getLogger",
"(",
")",
".",
"exception",
"(",
"'We failed to reach a server. Reason: %s'",
"%",
"(",
"e",
".",
"reason",
",",
")",
")",
"elif",
"hasattr",
"(",
"e",
",",
"'code'",
")",
":",
"getLogger",
"(",
")",
".",
"exception",
"(",
"\"The server couldn't fulfill the request. Error code: {code}\"",
".",
"format",
"(",
"code",
"=",
"e",
".",
"code",
")",
")",
"else",
":",
"# everything is fine",
"getLogger",
"(",
")",
".",
"exception",
"(",
"\"Unknown error: %s\"",
"%",
"(",
"e",
",",
")",
")",
"return",
"False"
] | Download a file at $url and save it to $path | [
"Download",
"a",
"file",
"at",
"$url",
"and",
"save",
"it",
"to",
"$path"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/chirpnet.py#L134-L162 |
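A companion sketch for WebHelper.download; the URL and target path are illustrative assumptions.

# Hedged sketch: download a file to disk, reusing the fetch() logic above.
from chirptext.chirpnet import WebHelper

web = WebHelper()  # assumption: default construction works
ok = web.download('https://example.com/robots.txt', '/tmp/robots.txt')
print('saved' if ok else 'failed')  # also True when the file already exists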
cgtobi/PyRMVtransport | RMVtransport/rmvjourney.py | RMVJourney._platform | def _platform(self) -> Optional[str]:
"""Extract platform."""
try:
return str(self.journey.MainStop.BasicStop.Dep.Platform.text)
except AttributeError:
return None | python | def _platform(self) -> Optional[str]:
"""Extract platform."""
try:
return str(self.journey.MainStop.BasicStop.Dep.Platform.text)
except AttributeError:
return None | [
"def",
"_platform",
"(",
"self",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"try",
":",
"return",
"str",
"(",
"self",
".",
"journey",
".",
"MainStop",
".",
"BasicStop",
".",
"Dep",
".",
"Platform",
".",
"text",
")",
"except",
"AttributeError",
":",
"return",
"None"
] | Extract platform. | [
"Extract",
"platform",
"."
] | train | https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvjourney.py#L35-L40 |
cgtobi/PyRMVtransport | RMVtransport/rmvjourney.py | RMVJourney._delay | def _delay(self) -> int:
"""Extract departure delay."""
try:
return int(self.journey.MainStop.BasicStop.Dep.Delay.text)
except AttributeError:
return 0 | python | def _delay(self) -> int:
"""Extract departure delay."""
try:
return int(self.journey.MainStop.BasicStop.Dep.Delay.text)
except AttributeError:
return 0 | [
"def",
"_delay",
"(",
"self",
")",
"->",
"int",
":",
"try",
":",
"return",
"int",
"(",
"self",
".",
"journey",
".",
"MainStop",
".",
"BasicStop",
".",
"Dep",
".",
"Delay",
".",
"text",
")",
"except",
"AttributeError",
":",
"return",
"0"
] | Extract departure delay. | [
"Extract",
"departure",
"delay",
"."
] | train | https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvjourney.py#L42-L47 |
cgtobi/PyRMVtransport | RMVtransport/rmvjourney.py | RMVJourney._departure | def _departure(self) -> datetime:
"""Extract departure time."""
departure_time = datetime.strptime(
self.journey.MainStop.BasicStop.Dep.Time.text, "%H:%M"
).time()
if departure_time > (self.now - timedelta(hours=1)).time():
return datetime.combine(self.now.date(), departure_time)
return datetime.combine(self.now.date() + timedelta(days=1), departure_time) | python | def _departure(self) -> datetime:
"""Extract departure time."""
departure_time = datetime.strptime(
self.journey.MainStop.BasicStop.Dep.Time.text, "%H:%M"
).time()
if departure_time > (self.now - timedelta(hours=1)).time():
return datetime.combine(self.now.date(), departure_time)
return datetime.combine(self.now.date() + timedelta(days=1), departure_time) | [
"def",
"_departure",
"(",
"self",
")",
"->",
"datetime",
":",
"departure_time",
"=",
"datetime",
".",
"strptime",
"(",
"self",
".",
"journey",
".",
"MainStop",
".",
"BasicStop",
".",
"Dep",
".",
"Time",
".",
"text",
",",
"\"%H:%M\"",
")",
".",
"time",
"(",
")",
"if",
"departure_time",
">",
"(",
"self",
".",
"now",
"-",
"timedelta",
"(",
"hours",
"=",
"1",
")",
")",
".",
"time",
"(",
")",
":",
"return",
"datetime",
".",
"combine",
"(",
"self",
".",
"now",
".",
"date",
"(",
")",
",",
"departure_time",
")",
"return",
"datetime",
".",
"combine",
"(",
"self",
".",
"now",
".",
"date",
"(",
")",
"+",
"timedelta",
"(",
"days",
"=",
"1",
")",
",",
"departure_time",
")"
] | Extract departure time. | [
"Extract",
"departure",
"time",
"."
] | train | https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvjourney.py#L49-L56 |
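The rollover rule above deserves a concrete illustration: the response carries bare "HH:MM" strings, and any time more than one hour in the past is treated as tomorrow's departure. A standalone restatement of that rule:

# Standalone sketch of the _departure date-rollover rule.
from datetime import datetime, timedelta

def resolve_departure(hhmm, now):
    t = datetime.strptime(hhmm, "%H:%M").time()
    if t > (now - timedelta(hours=1)).time():
        return datetime.combine(now.date(), t)                   # today
    return datetime.combine(now.date() + timedelta(days=1), t)   # tomorrow

now = datetime(2024, 1, 1, 23, 30)
print(resolve_departure("23:45", now))  # 2024-01-01 23:45
print(resolve_departure("00:10", now))  # 2024-01-02 00:10 (rolled past midnight)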
cgtobi/PyRMVtransport | RMVtransport/rmvjourney.py | RMVJourney._extract | def _extract(self, attribute) -> str:
"""Extract train information."""
attr_data = self.journey.JourneyAttributeList.JourneyAttribute[
self.attr_types.index(attribute)
].Attribute
attr_variants = attr_data.xpath("AttributeVariant/@type")
data = attr_data.AttributeVariant[attr_variants.index("NORMAL")].Text.pyval
return str(data) | python | def _extract(self, attribute) -> str:
"""Extract train information."""
attr_data = self.journey.JourneyAttributeList.JourneyAttribute[
self.attr_types.index(attribute)
].Attribute
attr_variants = attr_data.xpath("AttributeVariant/@type")
data = attr_data.AttributeVariant[attr_variants.index("NORMAL")].Text.pyval
return str(data) | [
"def",
"_extract",
"(",
"self",
",",
"attribute",
")",
"->",
"str",
":",
"attr_data",
"=",
"self",
".",
"journey",
".",
"JourneyAttributeList",
".",
"JourneyAttribute",
"[",
"self",
".",
"attr_types",
".",
"index",
"(",
"attribute",
")",
"]",
".",
"Attribute",
"attr_variants",
"=",
"attr_data",
".",
"xpath",
"(",
"\"AttributeVariant/@type\"",
")",
"data",
"=",
"attr_data",
".",
"AttributeVariant",
"[",
"attr_variants",
".",
"index",
"(",
"\"NORMAL\"",
")",
"]",
".",
"Text",
".",
"pyval",
"return",
"str",
"(",
"data",
")"
] | Extract train information. | [
"Extract",
"train",
"information",
"."
] | train | https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvjourney.py#L66-L73 |
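A pure-Python analogue of the _extract lookup above: the attribute's position in attr_types selects the matching JourneyAttribute, and the "NORMAL" variant's text is returned. The sample data is a made-up stand-in for the parsed XML.

# Hypothetical stand-in data mirroring the positional lookup in _extract.
attr_types = ["CATEGORY", "NUMBER", "DIRECTION"]
attributes = [
    {"NORMAL": "Bus", "SHORT": "B"},
    {"NORMAL": "33", "SHORT": "33"},
    {"NORMAL": "Frankfurt Hauptbahnhof", "SHORT": "F-Hbf"},
]

def extract(attribute):
    variants = attributes[attr_types.index(attribute)]  # positional pairing
    return str(variants["NORMAL"])

print(extract("DIRECTION"))  # Frankfurt Hauptbahnhof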
cgtobi/PyRMVtransport | RMVtransport/rmvjourney.py | RMVJourney._info | def _info(self) -> Optional[str]:
"""Extract journey information."""
try:
return str(html.unescape(self.journey.InfoTextList.InfoText.get("text")))
except AttributeError:
return None | python | def _info(self) -> Optional[str]:
"""Extract journey information."""
try:
return str(html.unescape(self.journey.InfoTextList.InfoText.get("text")))
except AttributeError:
return None | [
"def",
"_info",
"(",
"self",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"try",
":",
"return",
"str",
"(",
"html",
".",
"unescape",
"(",
"self",
".",
"journey",
".",
"InfoTextList",
".",
"InfoText",
".",
"get",
"(",
"\"text\"",
")",
")",
")",
"except",
"AttributeError",
":",
"return",
"None"
] | Extract journey information. | [
"Extract",
"journey",
"information",
"."
] | train | https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvjourney.py#L75-L80 |
cgtobi/PyRMVtransport | RMVtransport/rmvjourney.py | RMVJourney._info_long | def _info_long(self) -> Optional[str]:
"""Extract journey information."""
try:
return str(
html.unescape(self.journey.InfoTextList.InfoText.get("textL")).replace(
"<br />", "\n"
)
)
except AttributeError:
return None | python | def _info_long(self) -> Optional[str]:
"""Extract journey information."""
try:
return str(
html.unescape(self.journey.InfoTextList.InfoText.get("textL")).replace(
"<br />", "\n"
)
)
except AttributeError:
return None | [
"def",
"_info_long",
"(",
"self",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"try",
":",
"return",
"str",
"(",
"html",
".",
"unescape",
"(",
"self",
".",
"journey",
".",
"InfoTextList",
".",
"InfoText",
".",
"get",
"(",
"\"textL\"",
")",
")",
".",
"replace",
"(",
"\"<br />\"",
",",
"\"\\n\"",
")",
")",
"except",
"AttributeError",
":",
"return",
"None"
] | Extract journey information. | [
"Extract",
"journey",
"information",
"."
] | train | https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvjourney.py#L82-L91 |
cgtobi/PyRMVtransport | RMVtransport/rmvjourney.py | RMVJourney._pass_list | def _pass_list(self) -> List[Dict[str, Any]]:
"""Extract next stops along the journey."""
stops: List[Dict[str, Any]] = []
for stop in self.journey.PassList.BasicStop:
index = stop.get("index")
station = stop.Location.Station.HafasName.Text.text
station_id = stop.Location.Station.ExternalId.text
stops.append({"index": index, "stationId": station_id, "station": station})
return stops | python | def _pass_list(self) -> List[Dict[str, Any]]:
"""Extract next stops along the journey."""
stops: List[Dict[str, Any]] = []
for stop in self.journey.PassList.BasicStop:
index = stop.get("index")
station = stop.Location.Station.HafasName.Text.text
station_id = stop.Location.Station.ExternalId.text
stops.append({"index": index, "stationId": station_id, "station": station})
return stops | [
"def",
"_pass_list",
"(",
"self",
")",
"->",
"List",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
":",
"stops",
":",
"List",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"=",
"[",
"]",
"for",
"stop",
"in",
"self",
".",
"journey",
".",
"PassList",
".",
"BasicStop",
":",
"index",
"=",
"stop",
".",
"get",
"(",
"\"index\"",
")",
"station",
"=",
"stop",
".",
"Location",
".",
"Station",
".",
"HafasName",
".",
"Text",
".",
"text",
"station_id",
"=",
"stop",
".",
"Location",
".",
"Station",
".",
"ExternalId",
".",
"text",
"stops",
".",
"append",
"(",
"{",
"\"index\"",
":",
"index",
",",
"\"stationId\"",
":",
"station_id",
",",
"\"station\"",
":",
"station",
"}",
")",
"return",
"stops"
] | Extract next stops along the journey. | [
"Extract",
"next",
"stops",
"along",
"the",
"journey",
"."
] | train | https://github.com/cgtobi/PyRMVtransport/blob/20a0d68ecfdedceb32e8ca96c381fdec7e2069c7/RMVtransport/rmvjourney.py#L93-L101 |
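A hedged sketch of the _pass_list traversal on a minimal hand-written fragment (requires lxml; tag names follow the attribute accesses in the code above, not an official schema).

# Build the stops list from a tiny objectified XML fragment.
from lxml import objectify

XML = b"""<PassList>
  <BasicStop index="1">
    <Location><Station>
      <HafasName><Text>Hauptwache</Text></HafasName>
      <ExternalId>3000001</ExternalId>
    </Station></Location>
  </BasicStop>
</PassList>"""

pass_list = objectify.fromstring(XML)
stops = [{"index": stop.get("index"),
          "stationId": stop.Location.Station.ExternalId.text,
          "station": stop.Location.Station.HafasName.Text.text}
         for stop in pass_list.BasicStop]
print(stops)  # [{'index': '1', 'stationId': '3000001', 'station': 'Hauptwache'}]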
pyout/pyout | pyout/elements.py | validate | def validate(style):
"""Check `style` against pyout.styling.schema.
Parameters
----------
style : dict
Style object to validate.
Raises
------
StyleValidationError if `style` is not valid.
"""
try:
import jsonschema
except ImportError:
return
try:
jsonschema.validate(style, schema)
except jsonschema.ValidationError as exc:
new_exc = StyleValidationError(exc)
# Don't dump the original jsonschema exception because it is already
# included in the StyleValidationError's message.
new_exc.__cause__ = None
raise new_exc | python | def validate(style):
"""Check `style` against pyout.styling.schema.
Parameters
----------
style : dict
Style object to validate.
Raises
------
StyleValidationError if `style` is not valid.
"""
try:
import jsonschema
except ImportError:
return
try:
jsonschema.validate(style, schema)
except jsonschema.ValidationError as exc:
new_exc = StyleValidationError(exc)
# Don't dump the original jsonschema exception because it is already
# included in the StyleValidationError's message.
new_exc.__cause__ = None
raise new_exc | [
"def",
"validate",
"(",
"style",
")",
":",
"try",
":",
"import",
"jsonschema",
"except",
"ImportError",
":",
"return",
"try",
":",
"jsonschema",
".",
"validate",
"(",
"style",
",",
"schema",
")",
"except",
"jsonschema",
".",
"ValidationError",
"as",
"exc",
":",
"new_exc",
"=",
"StyleValidationError",
"(",
"exc",
")",
"# Don't dump the original jsonschema exception because it is already",
"# included in the StyleValidationError's message.",
"new_exc",
".",
"__cause__",
"=",
"None",
"raise",
"new_exc"
] | Check `style` against pyout.styling.schema.
Parameters
----------
style : dict
Style object to validate.
Raises
------
StyleValidationError if `style` is not valid. | [
"Check",
"style",
"against",
"pyout",
".",
"styling",
".",
"schema",
"."
] | train | https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/elements.py#L241-L265 |
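The function above is really two patterns in one: optional-dependency validation (skip silently when jsonschema is absent) and suppressing exception chaining when re-raising. A self-contained sketch of the same shape, with a made-up error type and schema:

# Standalone sketch of validate()'s optional-dependency + re-raise pattern.
class StyleError(Exception):
    pass

def check(style, schema):
    try:
        import jsonschema
    except ImportError:
        return  # no jsonschema installed -> validation is silently skipped
    try:
        jsonschema.validate(style, schema)
    except jsonschema.ValidationError as exc:
        err = StyleError(str(exc))
        err.__cause__ = None  # assigning __cause__ also suppresses the context chain
        raise err

check({"width": 10}, {"type": "object"})  # passes (or no-ops without jsonschema)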
pyout/pyout | pyout/elements.py | value_type | def value_type(value):
"""Classify `value` of bold, color, and underline keys.
Parameters
----------
value : style value
Returns
-------
str, {"simple", "lookup", "re_lookup", "interval"}
"""
try:
keys = list(value.keys())
except AttributeError:
return "simple"
if keys in [["lookup"], ["re_lookup"], ["interval"]]:
return keys[0]
raise ValueError("Type of `value` could not be determined") | python | def value_type(value):
"""Classify `value` of bold, color, and underline keys.
Parameters
----------
value : style value
Returns
-------
str, {"simple", "lookup", "re_lookup", "interval"}
"""
try:
keys = list(value.keys())
except AttributeError:
return "simple"
if keys in [["lookup"], ["re_lookup"], ["interval"]]:
return keys[0]
raise ValueError("Type of `value` could not be determined") | [
"def",
"value_type",
"(",
"value",
")",
":",
"try",
":",
"keys",
"=",
"list",
"(",
"value",
".",
"keys",
"(",
")",
")",
"except",
"AttributeError",
":",
"return",
"\"simple\"",
"if",
"keys",
"in",
"[",
"[",
"\"lookup\"",
"]",
",",
"[",
"\"re_lookup\"",
"]",
",",
"[",
"\"interval\"",
"]",
"]",
":",
"return",
"keys",
"[",
"0",
"]",
"raise",
"ValueError",
"(",
"\"Type of `value` could not be determined\"",
")"
] | Classify `value` of bold, color, and underline keys.
Parameters
----------
value : style value
Returns
-------
str, {"simple", "lookup", "re_lookup", "interval"} | [
"Classify",
"value",
"of",
"bold",
"color",
"and",
"underline",
"keys",
"."
] | train | https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/elements.py#L268-L285 |
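A quick demonstration of the classification rule: plain values map to "simple", single-key dicts name their own type, and anything else raises. The import path follows this record's file path.

# Exercising value_type's three branches (requires pyout).
from pyout.elements import value_type

print(value_type("red"))                           # simple
print(value_type({"lookup": {"ok": "green"}}))     # lookup
print(value_type({"interval": [[0, 50, "red"]]}))  # interval
try:
    value_type({"lookup": {}, "extra": 1})         # two keys -> unclassifiable
except ValueError as e:
    print(e)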
letuananh/chirptext | chirptext/dekomecab.py | _register_mecab_loc | def _register_mecab_loc(location):
''' Set MeCab binary location '''
global MECAB_LOC
if not os.path.isfile(location):
logging.getLogger(__name__).warning("Provided mecab binary location does not exist {}".format(location))
logging.getLogger(__name__).info("Mecab binary is switched to: {}".format(location))
MECAB_LOC = location | python | def _register_mecab_loc(location):
''' Set MeCab binary location '''
global MECAB_LOC
if not os.path.isfile(location):
logging.getLogger(__name__).warning("Provided mecab binary location does not exist {}".format(location))
logging.getLogger(__name__).info("Mecab binary is switched to: {}".format(location))
MECAB_LOC = location | [
"def",
"_register_mecab_loc",
"(",
"location",
")",
":",
"global",
"MECAB_LOC",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"location",
")",
":",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
".",
"warning",
"(",
"\"Provided mecab binary location does not exist {}\"",
".",
"format",
"(",
"location",
")",
")",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
".",
"info",
"(",
"\"Mecab binary is switched to: {}\"",
".",
"format",
"(",
"location",
")",
")",
"MECAB_LOC",
"=",
"location"
] | Set MeCab binary location | [
"Set",
"MeCab",
"binary",
"location"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/dekomecab.py#L43-L49 |
letuananh/chirptext | chirptext/dekomecab.py | run_mecab_process | def run_mecab_process(content, *args, **kwargs):
''' Use subprocess to run mecab '''
encoding = 'utf-8' if 'encoding' not in kwargs else kwargs['encoding']
mecab_loc = kwargs['mecab_loc'] if 'mecab_loc' in kwargs else None
if mecab_loc is None:
mecab_loc = MECAB_LOC
proc_args = [mecab_loc]
if args:
proc_args.extend(args)
output = subprocess.run(proc_args,
input=content.encode(encoding),
stdout=subprocess.PIPE)
output_string = os.linesep.join(output.stdout.decode(encoding).splitlines())
return output_string | python | def run_mecab_process(content, *args, **kwargs):
''' Use subprocess to run mecab '''
encoding = 'utf-8' if 'encoding' not in kwargs else kwargs['encoding']
mecab_loc = kwargs['mecab_loc'] if 'mecab_loc' in kwargs else None
if mecab_loc is None:
mecab_loc = MECAB_LOC
proc_args = [mecab_loc]
if args:
proc_args.extend(args)
output = subprocess.run(proc_args,
input=content.encode(encoding),
stdout=subprocess.PIPE)
output_string = os.linesep.join(output.stdout.decode(encoding).splitlines())
return output_string | [
"def",
"run_mecab_process",
"(",
"content",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"encoding",
"=",
"'utf-8'",
"if",
"'encoding'",
"not",
"in",
"kwargs",
"else",
"kwargs",
"[",
"'encoding'",
"]",
"mecab_loc",
"=",
"kwargs",
"[",
"'mecab_loc'",
"]",
"if",
"'mecab_loc'",
"in",
"kwargs",
"else",
"None",
"if",
"mecab_loc",
"is",
"None",
":",
"mecab_loc",
"=",
"MECAB_LOC",
"proc_args",
"=",
"[",
"mecab_loc",
"]",
"if",
"args",
":",
"proc_args",
".",
"extend",
"(",
"args",
")",
"output",
"=",
"subprocess",
".",
"run",
"(",
"proc_args",
",",
"input",
"=",
"content",
".",
"encode",
"(",
"encoding",
")",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"output_string",
"=",
"os",
".",
"linesep",
".",
"join",
"(",
"output",
".",
"stdout",
".",
"decode",
"(",
"encoding",
")",
".",
"splitlines",
"(",
")",
")",
"return",
"output_string"
] | Use subprocess to run mecab | [
"Use",
"subprocess",
"to",
"run",
"mecab"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/dekomecab.py#L57-L70 |
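The subprocess pattern above (bytes in via input=, bytes out via stdout=PIPE, newline normalization through os.linesep) works for any filter program. A portable sketch substituting the current Python interpreter for the mecab binary, so it runs without MeCab installed:

# Portable restatement of the run_mecab_process subprocess pattern.
import os
import subprocess
import sys

def run_filter(content, encoding='utf-8'):
    # child process: a byte-for-byte echo standing in for mecab
    proc_args = [sys.executable, '-c',
                 'import sys; sys.stdout.buffer.write(sys.stdin.buffer.read())']
    output = subprocess.run(proc_args, input=content.encode(encoding),
                            stdout=subprocess.PIPE)
    return os.linesep.join(output.stdout.decode(encoding).splitlines())

print(run_filter('猫が好きです'))  # echoed back unchanged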
letuananh/chirptext | chirptext/dekomecab.py | parse | def parse(content, *args, **kwargs):
''' Use mecab-python3 by default to parse JP text. Fall back to mecab binary app if needed '''
global MECAB_PYTHON3
if 'mecab_loc' not in kwargs and MECAB_PYTHON3 and 'MeCab' in globals():
return MeCab.Tagger(*args).parse(content)
else:
return run_mecab_process(content, *args, **kwargs) | python | def parse(content, *args, **kwargs):
''' Use mecab-python3 by default to parse JP text. Fall back to mecab binary app if needed '''
global MECAB_PYTHON3
if 'mecab_loc' not in kwargs and MECAB_PYTHON3 and 'MeCab' in globals():
return MeCab.Tagger(*args).parse(content)
else:
return run_mecab_process(content, *args, **kwargs) | [
"def",
"parse",
"(",
"content",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"global",
"MECAB_PYTHON3",
"if",
"'mecab_loc'",
"not",
"in",
"kwargs",
"and",
"MECAB_PYTHON3",
"and",
"'MeCab'",
"in",
"globals",
"(",
")",
":",
"return",
"MeCab",
".",
"Tagger",
"(",
"*",
"args",
")",
".",
"parse",
"(",
"content",
")",
"else",
":",
"return",
"run_mecab_process",
"(",
"content",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Use mecab-python3 by default to parse JP text. Fall back to mecab binary app if needed | [
"Use",
"mecab",
"-",
"python3",
"by",
"default",
"to",
"parse",
"JP",
"text",
".",
"Fall",
"back",
"to",
"mecab",
"binary",
"app",
"if",
"needed"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/dekomecab.py#L73-L79 |
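A hedged usage sketch for parse(); both branches need a working MeCab setup (the mecab-python3 module, or a mecab binary for the subprocess fallback). The import path mirrors the repo layout; the binary path is a placeholder.

# Hedged usage of dekomecab.parse (requires MeCab on the machine).
from chirptext import dekomecab

out = dekomecab.parse('猫が好きです')  # MeCab.Tagger when mecab-python3 is present
# forcing the subprocess fallback with an explicit (hypothetical) binary path:
# out = dekomecab.parse('猫が好きです', mecab_loc='/usr/local/bin/mecab')
print(out)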
meng89/ipodshuffle | ipodshuffle/shuffle.py | Shuffle.create_track | def create_track(self, path_in_ipod=None, checksum=None):
"""
:param path_in_ipod: the path of audio file in the iPod base
:param checksum: CHECKSUM of the audio file in member audiodb
:return: a new Track, you may want to append it to the playlist.tracks
"""
if bool(path_in_ipod) == bool(checksum):
raise Exception
if not path_in_ipod:
path_in_ipod = self.audiodb.get_voice(checksum)
track = Track(self, path_in_ipod=path_in_ipod)
return track | python | def create_track(self, path_in_ipod=None, checksum=None):
"""
:param path_in_ipod: the path of audio file in the iPod base
:param checksum: CHECKSUM of the audio file in member audiodb
:return: a new Track, you may want to append it to the playlist.tracks
"""
if bool(path_in_ipod) == bool(checksum):
raise Exception
if not path_in_ipod:
path_in_ipod = self.audiodb.get_voice(checksum)
track = Track(self, path_in_ipod=path_in_ipod)
return track | [
"def",
"create_track",
"(",
"self",
",",
"path_in_ipod",
"=",
"None",
",",
"checksum",
"=",
"None",
")",
":",
"if",
"bool",
"(",
"path_in_ipod",
")",
"==",
"bool",
"(",
"checksum",
")",
":",
"raise",
"Exception",
"if",
"not",
"path_in_ipod",
":",
"path_in_ipod",
"=",
"self",
".",
"audiodb",
".",
"get_voice",
"(",
"checksum",
")",
"track",
"=",
"Track",
"(",
"self",
",",
"path_in_ipod",
"=",
"path_in_ipod",
")",
"return",
"track"
] | :param path_in_ipod: the path of audio file in the iPod base
:param checksum: CHECKSUM of the audio file in member audiodb
:return: a new Track, you may want to append it to the playlist.tracks | [
":",
"param",
"path_in_ipod",
":",
"the",
"path",
"of",
"audio",
"file",
"in",
"the",
"iPod",
"base",
":",
"param",
"checksum",
":",
"CHECKSUM",
"of",
"the",
"audio",
"file",
"in",
"member",
"audiodb",
":",
"return",
":",
"a",
"new",
"Track",
"you",
"may",
"want",
"append",
"it",
"to",
"the",
"playlist",
".",
"tracks"
] | train | https://github.com/meng89/ipodshuffle/blob/c9093dbb5cdac609376ebd3b4ef1b0fc58107d96/ipodshuffle/shuffle.py#L180-L194 |
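A hedged sketch for create_track; a mounted iPod shuffle filesystem is required, and the mount point, file paths, and Shuffle construction below are placeholders, not part of the record.

# Hedged sketch: exactly one of path_in_ipod / checksum must be given.
from ipodshuffle.shuffle import Shuffle  # assumption: import path mirrors the repo

shuffle = Shuffle('/media/ipod')                 # hypothetical mount point
checksum = shuffle.audiodb.add('/tmp/song.mp3')  # store audio, get its checksum
track = shuffle.create_track(checksum=checksum)
# playlist.tracks.append(track)                  # attach it to a playlist
# passing both (or neither) argument raises, per the XOR check above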
meng89/ipodshuffle | ipodshuffle/shuffle.py | _Voice.voice | def voice(self):
"""tuple. contain text and lang code
"""
dbid = self.lldb.dbid
text, lang = self._voiceoverdb.get_text_lang(dbid)
return text, lang | python | def voice(self):
"""tuple. contain text and lang code
"""
dbid = self.lldb.dbid
text, lang = self._voiceoverdb.get_text_lang(dbid)
return text, lang | [
"def",
"voice",
"(",
"self",
")",
":",
"dbid",
"=",
"self",
".",
"lldb",
".",
"dbid",
"text",
",",
"lang",
"=",
"self",
".",
"_voiceoverdb",
".",
"get_text_lang",
"(",
"dbid",
")",
"return",
"text",
",",
"lang"
] | tuple. Contains text and lang code | [
"tuple",
".",
"contain",
"text",
"and",
"lang",
"code"
] | train | https://github.com/meng89/ipodshuffle/blob/c9093dbb5cdac609376ebd3b4ef1b0fc58107d96/ipodshuffle/shuffle.py#L229-L234 |
meng89/ipodshuffle | ipodshuffle/shuffle.py | AudioDB.add | def add(self, src):
""" store an audio file to storage dir
:param src: audio file path
:return: checksum value
"""
if not audio.get_type(src):
raise TypeError('The type of this file is not supported.')
return super().add(src) | python | def add(self, src):
""" store an audio file to storage dir
:param src: audio file path
:return: checksum value
"""
if not audio.get_type(src):
raise TypeError('The type of this file is not supported.')
return super().add(src) | [
"def",
"add",
"(",
"self",
",",
"src",
")",
":",
"if",
"not",
"audio",
".",
"get_type",
"(",
"src",
")",
":",
"raise",
"TypeError",
"(",
"'The type of this file is not supported.'",
")",
"return",
"super",
"(",
")",
".",
"add",
"(",
"src",
")"
] | store an audio file into the storage dir
:param src: audio file path
:return: checksum value | [
"store",
"an",
"audio",
"file",
"to",
"storage",
"dir"
] | train | https://github.com/meng89/ipodshuffle/blob/c9093dbb5cdac609376ebd3b4ef1b0fc58107d96/ipodshuffle/shuffle.py#L390-L399 |
alefnula/tea | tea/process/posix_process.py | _get_cmd | def _get_cmd(command, arguments):
"""Merge command with arguments."""
if arguments is None:
arguments = []
if command.endswith(".py") or command.endswith(".pyw"):
return [sys.executable, command] + list(arguments)
else:
return [command] + list(arguments) | python | def _get_cmd(command, arguments):
"""Merge command with arguments."""
if arguments is None:
arguments = []
if command.endswith(".py") or command.endswith(".pyw"):
return [sys.executable, command] + list(arguments)
else:
return [command] + list(arguments) | [
"def",
"_get_cmd",
"(",
"command",
",",
"arguments",
")",
":",
"if",
"arguments",
"is",
"None",
":",
"arguments",
"=",
"[",
"]",
"if",
"command",
".",
"endswith",
"(",
"\".py\"",
")",
"or",
"command",
".",
"endswith",
"(",
"\".pyw\"",
")",
":",
"return",
"[",
"sys",
".",
"executable",
",",
"command",
"]",
"+",
"list",
"(",
"arguments",
")",
"else",
":",
"return",
"[",
"command",
"]",
"+",
"list",
"(",
"arguments",
")"
] | Merge command with arguments. | [
"Merge",
"command",
"with",
"arguments",
"."
] | train | https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/process/posix_process.py#L42-L49 |
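A quick demonstration of the merge rule: .py/.pyw scripts are routed through the current interpreter, everything else is executed directly. Importing the private helper is an assumption made for illustration.

# Exercising _get_cmd's two branches.
from tea.process.posix_process import _get_cmd  # assumption: private, but importable

print(_get_cmd('tool.py', ['-v']))  # [<sys.executable>, 'tool.py', '-v']
print(_get_cmd('/bin/ls', None))    # ['/bin/ls']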
MainRo/cyclotron-std | cyclotron_std/argparse.py | argparse | def argparse(argv, parser, arguments):
""" A command line argument parser.
Parses arguments coming from the argv Observable and outputs them as
Argument items in the output observable.
Parameters
-----------
argv : Observable
An Observable of strings.
parser : Observable
An Observable containing one Parser item.
arguments : Observable
An Observable containing ArgumentDef items.
Returns
-------
Observable
An Observable of Argument items.
"""
def add_arg(parser, arg_spec):
parser.add_argument(arg_spec.name, help=arg_spec.help)
return parser
parse_request = parser \
.map(lambda i: ArgumentParser(description=i.description)) \
.combine_latest(arguments, lambda parser, arg_def: add_arg(parser,arg_def)) \
.last() \
.combine_latest(argv.to_list(), lambda parser, args: (parser,args))
def subscribe(observer):
def on_next(value):
parser, args = value
try:
args = parser.parse_args(args)
for key,value in vars(args).items():
observer.on_next(Argument(key=key, value=value))
except NameError as exc:
observer.on_error("{}\n{}".format(exc, parser.format_help()))
return parse_request.subscribe(on_next, observer.on_error, observer.on_completed)
return AnonymousObservable(subscribe) | python | def argparse(argv, parser, arguments):
""" A command line argument parser.
Parses arguments coming from the argv Observable and outputs them as
Argument items in the output observable.
Parameters
-----------
argv : Observable
An Observable of strings.
parser : Observable
An Observable containing one Parser item.
arguments : Observable
An Observable containing ArgumentDef items.
Returns
-------
Observable
An Observable of Argument items.
"""
def add_arg(parser, arg_spec):
parser.add_argument(arg_spec.name, help=arg_spec.help)
return parser
parse_request = parser \
.map(lambda i: ArgumentParser(description=i.description)) \
.combine_latest(arguments, lambda parser, arg_def: add_arg(parser,arg_def)) \
.last() \
.combine_latest(argv.to_list(), lambda parser, args: (parser,args))
def subscribe(observer):
def on_next(value):
parser, args = value
try:
args = parser.parse_args(args)
for key,value in vars(args).items():
observer.on_next(Argument(key=key, value=value))
except NameError as exc:
observer.on_error("{}\n{}".format(exc, parser.format_help()))
return parse_request.subscribe(on_next, observer.on_error, observer.on_completed)
return AnonymousObservable(subscribe) | [
"def",
"argparse",
"(",
"argv",
",",
"parser",
",",
"arguments",
")",
":",
"def",
"add_arg",
"(",
"parser",
",",
"arg_spec",
")",
":",
"parser",
".",
"add_argument",
"(",
"arg_spec",
".",
"name",
",",
"help",
"=",
"arg_spec",
".",
"help",
")",
"return",
"parser",
"parse_request",
"=",
"parser",
".",
"map",
"(",
"lambda",
"i",
":",
"ArgumentParser",
"(",
"description",
"=",
"i",
".",
"description",
")",
")",
".",
"combine_latest",
"(",
"arguments",
",",
"lambda",
"parser",
",",
"arg_def",
":",
"add_arg",
"(",
"parser",
",",
"arg_def",
")",
")",
".",
"last",
"(",
")",
".",
"combine_latest",
"(",
"argv",
".",
"to_list",
"(",
")",
",",
"lambda",
"parser",
",",
"args",
":",
"(",
"parser",
",",
"args",
")",
")",
"def",
"subscribe",
"(",
"observer",
")",
":",
"def",
"on_next",
"(",
"value",
")",
":",
"parser",
",",
"args",
"=",
"value",
"try",
":",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
")",
"for",
"key",
",",
"value",
"in",
"vars",
"(",
"args",
")",
".",
"items",
"(",
")",
":",
"observer",
".",
"on_next",
"(",
"Argument",
"(",
"key",
"=",
"key",
",",
"value",
"=",
"value",
")",
")",
"except",
"NameError",
"as",
"exc",
":",
"observer",
".",
"on_error",
"(",
"\"{}\\n{}\"",
".",
"format",
"(",
"exc",
",",
"parser",
".",
"format_help",
"(",
")",
")",
")",
"return",
"parse_request",
".",
"subscribe",
"(",
"on_next",
",",
"observer",
".",
"on_error",
",",
"observer",
".",
"on_completed",
")",
"return",
"AnonymousObservable",
"(",
"subscribe",
")"
] | A command line argument parser.
Parses arguments coming from the argv Observable and outputs them as
Argument items in the output observable.
Parameters
-----------
argv : Observable
An Observable of strings.
parser : Observable
An Observable containing one Parser item.
arguments : Observable
An Observable containing ArgumentDef items.
Returns
-------
Observable
An Observable of Argument items. | [
"A",
"command",
"line",
"argument",
"parser",
".",
"Parses",
"arguments",
"coming",
"from",
"the",
"argv",
"Observable",
"and",
"outputs",
"them",
"as",
"Argument",
"items",
"in",
"the",
"output",
"observable",
"."
] | train | https://github.com/MainRo/cyclotron-std/blob/da4188f1c99aafc3a491803c23e01b44b52a15b1/cyclotron_std/argparse.py#L22-L65 |
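The reactive plumbing above folds three streams (a Parser description, ArgumentDef items, argv) into stdlib argparse calls and re-emits the parsed result as Argument items. A non-reactive analogue with plain lists standing in for the Observables; the namedtuple field names follow the attribute accesses in the code above:

# Non-reactive analogue of the Parser/ArgumentDef -> Argument data flow.
from argparse import ArgumentParser
from collections import namedtuple

Parser = namedtuple('Parser', ['description'])
ArgumentDef = namedtuple('ArgumentDef', ['name', 'help'])
Argument = namedtuple('Argument', ['key', 'value'])

def run(parser_item, arg_defs, argv):
    parser = ArgumentParser(description=parser_item.description)
    for arg_def in arg_defs:
        parser.add_argument(arg_def.name, help=arg_def.help)
    args = parser.parse_args(argv)
    return [Argument(key=k, value=v) for k, v in vars(args).items()]

items = run(Parser('demo'), [ArgumentDef('--name', 'a name')], ['--name', 'joe'])
print(items)  # [Argument(key='name', value='joe')]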
lingfeiwang/findr-python | findr/pij.py | gassists_pv | def gassists_pv(self,dg,dt,dt2,na=None,memlimit=-1):
"""Calculates p-values of gene i regulating gene j with genotype data assisted method with multiple tests.
dg: numpy.ndarray(nt,ns,dtype=gtype(='u1' by default)) Genotype data.
Entry dg[i,j] is genotype i's value for sample j.
Each value must be among 0,1,...,na.
Genotype i must be best (and significant) eQTL of gene i (in dt).
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
Genotype i (in dg) must be best (and significant) eQTL of gene i.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
na: Number of alleles the species have. It determines the maximum number of values each genotype can take. When unspecified, it is automatically
determined as the maximum of dg.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than the minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p1: numpy.ndarray(nt,dtype=ftype(='f4' by default)). P-values for LLR of test 1.
Test 1 calculates E(A)->A v.s. E(A) A.
p2: numpy.ndarray((nt,nt2),dtype=ftype(='f4' by default)). P-values for LLR of test 2.
Test 2 calculates E(A)->A--B with E(A)->B v.s. E(A)->A<-B.
p3: numpy.ndarray((nt,nt2),dtype=ftype(='f4' by default)). P-values for LLR of test 3.
Test 3 calculates E(A)->A--B with E(A)->B v.s. E(A)->A->B.
p4: numpy.ndarray((nt,nt2),dtype=ftype(='f4' by default)). P-values for LLR of test 4.
Test 4 calculates E(A)->A--B with E(A)->B v.s. E(A)->A B.
p5: numpy.ndarray((nt,nt2),dtype=ftype(='f4' by default)). P-values for LLR of test 5.
Test 5 calculates E(A)->A--B with E(A)->B v.s. B<-E(A)->A.
For more information on tests, see paper.
ftype and gtype can be found in auto.py.
Example: see findr.examples.geuvadis6
"""
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np,gtype_np
from .types import isint
if dg.dtype.char!=gtype_np:
raise ValueError('Wrong input dtype for genotype data: dg.dtype.char is '+dg.dtype.char+'!='+gtype_np)
if dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np:
raise ValueError('Wrong input dtype for gene expression data')
if len(dg.shape)!=2 or len(dt.shape)!=2 or len(dt2.shape)!=2:
raise ValueError('Wrong input shape')
if not isint(memlimit):
raise ValueError('Wrong memlimit type')
if not (na is None or isint(na)):
raise ValueError('Wrong na type')
if na is not None and na<=0:
raise ValueError('Input requires na>0.')
ng=dg.shape[0]
nt=dt2.shape[0]
ns=dg.shape[1]
nvx=na+1 if na else dg.max()+1
if nvx<2:
raise ValueError('Invalid genotype values')
if dt.shape!=dg.shape or dt2.shape[1]!=ns:
raise ValueError('Wrong input shape')
if np.isnan(dt).sum()+np.isnan(dt2).sum()>0:
raise ValueError('NaN found.')
arglist=['const MATRIXG*','const MATRIXF*','const MATRIXF*','VECTORF*','MATRIXF*','MATRIXF*','MATRIXF*','MATRIXF*','size_t','size_t']
dgr=np.require(dg,requirements=['A','C','O','W'])
dtr=np.require(dt,requirements=['A','C','O','W'])
dt2r=np.require(dt2,requirements=['A','C','O','W'])
d1=np.require(np.zeros(ng,dtype=dt.dtype),requirements=['A','C','O','W'])
d2=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
d3=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
d4=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
d5=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
args=[dgr,dtr,dt2r,d1,d2,d3,d4,d5,nvx,memlimit]
func=self.cfunc('pijs_gassist_pv',rettype='int',argtypes=arglist)
ret=func(*args)
ans={'ret':ret,'p1':d1,'p2':d2,'p3':d3,'p4':d4,'p5':d5}
return ans | python | def gassists_pv(self,dg,dt,dt2,na=None,memlimit=-1):
"""Calculates p-values of gene i regulating gene j with genotype data assisted method with multiple tests.
dg: numpy.ndarray(nt,ns,dtype=gtype(='u1' by default)) Genotype data.
Entry dg[i,j] is genotype i's value for sample j.
Each value must be among 0,1,...,na.
Genotype i must be best (and significant) eQTL of gene i (in dt).
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
Genotype i (in dg) must be best (and significant) eQTL of gene i.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
na: Number of alleles the species have. It determines the maximum number of values each genotype can take. When unspecified, it is automatically
determined as the maximum of dg.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than the minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p1: numpy.ndarray(nt,dtype=ftype(='f4' by default)). P-values for LLR of test 1.
Test 1 calculates E(A)->A v.s. E(A) A.
p2: numpy.ndarray((nt,nt2),dtype=ftype(='f4' by default)). P-values for LLR of test 2.
Test 2 calculates E(A)->A--B with E(A)->B v.s. E(A)->A<-B.
p3: numpy.ndarray((nt,nt2),dtype=ftype(='f4' by default)). P-values for LLR of test 3.
Test 3 calculates E(A)->A--B with E(A)->B v.s. E(A)->A->B.
p4: numpy.ndarray((nt,nt2),dtype=ftype(='f4' by default)). P-values for LLR of test 4.
Test 4 calculates E(A)->A--B with E(A)->B v.s. E(A)->A B.
p5: numpy.ndarray((nt,nt2),dtype=ftype(='f4' by default)). P-values for LLR of test 5.
Test 5 calculates E(A)->A--B with E(A)->B v.s. B<-E(A)->A.
For more information on tests, see paper.
ftype and gtype can be found in auto.py.
Example: see findr.examples.geuvadis6
"""
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np,gtype_np
from .types import isint
if dg.dtype.char!=gtype_np:
raise ValueError('Wrong input dtype for genotype data: dg.dtype.char is '+dg.dtype.char+'!='+gtype_np)
if dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np:
raise ValueError('Wrong input dtype for gene expression data')
if len(dg.shape)!=2 or len(dt.shape)!=2 or len(dt2.shape)!=2:
raise ValueError('Wrong input shape')
if not isint(memlimit):
raise ValueError('Wrong memlimit type')
if not (na is None or isint(na)):
raise ValueError('Wrong na type')
if na is not None and na<=0:
raise ValueError('Input requires na>0.')
ng=dg.shape[0]
nt=dt2.shape[0]
ns=dg.shape[1]
nvx=na+1 if na else dg.max()+1
if nvx<2:
raise ValueError('Invalid genotype values')
if dt.shape!=dg.shape or dt2.shape[1]!=ns:
raise ValueError('Wrong input shape')
if np.isnan(dt).sum()+np.isnan(dt2).sum()>0:
raise ValueError('NaN found.')
arglist=['const MATRIXG*','const MATRIXF*','const MATRIXF*','VECTORF*','MATRIXF*','MATRIXF*','MATRIXF*','MATRIXF*','size_t','size_t']
dgr=np.require(dg,requirements=['A','C','O','W'])
dtr=np.require(dt,requirements=['A','C','O','W'])
dt2r=np.require(dt2,requirements=['A','C','O','W'])
d1=np.require(np.zeros(ng,dtype=dt.dtype),requirements=['A','C','O','W'])
d2=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
d3=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
d4=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
d5=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
args=[dgr,dtr,dt2r,d1,d2,d3,d4,d5,nvx,memlimit]
func=self.cfunc('pijs_gassist_pv',rettype='int',argtypes=arglist)
ret=func(*args)
ans={'ret':ret,'p1':d1,'p2':d2,'p3':d3,'p4':d4,'p5':d5}
return ans | [
"def",
"gassists_pv",
"(",
"self",
",",
"dg",
",",
"dt",
",",
"dt2",
",",
"na",
"=",
"None",
",",
"memlimit",
"=",
"-",
"1",
")",
":",
"if",
"self",
".",
"lib",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Not initialized.\"",
")",
"import",
"numpy",
"as",
"np",
"from",
".",
"auto",
"import",
"ftype_np",
",",
"gtype_np",
"from",
".",
"types",
"import",
"isint",
"if",
"dg",
".",
"dtype",
".",
"char",
"!=",
"gtype_np",
":",
"raise",
"ValueError",
"(",
"'Wrong input dtype for genotype data: dg.dtype.char is '",
"+",
"dg",
".",
"dtype",
".",
"char",
"+",
"'!='",
"+",
"gtype_np",
")",
"if",
"dt",
".",
"dtype",
".",
"char",
"!=",
"ftype_np",
"or",
"dt2",
".",
"dtype",
".",
"char",
"!=",
"ftype_np",
":",
"raise",
"ValueError",
"(",
"'Wrong input dtype for gene expression data'",
")",
"if",
"len",
"(",
"dg",
".",
"shape",
")",
"!=",
"2",
"or",
"len",
"(",
"dt",
".",
"shape",
")",
"!=",
"2",
"or",
"len",
"(",
"dt2",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Wrong input shape'",
")",
"if",
"not",
"isint",
"(",
"memlimit",
")",
":",
"raise",
"ValueError",
"(",
"'Wrong memlimit type'",
")",
"if",
"not",
"(",
"na",
"is",
"None",
"or",
"isint",
"(",
"na",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Wrong na type'",
")",
"if",
"na",
"is",
"not",
"None",
"and",
"na",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'Input requires na>0.'",
")",
"ng",
"=",
"dg",
".",
"shape",
"[",
"0",
"]",
"nt",
"=",
"dt2",
".",
"shape",
"[",
"0",
"]",
"ns",
"=",
"dg",
".",
"shape",
"[",
"1",
"]",
"nvx",
"=",
"na",
"+",
"1",
"if",
"na",
"else",
"dg",
".",
"max",
"(",
")",
"+",
"1",
"if",
"nvx",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"'Invalid genotype values'",
")",
"if",
"dt",
".",
"shape",
"!=",
"dg",
".",
"shape",
"or",
"dt2",
".",
"shape",
"[",
"1",
"]",
"!=",
"ns",
":",
"raise",
"ValueError",
"(",
"'Wrong input shape'",
")",
"if",
"np",
".",
"isnan",
"(",
"dt",
")",
".",
"sum",
"(",
")",
"+",
"np",
".",
"isnan",
"(",
"dt2",
")",
".",
"sum",
"(",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"'NaN found.'",
")",
"arglist",
"=",
"[",
"'const MATRIXG*'",
",",
"'const MATRIXF*'",
",",
"'const MATRIXF*'",
",",
"'VECTORF*'",
",",
"'MATRIXF*'",
",",
"'MATRIXF*'",
",",
"'MATRIXF*'",
",",
"'MATRIXF*'",
",",
"'size_t'",
",",
"'size_t'",
"]",
"dgr",
"=",
"np",
".",
"require",
"(",
"dg",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"dtr",
"=",
"np",
".",
"require",
"(",
"dt",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"dt2r",
"=",
"np",
".",
"require",
"(",
"dt2",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"d1",
"=",
"np",
".",
"require",
"(",
"np",
".",
"zeros",
"(",
"ng",
",",
"dtype",
"=",
"dt",
".",
"dtype",
")",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"d2",
"=",
"np",
".",
"require",
"(",
"np",
".",
"zeros",
"(",
"(",
"ng",
",",
"nt",
")",
",",
"dtype",
"=",
"dt",
".",
"dtype",
")",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"d3",
"=",
"np",
".",
"require",
"(",
"np",
".",
"zeros",
"(",
"(",
"ng",
",",
"nt",
")",
",",
"dtype",
"=",
"dt",
".",
"dtype",
")",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"d4",
"=",
"np",
".",
"require",
"(",
"np",
".",
"zeros",
"(",
"(",
"ng",
",",
"nt",
")",
",",
"dtype",
"=",
"dt",
".",
"dtype",
")",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"d5",
"=",
"np",
".",
"require",
"(",
"np",
".",
"zeros",
"(",
"(",
"ng",
",",
"nt",
")",
",",
"dtype",
"=",
"dt",
".",
"dtype",
")",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"args",
"=",
"[",
"dgr",
",",
"dtr",
",",
"dt2r",
",",
"d1",
",",
"d2",
",",
"d3",
",",
"d4",
",",
"d5",
",",
"nvx",
",",
"memlimit",
"]",
"func",
"=",
"self",
".",
"cfunc",
"(",
"'pijs_gassist_pv'",
",",
"rettype",
"=",
"'int'",
",",
"argtypes",
"=",
"arglist",
")",
"ret",
"=",
"func",
"(",
"*",
"args",
")",
"ans",
"=",
"{",
"'ret'",
":",
"ret",
",",
"'p1'",
":",
"d1",
",",
"'p2'",
":",
"d2",
",",
"'p3'",
":",
"d3",
",",
"'p4'",
":",
"d4",
",",
"'p5'",
":",
"d5",
"}",
"return",
"ans"
] | Calculates p-values of gene i regulating gene j with genotype data assisted method with multiple tests.
dg: numpy.ndarray(nt,ns,dtype=gtype(='u1' by default)) Genotype data.
Entry dg[i,j] is genotype i's value for sample j.
Each value must be among 0,1,...,na.
Genotype i must be best (and significant) eQTL of gene i (in dt).
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
Genotype i (in dg) must be best (and significant) eQTL of gene i.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
na: Number of alleles the species have. It determines the maximum number of values each genotype can take. When unspecified, it is automatically
determined as the maximum of dg.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than the minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p1: numpy.ndarray(nt,dtype=ftype(='f4' by default)). P-values for LLR of test 1.
Test 1 calculates E(A)->A v.s. E(A) A.
p2: numpy.ndarray((nt,nt2),dtype=ftype(='f4' by default)). P-values for LLR of test 2.
Test 2 calculates E(A)->A--B with E(A)->B v.s. E(A)->A<-B.
p3: numpy.ndarray((nt,nt2),dtype=ftype(='f4' by default)). P-values for LLR of test 3.
Test 3 calculates E(A)->A--B with E(A)->B v.s. E(A)->A->B.
p4: numpy.ndarray((nt,nt2),dtype=ftype(='f4' by default)). P-values for LLR of test 4.
Test 4 calculates E(A)->A--B with E(A)->B v.s. E(A)->A B.
p5: numpy.ndarray((nt,nt2),dtype=ftype(='f4' by default)). P-values for LLR of test 5.
Test 5 calculates E(A)->A--B with E(A)->B v.s. B<-E(A)->A.
For more information on tests, see paper.
ftype and gtype can be found in auto.py.
Example: see findr.examples.geuvadis6 | [
"Calculates",
"p",
"-",
"values",
"of",
"gene",
"i",
"regulating",
"gene",
"j",
"with",
"genotype",
"data",
"assisted",
"method",
"with",
"multiple",
"tests",
".",
"dg",
":",
"numpy",
".",
"ndarray",
"(",
"nt",
"ns",
"dtype",
"=",
"gtype",
"(",
"=",
"u1",
"by",
"default",
"))",
"Genotype",
"data",
".",
"Entry",
"dg",
"[",
"i",
"j",
"]",
"is",
"genotype",
"i",
"s",
"value",
"for",
"sample",
"j",
".",
"Each",
"value",
"must",
"be",
"among",
"0",
"1",
"...",
"na",
".",
"Genotype",
"i",
"must",
"be",
"best",
"(",
"and",
"significant",
")",
"eQTL",
"of",
"gene",
"i",
"(",
"in",
"dt",
")",
".",
"dt",
":",
"numpy",
".",
"ndarray",
"(",
"nt",
"ns",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
"Gene",
"expression",
"data",
"for",
"A",
"Entry",
"dt",
"[",
"i",
"j",
"]",
"is",
"gene",
"i",
"s",
"expression",
"level",
"for",
"sample",
"j",
".",
"Genotype",
"i",
"(",
"in",
"dg",
")",
"must",
"be",
"best",
"(",
"and",
"significant",
")",
"eQTL",
"of",
"gene",
"i",
".",
"dt2",
":",
"numpy",
".",
"ndarray",
"(",
"nt2",
"ns",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
"Gene",
"expression",
"data",
"for",
"B",
".",
"dt2",
"has",
"the",
"same",
"format",
"as",
"dt",
"and",
"can",
"be",
"identical",
"with",
"different",
"from",
"or",
"a",
"superset",
"of",
"dt",
".",
"na",
":",
"Number",
"of",
"alleles",
"the",
"species",
"have",
".",
"It",
"determintes",
"the",
"maximum",
"number",
"of",
"values",
"each",
"genotype",
"can",
"take",
".",
"When",
"unspecified",
"it",
"is",
"automatically",
"determined",
"as",
"the",
"maximum",
"of",
"dg",
".",
"memlimit",
":",
"The",
"approximate",
"memory",
"usage",
"limit",
"in",
"bytes",
"for",
"the",
"library",
".",
"For",
"datasets",
"require",
"a",
"larger",
"memory",
"calculation",
"will",
"be",
"split",
"into",
"smaller",
"chunks",
".",
"If",
"the",
"memory",
"limit",
"is",
"smaller",
"than",
"minimum",
"required",
"calculation",
"can",
"fail",
"with",
"an",
"error",
"message",
".",
"memlimit",
"=",
"0",
"defaults",
"to",
"unlimited",
"memory",
"usage",
".",
"Return",
":",
"dictionary",
"with",
"following",
"keys",
":",
"ret",
":",
"0",
"iff",
"execution",
"succeeded",
".",
"p1",
":",
"numpy",
".",
"ndarray",
"(",
"nt",
"dtype",
"=",
"ftype",
"(",
"=",
"f4",
"by",
"default",
"))",
".",
"P",
"-",
"values",
"for",
"LLR",
"of",
"test",
"1",
".",
"Test",
"1",
"calculates",
"E",
"(",
"A",
")",
"-",
">",
"A",
"v",
".",
"s",
".",
"E",
"(",
"A",
")",
"A",
".",
"p2",
":",
"numpy",
".",
"ndarray",
"((",
"nt",
"nt2",
")",
"dtype",
"=",
"ftype",
"(",
"=",
"f4",
"by",
"default",
"))",
".",
"P",
"-",
"values",
"for",
"LLR",
"of",
"test",
"2",
".",
"Test",
"2",
"calculates",
"E",
"(",
"A",
")",
"-",
">",
"A",
"--",
"B",
"with",
"E",
"(",
"A",
")",
"-",
">",
"B",
"v",
".",
"s",
".",
"E",
"(",
"A",
")",
"-",
">",
"A<",
"-",
"B",
".",
"p3",
":",
"numpy",
".",
"ndarray",
"((",
"nt",
"nt2",
")",
"dtype",
"=",
"ftype",
"(",
"=",
"f4",
"by",
"default",
"))",
".",
"P",
"-",
"values",
"for",
"LLR",
"of",
"test",
"3",
".",
"Test",
"3",
"calculates",
"E",
"(",
"A",
")",
"-",
">",
"A",
"--",
"B",
"with",
"E",
"(",
"A",
")",
"-",
">",
"B",
"v",
".",
"s",
".",
"E",
"(",
"A",
")",
"-",
">",
"A",
"-",
">",
"B",
".",
"p4",
":",
"numpy",
".",
"ndarray",
"((",
"nt",
"nt2",
")",
"dtype",
"=",
"ftype",
"(",
"=",
"f4",
"by",
"default",
"))",
".",
"P",
"-",
"values",
"for",
"LLR",
"of",
"test",
"4",
".",
"Test",
"4",
"calculates",
"E",
"(",
"A",
")",
"-",
">",
"A",
"--",
"B",
"with",
"E",
"(",
"A",
")",
"-",
">",
"B",
"v",
".",
"s",
".",
"E",
"(",
"A",
")",
"-",
">",
"A",
"B",
".",
"p5",
":",
"numpy",
".",
"ndarray",
"((",
"nt",
"nt2",
")",
"dtype",
"=",
"ftype",
"(",
"=",
"f4",
"by",
"default",
"))",
".",
"P",
"-",
"values",
"for",
"LLR",
"of",
"test",
"5",
".",
"Test",
"5",
"calculates",
"E",
"(",
"A",
")",
"-",
">",
"A",
"--",
"B",
"with",
"E",
"(",
"A",
")",
"-",
">",
"B",
"v",
".",
"s",
".",
"B<",
"-",
"E",
"(",
"A",
")",
"-",
">",
"A",
".",
"For",
"more",
"information",
"on",
"tests",
"see",
"paper",
".",
"ftype",
"and",
"gtype",
"can",
"be",
"found",
"in",
"auto",
".",
"py",
".",
"Example",
":",
"see",
"findr",
".",
"examples",
".",
"geuvadis6"
] | train | https://github.com/lingfeiwang/findr-python/blob/417f163e658fee6ef311571f7048f96069a0cf1f/findr/pij.py#L23-L96 |
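A hedged input-construction sketch for gassists_pv: the dtype and shape requirements follow the checks in the function above ('u1' genotypes taking values 0..na, '=f4' expression matrices sharing the sample axis). How the findr library object is created is an assumption; only the array preparation is grounded in the record.

# Preparing correctly typed inputs for gassists_pv (requires findr + numpy).
import numpy as np
import findr  # assumption: package import name

nt, nt2, ns = 10, 30, 50                              # genes A, genes B, samples
rng = np.random.default_rng(0)
dg = rng.integers(0, 3, size=(nt, ns)).astype('u1')   # genotypes in {0,1,2}, na=2
dt = rng.random((nt, ns), dtype=np.float32)           # expression of A, '=f4'
dt2 = rng.random((nt2, ns), dtype=np.float32)         # expression of B

l = findr.lib()  # assumption: constructor name and default arguments
ans = l.gassists_pv(dg, dt, dt2, na=2)
if ans['ret'] == 0:
    print(ans['p2'].shape)  # (10, 30): test-2 p-values for every A->B pair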
lingfeiwang/findr-python | findr/pij.py | _gassist_any | def _gassist_any(self,dg,dt,dt2,name,na=None,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with genotype data assisted method,
with the recommended combination of multiple tests.
dg: numpy.ndarray(nt,ns,dtype=gtype(='u1' by default)) Genotype data.
Entry dg[i,j] is genotype i's value for sample j.
Each value must be among 0,1,...,na.
Genotype i must be best (and significant) eQTL of gene i (in dt).
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
Genotype i (in dg) must be best (and significant) eQTL of gene i.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set parameter nodiag = 1.
name: actual C function name to call
na: Number of alleles the species have. It determines the maximum number of values each genotype can take. When unspecified, it is automatically
determined as the maximum of dg.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than the minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability function for the recommended combination of multiple tests.
For more information on tests, see paper.
ftype and gtype can be found in auto.py.
"""
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np,gtype_np
from .types import isint
if dg.dtype.char!=gtype_np:
raise ValueError('Wrong input dtype for genotype data: dg.dtype.char is '+dg.dtype.char+'!='+gtype_np)
if dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np:
raise ValueError('Wrong input dtype for gene expression data')
if len(dg.shape)!=2 or len(dt.shape)!=2 or len(dt2.shape)!=2:
raise ValueError('Wrong input shape')
if type(nodiag) is not bool:
raise ValueError('Wrong nodiag type')
if not isint(memlimit):
raise ValueError('Wrong memlimit type')
if not (na is None or isint(na)):
raise ValueError('Wrong na type')
if na is not None and na<=0:
raise ValueError('Input requires na>0.')
ng=dg.shape[0]
nt=dt2.shape[0]
ns=dg.shape[1]
nvx=na+1 if na else dg.max()+1
nd=1 if nodiag else 0
if nvx<2:
raise ValueError('Invalid genotype values')
if dt.shape!=dg.shape or dt2.shape[1]!=ns:
raise ValueError('Wrong input shape')
if np.isnan(dt).sum()+np.isnan(dt2).sum()>0:
raise ValueError('NaN found.')
func=self.cfunc(name,rettype='int',argtypes=['const MATRIXG*','const MATRIXF*','const MATRIXF*','MATRIXF*','size_t','byte','size_t'])
d=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
dgr=np.require(dg,requirements=['A','C','O','W'])
dtr=np.require(dt,requirements=['A','C','O','W'])
dt2r=np.require(dt2,requirements=['A','C','O','W'])
ret=func(dgr,dtr,dt2r,d,nvx,nd,memlimit)
ans={'ret':ret,'p':d}
return ans | python | def _gassist_any(self,dg,dt,dt2,name,na=None,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with genotype data assisted method,
with the recommended combination of multiple tests.
dg: numpy.ndarray(nt,ns,dtype=gtype(='u1' by default)) Genotype data.
Entry dg[i,j] is genotype i's value for sample j.
Each value must be among 0,1,...,na.
Genotype i must be best (and significant) eQTL of gene i (in dt).
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
Genotype i (in dg) must be best (and significant) eQTL of gene i.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set parameter nodiag = 1.
name: actual C function name to call
na: Number of alleles the species have. It determines the maximum number of values each genotype can take. When unspecified, it is automatically
determined as the maximum of dg.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than the minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability function for the recommended combination of multiple tests.
For more information on tests, see paper.
ftype and gtype can be found in auto.py.
"""
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np,gtype_np
from .types import isint
if dg.dtype.char!=gtype_np:
raise ValueError('Wrong input dtype for genotype data: dg.dtype.char is '+dg.dtype.char+'!='+gtype_np)
if dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np:
raise ValueError('Wrong input dtype for gene expression data')
if len(dg.shape)!=2 or len(dt.shape)!=2 or len(dt2.shape)!=2:
raise ValueError('Wrong input shape')
if type(nodiag) is not bool:
raise ValueError('Wrong nodiag type')
if not isint(memlimit):
raise ValueError('Wrong memlimit type')
if not (na is None or isint(na)):
raise ValueError('Wrong na type')
if na is not None and na<=0:
raise ValueError('Input requires na>0.')
ng=dg.shape[0]
nt=dt2.shape[0]
ns=dg.shape[1]
nvx=na+1 if na else dg.max()+1
nd=1 if nodiag else 0
if nvx<2:
raise ValueError('Invalid genotype values')
if dt.shape!=dg.shape or dt2.shape[1]!=ns:
raise ValueError('Wrong input shape')
if np.isnan(dt).sum()+np.isnan(dt2).sum()>0:
raise ValueError('NaN found.')
func=self.cfunc(name,rettype='int',argtypes=['const MATRIXG*','const MATRIXF*','const MATRIXF*','MATRIXF*','size_t','byte','size_t'])
d=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
dgr=np.require(dg,requirements=['A','C','O','W'])
dtr=np.require(dt,requirements=['A','C','O','W'])
dt2r=np.require(dt2,requirements=['A','C','O','W'])
ret=func(dgr,dtr,dt2r,d,nvx,nd,memlimit)
ans={'ret':ret,'p':d}
return ans | [ "def", "_gassist_any", "(", "self", ",", "dg", ",", "dt", ",", "dt2", ",", "name", ",", "na", "=", "None", ",", "nodiag", "=", "False", ",", "memlimit", "=", "-", "1", ")", ":", … ] | Calculates probability of gene i regulating gene j with genotype data assisted method,
with the recommended combination of multiple tests.
dg: numpy.ndarray(nt,ns,dtype=gtype(='u1' by default)) Genotype data.
Entry dg[i,j] is genotype i's value for sample j.
Each value must be among 0,1,...,na.
Genotype i must be the best (and significant) eQTL of gene i (in dt).
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
Genotype i (in dg) must be the best (and significant) eQTL of gene i.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set the parameter nodiag = True.
name: actual C function name to call
na: Number of alleles the species has. It determines the maximum number of values each genotype can take. When unspecified, it is automatically
determined as the maximum of dg.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, the calculation is split into smaller chunks. If the memory limit is smaller than the minimum required, the calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability from the recommended combination of multiple tests.
For more information on tests, see paper.
ftype and gtype can be found in auto.py. | [ "Calculates", "probability", "of", "gene", "i", "regulating", "gene", "j", "with", "genotype", "data", "assisted", "method", … ] | train | https://github.com/lingfeiwang/findr-python/blob/417f163e658fee6ef311571f7048f96069a0cf1f/findr/pij.py#L187-L254 |
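
The _gassist_any wrapper above shows a general pattern for handing NumPy arrays to a C library: every input is forced to be aligned, C-contiguous, owned, and writable with np.require, and the output matrix is preallocated and zero-filled before the call. Below is a minimal sketch of that pattern using only NumPy; the findr session object and its cfunc wrapper are assumed and not reproduced here.

import numpy as np

def prepare_buffers(dg, dt, dt2, ftype='=f4'):
    # Force the layout the C side expects: aligned ('A'), C-contiguous ('C'),
    # owning its data ('O'), and writable ('W'); np.require copies only if needed.
    dgr = np.require(dg, requirements=['A', 'C', 'O', 'W'])
    dtr = np.require(dt, requirements=['A', 'C', 'O', 'W'])
    dt2r = np.require(dt2, requirements=['A', 'C', 'O', 'W'])
    # Preallocate the (nt, nt2) output matrix the library writes into.
    p = np.require(np.zeros((dt.shape[0], dt2.shape[0]), dtype=ftype),
                   requirements=['A', 'C', 'O', 'W'])
    return dgr, dtr, dt2r, p
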
lingfeiwang/findr-python | findr/pij.py | gassist | def gassist(self,dg,dt,dt2,na=None,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with genotype data assisted method,
with the recommended combination of multiple tests.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dg: numpy.ndarray(nt,ns,dtype=gtype(='u1' by default)) Genotype data.
Entry dg[i,j] is genotype i's value for sample j.
Each value must be among 0,1,...,na.
Genotype i must be the best (and significant) eQTL of gene i (in dt).
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
Genotype i (in dg) must be the best (and significant) eQTL of gene i.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set the parameter nodiag = True.
na: Number of alleles the species has. It determines the maximum number of values each genotype can take. When unspecified, it is automatically
determined as the maximum of dg.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, the calculation is split into smaller chunks. If the memory limit is smaller than the minimum required, the calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability from the recommended combination of multiple tests.
For more information on tests, see paper.
ftype and gtype can be found in auto.py.
Example: see findr.examples.geuvadis2, findr.examples.geuvadis3
"""
return _gassist_any(self,dg,dt,dt2,"pij_gassist",na=na,nodiag=nodiag,memlimit=memlimit) | python | def gassist(self,dg,dt,dt2,na=None,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with genotype data assisted method,
with the recommended combination of multiple tests.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dg: numpy.ndarray(nt,ns,dtype=gtype(='u1' by default)) Genotype data.
Entry dg[i,j] is genotype i's value for sample j.
Each value must be among 0,1,...,na.
Genotype i must be the best (and significant) eQTL of gene i (in dt).
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
Genotype i (in dg) must be the best (and significant) eQTL of gene i.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set the parameter nodiag = True.
na: Number of alleles the species has. It determines the maximum number of values each genotype can take. When unspecified, it is automatically
determined as the maximum of dg.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, the calculation is split into smaller chunks. If the memory limit is smaller than the minimum required, the calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability from the recommended combination of multiple tests.
For more information on tests, see paper.
ftype and gtype can be found in auto.py.
Example: see findr.examples.geuvadis2, findr.examples.geuvadis3
"""
return _gassist_any(self,dg,dt,dt2,"pij_gassist",na=na,nodiag=nodiag,memlimit=memlimit) | [ "def", "gassist", "(", "self", ",", "dg", ",", "dt", ",", "dt2", ",", "na", "=", "None", ",", "nodiag", "=", "False", ",", "memlimit", "=", "-", "1", ")", ":", … ] | Calculates probability of gene i regulating gene j with genotype data assisted method,
with the recommended combination of multiple tests.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dg: numpy.ndarray(nt,ns,dtype=gtype(='u1' by default)) Genotype data.
Entry dg[i,j] is genotype i's value for sample j.
Each value must be among 0,1,...,na.
Genotype i must be the best (and significant) eQTL of gene i (in dt).
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
Genotype i (in dg) must be the best (and significant) eQTL of gene i.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set the parameter nodiag = True.
na: Number of alleles the species has. It determines the maximum number of values each genotype can take. When unspecified, it is automatically
determined as the maximum of dg.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, the calculation is split into smaller chunks. If the memory limit is smaller than the minimum required, the calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability from the recommended combination of multiple tests.
For more information on tests, see paper.
ftype and gtype can be found in auto.py.
Example: see findr.examples.geuvadis2, findr.examples.geuvadis3 | [ "Calculates", "probability", "of", "gene", "i", "regulating", "gene", "j", "with", "genotype", "data", "assisted", "method", … ] | train | https://github.com/lingfeiwang/findr-python/blob/417f163e658fee6ef311571f7048f96069a0cf1f/findr/pij.py#L256-L287 |
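
A minimal sketch of inputs that satisfy gassist's documented contract, with dt2 built as a superset of dt so that nodiag=True applies. The data are random and purely illustrative; the session object that exposes gassist as a method is assumed to be constructed as in the package examples (findr.examples.geuvadis2/3) and is not shown, so the commented call is a hypothetical.

import numpy as np

nt, nt2, ns = 100, 500, 300
rng = np.random.default_rng(0)
dg = rng.integers(0, 3, size=(nt, ns)).astype('u1')    # genotypes in {0,1,2}, i.e. na=2
dt = rng.normal(size=(nt, ns)).astype('=f4')           # expression of the nt A genes
extra = rng.normal(size=(nt2 - nt, ns)).astype('=f4')
dt2 = np.concatenate([dt, extra], axis=0)              # dt2[:nt,:] == dt, so use nodiag=True
# With a findr session object l (construction and exact method binding not shown):
# ans = l.gassist(dg, dt, dt2, nodiag=True)
# ans['ret'] == 0 on success; ans['p'].shape == (nt, nt2)
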
lingfeiwang/findr-python | findr/pij.py | _cassists_any | def _cassists_any(self,dc,dt,dt2,name,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with continuous anchor data assisted method,
with multiple tests, by converting log likelihoods into probabilities per A for all B.
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set the parameter nodiag = True.
name: actual C function name to call
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, the calculation is split into smaller chunks. If the memory limit is smaller than the minimum required, the calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p1: numpy.ndarray(nt,dtype=ftype(='=f4' by default)). Probability for test 1.
Test 1 compares E(A)->A vs. E(A)  A (no relation). The former is preferred.
For nodiag=False, because the function expects significant anchors, p1 always returns 1.
For nodiag=True, p1 uses the diagonal elements of p2.
p2: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 2.
Test 2 compares E(A)->A--B with E(A)->B vs. E(A)->A<-B. The former is preferred.
p3: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 3.
Test 3 compares E(A)->A--B with E(A)->B vs. E(A)->A->B. The latter is preferred.
p4: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 4.
Test 4 compares E(A)->A--B with E(A)->B vs. E(A)->A  B (A and B unrelated). The former is preferred.
p5: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 5.
Test 5 compares E(A)->A--B with E(A)->B vs. B<-E(A)->A. The former is preferred.
For more information on tests, see paper.
ftype can be found in auto.py.
"""
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np
from .types import isint
if dc.dtype.char!=ftype_np or dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np:
raise ValueError('Wrong input dtype for gene expression data')
if len(dc.shape)!=2 or len(dt.shape)!=2 or len(dt2.shape)!=2:
raise ValueError('Wrong input shape')
if type(nodiag) is not bool:
raise ValueError('Wrong nodiag type')
if not isint(memlimit):
raise ValueError('Wrong memlimit type')
ng=dc.shape[0]
nt=dt2.shape[0]
ns=dc.shape[1]
nd=1 if nodiag else 0
if dt.shape!=dc.shape or dt2.shape[1]!=ns:
raise ValueError('Wrong input shape')
if np.isnan(dc).sum()+np.isnan(dt).sum()+np.isnan(dt2).sum()>0:
raise ValueError('NaN found.')
arglist=['const MATRIXF*','const MATRIXF*','const MATRIXF*','VECTORF*','MATRIXF*','MATRIXF*','MATRIXF*','MATRIXF*','byte','size_t']
names=name
dcr=np.require(dc,requirements=['A','C','O','W'])
dtr=np.require(dt,requirements=['A','C','O','W'])
dt2r=np.require(dt2,requirements=['A','C','O','W'])
d1=np.require(np.zeros(ng,dtype=dt.dtype),requirements=['A','C','O','W'])
d2=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
d3=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
d4=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
d5=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
args=[dcr,dtr,dt2r,d1,d2,d3,d4,d5,nd,memlimit]
func=self.cfunc(names,rettype='int',argtypes=arglist)
ret=func(*args)
ans={'ret':ret,'p1':d1,'p2':d2,'p3':d3,'p4':d4,'p5':d5}
return ans | python | def _cassists_any(self,dc,dt,dt2,name,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with continuous anchor data assisted method,
with multiple tests, by converting log likelihoods into probabilities per A for all B.
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set the parameter nodiag = True.
name: actual C function name to call
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, the calculation is split into smaller chunks. If the memory limit is smaller than the minimum required, the calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p1: numpy.ndarray(nt,dtype=ftype(='=f4' by default)). Probability for test 1.
Test 1 compares E(A)->A vs. E(A)  A (no relation). The former is preferred.
For nodiag=False, because the function expects significant anchors, p1 always returns 1.
For nodiag=True, p1 uses the diagonal elements of p2.
p2: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 2.
Test 2 compares E(A)->A--B with E(A)->B vs. E(A)->A<-B. The former is preferred.
p3: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 3.
Test 3 compares E(A)->A--B with E(A)->B vs. E(A)->A->B. The latter is preferred.
p4: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 4.
Test 4 compares E(A)->A--B with E(A)->B vs. E(A)->A  B (A and B unrelated). The former is preferred.
p5: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 5.
Test 5 compares E(A)->A--B with E(A)->B vs. B<-E(A)->A. The former is preferred.
For more information on tests, see paper.
ftype can be found in auto.py.
"""
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np
from .types import isint
if dc.dtype.char!=ftype_np or dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np:
raise ValueError('Wrong input dtype for gene expression data')
if len(dc.shape)!=2 or len(dt.shape)!=2 or len(dt2.shape)!=2:
raise ValueError('Wrong input shape')
if type(nodiag) is not bool:
raise ValueError('Wrong nodiag type')
if not isint(memlimit):
raise ValueError('Wrong memlimit type')
ng=dc.shape[0]
nt=dt2.shape[0]
ns=dc.shape[1]
nd=1 if nodiag else 0
if dt.shape!=dc.shape or dt2.shape[1]!=ns:
raise ValueError('Wrong input shape')
if np.isnan(dc).sum()+np.isnan(dt).sum()+np.isnan(dt2).sum()>0:
raise ValueError('NaN found.')
arglist=['const MATRIXF*','const MATRIXF*','const MATRIXF*','VECTORF*','MATRIXF*','MATRIXF*','MATRIXF*','MATRIXF*','byte','size_t']
names=name
dcr=np.require(dc,requirements=['A','C','O','W'])
dtr=np.require(dt,requirements=['A','C','O','W'])
dt2r=np.require(dt2,requirements=['A','C','O','W'])
d1=np.require(np.zeros(ng,dtype=dt.dtype),requirements=['A','C','O','W'])
d2=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
d3=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
d4=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
d5=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
args=[dcr,dtr,dt2r,d1,d2,d3,d4,d5,nd,memlimit]
func=self.cfunc(names,rettype='int',argtypes=arglist)
ret=func(*args)
ans={'ret':ret,'p1':d1,'p2':d2,'p3':d3,'p4':d4,'p5':d5}
return ans | [ "def", "_cassists_any", "(", "self", ",", "dc", ",", "dt", ",", "dt2", ",", "name", ",", "nodiag", "=", "False", ",", "memlimit", "=", "-", "1", ")", ":", … ] | Calculates probability of gene i regulating gene j with continuous anchor data assisted method,
with multiple tests, by converting log likelihoods into probabilities per A for all B.
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set the parameter nodiag = True.
name: actual C function name to call
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, the calculation is split into smaller chunks. If the memory limit is smaller than the minimum required, the calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p1: numpy.ndarray(nt,dtype=ftype(='=f4' by default)). Probability for test 1.
Test 1 compares E(A)->A vs. E(A)  A (no relation). The former is preferred.
For nodiag=False, because the function expects significant anchors, p1 always returns 1.
For nodiag=True, p1 uses the diagonal elements of p2.
p2: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 2.
Test 2 compares E(A)->A--B with E(A)->B vs. E(A)->A<-B. The former is preferred.
p3: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 3.
Test 3 compares E(A)->A--B with E(A)->B vs. E(A)->A->B. The latter is preferred.
p4: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 4.
Test 4 compares E(A)->A--B with E(A)->B vs. E(A)->A  B (A and B unrelated). The former is preferred.
p5: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 5.
Test 5 compares E(A)->A--B with E(A)->B vs. B<-E(A)->A. The former is preferred.
For more information on tests, see paper.
ftype can be found in auto.py. | [ "Calculates", "probability", "of", "gene", "i", "regulating", "gene", "j", "with", "continuous", "anchor", "data", "assisted", "method", … ] | train | https://github.com/lingfeiwang/findr-python/blob/417f163e658fee6ef311571f7048f96069a0cf1f/findr/pij.py#L383-L454 |
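
Since _cassists_any (and cassists below) return the five test probabilities separately, downstream code can combine them as needed. The sketch below illustrates one such post-processing step; the product p2*p5 is a hypothetical composite score chosen for illustration only, not the package's recommended combination (that is what the single-p functions compute; see the paper).

import numpy as np

def top_edges(ans, k=10):
    # Hypothetical composite: tests 2 and 5 both favor the model containing
    # E(A)->B, so their product rewards pairs supported by both tests.
    score = ans['p2'] * ans['p5']
    order = np.argsort(score, axis=None)[::-1][:k]
    rows, cols = np.unravel_index(order, score.shape)
    return [(int(i), int(j), float(score[i, j])) for i, j in zip(rows, cols)]
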
lingfeiwang/findr-python | findr/pij.py | cassists | def cassists(self,dc,dt,dt2,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with continuous data assisted method,
with multiple tests, by converting log likelihoods into probabilities per A for all B.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set the parameter nodiag = True.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, the calculation is split into smaller chunks. If the memory limit is smaller than the minimum required, the calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p1: numpy.ndarray(nt,dtype=ftype(='=f4' by default)). Probability for test 1.
Test 1 compares E(A)->A vs. E(A)  A (no relation). The former is preferred.
For nodiag=False, because the function expects significant anchors, p1 always returns 1.
For nodiag=True, p1 uses the diagonal elements of p2.
p2: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 2.
Test 2 compares E(A)->A--B with E(A)->B vs. E(A)->A<-B. The former is preferred.
p3: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 3.
Test 3 compares E(A)->A--B with E(A)->B vs. E(A)->A->B. The latter is preferred.
p4: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 4.
Test 4 compares E(A)->A--B with E(A)->B vs. E(A)->A  B (A and B unrelated). The former is preferred.
p5: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 5.
Test 5 compares E(A)->A--B with E(A)->B vs. B<-E(A)->A. The former is preferred.
For more information on tests, see paper.
ftype can be found in auto.py.
Example: see findr.examples.geuvadis4 (similar format)
"""
return _cassists_any(self,dc,dt,dt2,"pijs_cassist",nodiag=nodiag,memlimit=memlimit) | python | def cassists(self,dc,dt,dt2,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with continuous data assisted method,
with multiple tests, by converting log likelihoods into probabilities per A for all B.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set the parameter nodiag = True.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, the calculation is split into smaller chunks. If the memory limit is smaller than the minimum required, the calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p1: numpy.ndarray(nt,dtype=ftype(='=f4' by default)). Probability for test 1.
Test 1 compares E(A)->A vs. E(A)  A (no relation). The former is preferred.
For nodiag=False, because the function expects significant anchors, p1 always returns 1.
For nodiag=True, p1 uses the diagonal elements of p2.
p2: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 2.
Test 2 compares E(A)->A--B with E(A)->B vs. E(A)->A<-B. The former is preferred.
p3: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 3.
Test 3 compares E(A)->A--B with E(A)->B vs. E(A)->A->B. The latter is preferred.
p4: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 4.
Test 4 compares E(A)->A--B with E(A)->B vs. E(A)->A  B (A and B unrelated). The former is preferred.
p5: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 5.
Test 5 compares E(A)->A--B with E(A)->B vs. B<-E(A)->A. The former is preferred.
For more information on tests, see paper.
ftype can be found in auto.py.
Example: see findr.examples.geuvadis4 (similar format)
"""
return _cassists_any(self,dc,dt,dt2,"pijs_cassist",nodiag=nodiag,memlimit=memlimit) | [ "def", "cassists", "(", "self", ",", "dc", ",", "dt", ",", "dt2", ",", "nodiag", "=", "False", ",", "memlimit", "=", "-", "1", ")", ":", … ] | Calculates probability of gene i regulating gene j with continuous data assisted method,
with multiple tests, by converting log likelihoods into probabilities per A for all B.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set the parameter nodiag = True.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, the calculation is split into smaller chunks. If the memory limit is smaller than the minimum required, the calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p1: numpy.ndarray(nt,dtype=ftype(='=f4' by default)). Probability for test 1.
Test 1 compares E(A)->A vs. E(A)  A (no relation). The former is preferred.
For nodiag=False, because the function expects significant anchors, p1 always returns 1.
For nodiag=True, p1 uses the diagonal elements of p2.
p2: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 2.
Test 2 compares E(A)->A--B with E(A)->B vs. E(A)->A<-B. The former is preferred.
p3: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 3.
Test 3 compares E(A)->A--B with E(A)->B vs. E(A)->A->B. The latter is preferred.
p4: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 4.
Test 4 compares E(A)->A--B with E(A)->B vs. E(A)->A  B (A and B unrelated). The former is preferred.
p5: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability for test 5.
Test 5 compares E(A)->A--B with E(A)->B vs. B<-E(A)->A. The former is preferred.
For more information on tests, see paper.
ftype can be found in auto.py.
Example: see findr.examples.geuvadis4 (similar format) | [ "Calculates", "probability", "of", "gene", "i", "regulating", "gene", "j", "with", "continuous", "data", "assisted", "method", … ] | train | https://github.com/lingfeiwang/findr-python/blob/417f163e658fee6ef311571f7048f96069a0cf1f/findr/pij.py#L456-L493 |
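
A corresponding sketch for the continuous-anchor variant: the anchors dc replace genotypes, and all three inputs share the ftype dtype. The data are synthetic, and the anchors here are simply noisy drivers of dt so the example is self-contained; the commented call through an assumed session object is a hypothetical.

import numpy as np

nt, nt2, ns = 50, 200, 120
rng = np.random.default_rng(1)
dc = rng.normal(size=(nt, ns)).astype('=f4')                    # continuous anchor per A gene
dt = (dc + rng.normal(scale=0.5, size=(nt, ns))).astype('=f4')  # A expression driven by its anchor
extra = rng.normal(size=(nt2 - nt, ns)).astype('=f4')
dt2 = np.concatenate([dt, extra], axis=0)                       # dt2[:nt,:] == dt, so use nodiag=True
# With a findr session object l (construction not shown):
# ans = l.cassists(dc, dt, dt2, nodiag=True)
# ans['p1'].shape == (nt,); each of ans['p2']..ans['p5'] has shape (nt, nt2)
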
lingfeiwang/findr-python | findr/pij.py | _cassist_any | def _cassist_any(self,dc,dt,dt2,name,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with continuous data assisted method,
with the recommended combination of multiple tests.
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set the parameter nodiag = True.
name: actual C function name to call
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, the calculation is split into smaller chunks. If the memory limit is smaller than the minimum required, the calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability from the recommended combination of multiple tests.
For more information on tests, see paper.
ftype can be found in auto.py.
"""
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np
from .types import isint
if dc.dtype.char!=ftype_np or dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np:
raise ValueError('Wrong input dtype for gene expression data')
if len(dc.shape)!=2 or len(dt.shape)!=2 or len(dt2.shape)!=2:
raise ValueError('Wrong input shape')
if type(nodiag) is not bool:
raise ValueError('Wrong nodiag type')
if not isint(memlimit):
raise ValueError('Wrong memlimit type')
ng=dc.shape[0]
nt=dt2.shape[0]
ns=dc.shape[1]
nd=1 if nodiag else 0
if dt.shape!=dc.shape or dt2.shape[1]!=ns:
raise ValueError('Wrong input shape')
if np.isnan(dc).sum()+np.isnan(dt).sum()+np.isnan(dt2).sum()>0:
raise ValueError('NaN found.')
func=self.cfunc(name,rettype='int',argtypes=['const MATRIXF*','const MATRIXF*','const MATRIXF*','MATRIXF*','byte','size_t'])
d=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
dcr=np.require(dc,requirements=['A','C','O','W'])
dtr=np.require(dt,requirements=['A','C','O','W'])
dt2r=np.require(dt2,requirements=['A','C','O','W'])
ret=func(dcr,dtr,dt2r,d,nd,memlimit)
ans={'ret':ret,'p':d}
return ans | python | def _cassist_any(self,dc,dt,dt2,name,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with continuous data assisted method,
with the recommended combination of multiple tests.
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set the parameter nodiag = True.
name: actual C function name to call
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, the calculation is split into smaller chunks. If the memory limit is smaller than the minimum required, the calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability from the recommended combination of multiple tests.
For more information on tests, see paper.
ftype can be found in auto.py.
"""
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np
from .types import isint
if dc.dtype.char!=ftype_np or dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np:
raise ValueError('Wrong input dtype for gene expression data')
if len(dc.shape)!=2 or len(dt.shape)!=2 or len(dt2.shape)!=2:
raise ValueError('Wrong input shape')
if type(nodiag) is not bool:
raise ValueError('Wrong nodiag type')
if not isint(memlimit):
raise ValueError('Wrong memlimit type')
ng=dc.shape[0]
nt=dt2.shape[0]
ns=dc.shape[1]
nd=1 if nodiag else 0
if dt.shape!=dc.shape or dt2.shape[1]!=ns:
raise ValueError('Wrong input shape')
if np.isnan(dc).sum()+np.isnan(dt).sum()+np.isnan(dt2).sum()>0:
raise ValueError('NaN found.')
func=self.cfunc(name,rettype='int',argtypes=['const MATRIXF*','const MATRIXF*','const MATRIXF*','MATRIXF*','byte','size_t'])
d=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
dcr=np.require(dc,requirements=['A','C','O','W'])
dtr=np.require(dt,requirements=['A','C','O','W'])
dt2r=np.require(dt2,requirements=['A','C','O','W'])
ret=func(dcr,dtr,dt2r,d,nd,memlimit)
ans={'ret':ret,'p':d}
return ans | [
"def",
"_cassist_any",
"(",
"self",
",",
"dc",
",",
"dt",
",",
"dt2",
",",
"name",
",",
"nodiag",
"=",
"False",
",",
"memlimit",
"=",
"-",
"1",
")",
":",
"if",
"self",
".",
"lib",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Not initialized.\"",
")",
"import",
"numpy",
"as",
"np",
"from",
".",
"auto",
"import",
"ftype_np",
"from",
".",
"types",
"import",
"isint",
"if",
"dc",
".",
"dtype",
".",
"char",
"!=",
"ftype_np",
"or",
"dt",
".",
"dtype",
".",
"char",
"!=",
"ftype_np",
"or",
"dt2",
".",
"dtype",
".",
"char",
"!=",
"ftype_np",
":",
"raise",
"ValueError",
"(",
"'Wrong input dtype for gene expression data'",
")",
"if",
"len",
"(",
"dc",
".",
"shape",
")",
"!=",
"2",
"or",
"len",
"(",
"dt",
".",
"shape",
")",
"!=",
"2",
"or",
"len",
"(",
"dt2",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Wrong input shape'",
")",
"if",
"type",
"(",
"nodiag",
")",
"is",
"not",
"bool",
":",
"raise",
"ValueError",
"(",
"'Wrong nodiag type'",
")",
"if",
"not",
"isint",
"(",
"memlimit",
")",
":",
"raise",
"ValueError",
"(",
"'Wrong memlimit type'",
")",
"ng",
"=",
"dc",
".",
"shape",
"[",
"0",
"]",
"nt",
"=",
"dt2",
".",
"shape",
"[",
"0",
"]",
"ns",
"=",
"dc",
".",
"shape",
"[",
"1",
"]",
"nd",
"=",
"1",
"if",
"nodiag",
"else",
"0",
"if",
"dt",
".",
"shape",
"!=",
"dc",
".",
"shape",
"or",
"dt2",
".",
"shape",
"[",
"1",
"]",
"!=",
"ns",
":",
"raise",
"ValueError",
"(",
"'Wrong input shape'",
")",
"if",
"np",
".",
"isnan",
"(",
"dc",
")",
".",
"sum",
"(",
")",
"+",
"np",
".",
"isnan",
"(",
"dt",
")",
".",
"sum",
"(",
")",
"+",
"np",
".",
"isnan",
"(",
"dt2",
")",
".",
"sum",
"(",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"'NaN found.'",
")",
"func",
"=",
"self",
".",
"cfunc",
"(",
"name",
",",
"rettype",
"=",
"'int'",
",",
"argtypes",
"=",
"[",
"'const MATRIXF*'",
",",
"'const MATRIXF*'",
",",
"'const MATRIXF*'",
",",
"'MATRIXF*'",
",",
"'byte'",
",",
"'size_t'",
"]",
")",
"d",
"=",
"np",
".",
"require",
"(",
"np",
".",
"zeros",
"(",
"(",
"ng",
",",
"nt",
")",
",",
"dtype",
"=",
"dt",
".",
"dtype",
")",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"dcr",
"=",
"np",
".",
"require",
"(",
"dc",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"dtr",
"=",
"np",
".",
"require",
"(",
"dt",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"dt2r",
"=",
"np",
".",
"require",
"(",
"dt2",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"ret",
"=",
"func",
"(",
"dcr",
",",
"dtr",
",",
"dt2r",
",",
"d",
",",
"nd",
",",
"memlimit",
")",
"ans",
"=",
"{",
"'ret'",
":",
"ret",
",",
"'p'",
":",
"d",
"}",
"return",
"ans"
] | Calculates probability of gene i regulating gene j with continuous data assisted method,
with the recommended combination of multiple tests.
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set parameter nodiag = 1.
name: actual C function name to call
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets requiring a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than the minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability function for the recommended combination of multiple tests.
For more information on tests, see paper.
ftype can be found in auto.py. | [
"Calculates",
"probability",
"of",
"gene",
"i",
"regulating",
"gene",
"j",
"with",
"continuous",
"data",
"assisted",
"method",
"with",
"the",
"recommended",
"combination",
"of",
"multiple",
"tests",
".",
"dc",
":",
"numpy",
".",
"ndarray",
"(",
"nt",
"ns",
"dtype",
"=",
"ftype",
"(",
"=",
"f4",
"by",
"default",
"))",
"Continuous",
"anchor",
"data",
".",
"Entry",
"dc",
"[",
"i",
"j",
"]",
"is",
"anchor",
"i",
"s",
"value",
"for",
"sample",
"j",
".",
"Anchor",
"i",
"is",
"used",
"to",
"infer",
"the",
"probability",
"of",
"gene",
"i",
"-",
">",
"any",
"other",
"gene",
".",
"dt",
":",
"numpy",
".",
"ndarray",
"(",
"nt",
"ns",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
"Gene",
"expression",
"data",
"for",
"A",
"Entry",
"dt",
"[",
"i",
"j",
"]",
"is",
"gene",
"i",
"s",
"expression",
"level",
"for",
"sample",
"j",
".",
"dt2",
":",
"numpy",
".",
"ndarray",
"(",
"nt2",
"ns",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
"Gene",
"expression",
"data",
"for",
"B",
".",
"dt2",
"has",
"the",
"same",
"format",
"as",
"dt",
"and",
"can",
"be",
"identical",
"with",
"different",
"from",
"or",
"a",
"superset",
"of",
"dt",
".",
"When",
"dt2",
"is",
"a",
"superset",
"of",
"(",
"or",
"identical",
"with",
")",
"dt",
"dt2",
"must",
"be",
"arranged",
"to",
"be",
"identical",
"with",
"dt",
"at",
"its",
"upper",
"submatrix",
"i",
".",
"e",
".",
"dt2",
"[",
":",
"nt",
":",
"]",
"=",
"dt",
"and",
"set",
"parameter",
"nodiag",
"=",
"1",
".",
"name",
":",
"actual",
"C",
"function",
"name",
"to",
"call",
"nodiag",
":",
"skip",
"diagonal",
"regulations",
"i",
".",
"e",
".",
"regulation",
"A",
"-",
">",
"B",
"for",
"A",
"=",
"B",
".",
"This",
"should",
"be",
"set",
"to",
"True",
"when",
"A",
"is",
"a",
"subset",
"of",
"B",
"and",
"aligned",
"correspondingly",
".",
"memlimit",
":",
"The",
"approximate",
"memory",
"usage",
"limit",
"in",
"bytes",
"for",
"the",
"library",
".",
"For",
"datasets",
"require",
"a",
"larger",
"memory",
"calculation",
"will",
"be",
"split",
"into",
"smaller",
"chunks",
".",
"If",
"the",
"memory",
"limit",
"is",
"smaller",
"than",
"minimum",
"required",
"calculation",
"can",
"fail",
"with",
"an",
"error",
"message",
".",
"memlimit",
"=",
"0",
"defaults",
"to",
"unlimited",
"memory",
"usage",
".",
"Return",
":",
"dictionary",
"with",
"following",
"keys",
":",
"ret",
":",
"0",
"iff",
"execution",
"succeeded",
".",
"p",
":",
"numpy",
".",
"ndarray",
"((",
"nt",
"nt2",
")",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
".",
"Probability",
"function",
"from",
"for",
"recommended",
"combination",
"of",
"multiple",
"tests",
".",
"For",
"more",
"information",
"on",
"tests",
"see",
"paper",
".",
"ftype",
"can",
"be",
"found",
"in",
"auto",
".",
"py",
"."
] | train | https://github.com/lingfeiwang/findr-python/blob/417f163e658fee6ef311571f7048f96069a0cf1f/findr/pij.py#L495-L549 |
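Editor's note: the wrapper above leans on numpy.require to hand the C library well-behaved buffers. A minimal standalone sketch of what those requirement flags do (plain NumPy, nothing findr-specific):

import numpy as np

# 'A' aligned, 'C' C-contiguous, 'O' owns its data, 'W' writeable; np.require
# copies only when the input does not already satisfy the requirements.
a = np.asfortranarray(np.zeros((3, 4), dtype='f4'))   # Fortran-ordered input
b = np.require(a, requirements=['A', 'C', 'O', 'W'])  # copied into C order
print(a.flags['C_CONTIGUOUS'], b.flags['C_CONTIGUOUS'])  # False True
c = np.require(b, requirements=['A', 'C', 'O', 'W'])  # already compliant: no copy
print(c is b)  # True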
lingfeiwang/findr-python | findr/pij.py | cassist | def cassist(self,dc,dt,dt2,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with continuous data assisted method,
with the recommended combination of multiple tests.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set parameter nodiag = 1.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets requiring a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than the minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability function for the recommended combination of multiple tests.
For more information on tests, see paper.
ftype can be found in auto.py.
Example: see findr.examples.geuvadis5
"""
return _cassist_any(self,dc,dt,dt2,"pij_cassist",nodiag=nodiag,memlimit=memlimit) | python | def cassist(self,dc,dt,dt2,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with continuous data assisted method,
with the recommended combination of multiple tests.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set parameter nodiag = 1.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets requiring a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than the minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability function for the recommended combination of multiple tests.
For more information on tests, see paper.
ftype can be found in auto.py.
Example: see findr.examples.geuvadis5
"""
return _cassist_any(self,dc,dt,dt2,"pij_cassist",nodiag=nodiag,memlimit=memlimit) | [
"def",
"cassist",
"(",
"self",
",",
"dc",
",",
"dt",
",",
"dt2",
",",
"nodiag",
"=",
"False",
",",
"memlimit",
"=",
"-",
"1",
")",
":",
"return",
"_cassist_any",
"(",
"self",
",",
"dc",
",",
"dt",
",",
"dt2",
",",
"\"pij_cassist\"",
",",
"nodiag",
"=",
"nodiag",
",",
"memlimit",
"=",
"memlimit",
")"
] | Calculates probability of gene i regulating gene j with continuous data assisted method,
with the recommended combination of multiple tests.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set parameter nodiag = 1.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets requiring a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than the minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability function for the recommended combination of multiple tests.
For more information on tests, see paper.
ftype can be found in auto.py.
Example: see findr.examples.geuvadis5 | [
"Calculates",
"probability",
"of",
"gene",
"i",
"regulating",
"gene",
"j",
"with",
"continuous",
"data",
"assisted",
"method",
"with",
"the",
"recommended",
"combination",
"of",
"multiple",
"tests",
".",
"Probabilities",
"are",
"converted",
"from",
"likelihood",
"ratios",
"separately",
"for",
"each",
"A",
".",
"This",
"gives",
"better",
"predictions",
"when",
"the",
"number",
"of",
"secondary",
"targets",
"(",
"dt2",
")",
"is",
"large",
".",
"(",
"Check",
"program",
"warnings",
".",
")",
"dc",
":",
"numpy",
".",
"ndarray",
"(",
"nt",
"ns",
"dtype",
"=",
"ftype",
"(",
"=",
"f4",
"by",
"default",
"))",
"Continuous",
"anchor",
"data",
".",
"Entry",
"dc",
"[",
"i",
"j",
"]",
"is",
"anchor",
"i",
"s",
"value",
"for",
"sample",
"j",
".",
"Anchor",
"i",
"is",
"used",
"to",
"infer",
"the",
"probability",
"of",
"gene",
"i",
"-",
">",
"any",
"other",
"gene",
".",
"dt",
":",
"numpy",
".",
"ndarray",
"(",
"nt",
"ns",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
"Gene",
"expression",
"data",
"for",
"A",
"Entry",
"dt",
"[",
"i",
"j",
"]",
"is",
"gene",
"i",
"s",
"expression",
"level",
"for",
"sample",
"j",
".",
"dt2",
":",
"numpy",
".",
"ndarray",
"(",
"nt2",
"ns",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
"Gene",
"expression",
"data",
"for",
"B",
".",
"dt2",
"has",
"the",
"same",
"format",
"as",
"dt",
"and",
"can",
"be",
"identical",
"with",
"different",
"from",
"or",
"a",
"superset",
"of",
"dt",
".",
"When",
"dt2",
"is",
"a",
"superset",
"of",
"(",
"or",
"identical",
"with",
")",
"dt",
"dt2",
"must",
"be",
"arranged",
"to",
"be",
"identical",
"with",
"dt",
"at",
"its",
"upper",
"submatrix",
"i",
".",
"e",
".",
"dt2",
"[",
":",
"nt",
":",
"]",
"=",
"dt",
"and",
"set",
"parameter",
"nodiag",
"=",
"1",
".",
"nodiag",
":",
"skip",
"diagonal",
"regulations",
"i",
".",
"e",
".",
"regulation",
"A",
"-",
">",
"B",
"for",
"A",
"=",
"B",
".",
"This",
"should",
"be",
"set",
"to",
"True",
"when",
"A",
"is",
"a",
"subset",
"of",
"B",
"and",
"aligned",
"correspondingly",
".",
"memlimit",
":",
"The",
"approximate",
"memory",
"usage",
"limit",
"in",
"bytes",
"for",
"the",
"library",
".",
"For",
"datasets",
"require",
"a",
"larger",
"memory",
"calculation",
"will",
"be",
"split",
"into",
"smaller",
"chunks",
".",
"If",
"the",
"memory",
"limit",
"is",
"smaller",
"than",
"minimum",
"required",
"calculation",
"can",
"fail",
"with",
"an",
"error",
"message",
".",
"memlimit",
"=",
"0",
"defaults",
"to",
"unlimited",
"memory",
"usage",
".",
"Return",
":",
"dictionary",
"with",
"following",
"keys",
":",
"ret",
":",
"0",
"iff",
"execution",
"succeeded",
".",
"p",
":",
"numpy",
".",
"ndarray",
"((",
"nt",
"nt2",
")",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
".",
"Probability",
"function",
"from",
"for",
"recommended",
"combination",
"of",
"multiple",
"tests",
".",
"For",
"more",
"information",
"on",
"tests",
"see",
"paper",
".",
"ftype",
"can",
"be",
"found",
"in",
"auto",
".",
"py",
".",
"Example",
":",
"see",
"findr",
".",
"examples",
".",
"geuvadis5"
] | train | https://github.com/lingfeiwang/findr-python/blob/417f163e658fee6ef311571f7048f96069a0cf1f/findr/pij.py#L551-L578 |
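Editor's note: a hedged input-preparation sketch for calls like the one above. Only the array contract spelled out in the docstring is used; the findr session object `l` and its construction are assumed, not shown here:

import numpy as np

nt, nt2, ns = 100, 1000, 50
dc = np.random.randint(0, 3, size=(nt, ns)).astype('f4')  # anchor values, e.g. genotypes
dt = np.random.randn(nt, ns).astype('f4')                 # expression of the A genes
extra = np.random.randn(nt2 - nt, ns).astype('f4')
dt2 = np.concatenate([dt, extra])                         # B is a superset: dt2[:nt] == dt
# With dt2[:nt] identical to dt, diagonal A->A regulations must be skipped:
# ans = l.cassist(dc, dt, dt2, nodiag=True)   # 'l': assumed findr session object
# ans['ret'] == 0 on success; ans['p'] has shape (nt, nt2)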
lingfeiwang/findr-python | findr/pij.py | rank_pv | def rank_pv(self,dt,dt2,memlimit=-1):
"""Calculates p-values of gene i correlating with gene j by converting log likelihoods into probabilities per A for all B.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, a subset of, or a superset of dt.
memlimit: The approximate memory usage limit in bytes for the library. For datasets requiring a larger memory, calculation will fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). P-values for A--B.
ftype and gtype can be found in auto.py.
Example: see findr.examples.geuvadis1 (similar format)
"""
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np,gtype_np
from .types import isint
if dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np:
raise ValueError('Wrong input dtype for gene expression data')
if len(dt.shape)!=2 or len(dt2.shape)!=2:
raise ValueError('Wrong input shape')
if not isint(memlimit):
raise ValueError('Wrong memlimit type')
ng=dt.shape[0]
nt=dt2.shape[0]
ns=dt.shape[1]
if dt2.shape[1]!=ns:
raise ValueError('Wrong input shape')
if np.isnan(dt).sum()+np.isnan(dt2).sum()>0:
raise ValueError('NaN found.')
dp=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
dtr=np.require(dt,requirements=['A','C','O','W'])
dt2r=np.require(dt2,requirements=['A','C','O','W'])
arglist=['const MATRIXF*','const MATRIXF*','MATRIXF*','size_t']
args=[dtr,dt2r,dp,memlimit]
func=self.cfunc('pij_rank_pv',rettype='int',argtypes=arglist)
ret=func(*args)
ans={'ret':ret,'p':dp}
return ans | python | def rank_pv(self,dt,dt2,memlimit=-1):
"""Calculates p-values of gene i correlating with gene j by converting log likelihoods into probabilities per A for all B.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, a subset of, or a superset of dt.
memlimit: The approximate memory usage limit in bytes for the library. For datasets requiring a larger memory, calculation will fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). P-values for A--B.
ftype and gtype can be found in auto.py.
Example: see findr.examples.geuvadis1 (similar format)
"""
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np,gtype_np
from .types import isint
if dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np:
raise ValueError('Wrong input dtype for gene expression data')
if len(dt.shape)!=2 or len(dt2.shape)!=2:
raise ValueError('Wrong input shape')
if not isint(memlimit):
raise ValueError('Wrong memlimit type')
ng=dt.shape[0]
nt=dt2.shape[0]
ns=dt.shape[1]
if dt2.shape[1]!=ns:
raise ValueError('Wrong input shape')
if np.isnan(dt).sum()+np.isnan(dt2).sum()>0:
raise ValueError('NaN found.')
dp=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W'])
dtr=np.require(dt,requirements=['A','C','O','W'])
dt2r=np.require(dt2,requirements=['A','C','O','W'])
arglist=['const MATRIXF*','const MATRIXF*','MATRIXF*','size_t']
args=[dtr,dt2r,dp,memlimit]
func=self.cfunc('pij_rank_pv',rettype='int',argtypes=arglist)
ret=func(*args)
ans={'ret':ret,'p':dp}
return ans | [
"def",
"rank_pv",
"(",
"self",
",",
"dt",
",",
"dt2",
",",
"memlimit",
"=",
"-",
"1",
")",
":",
"if",
"self",
".",
"lib",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Not initialized.\"",
")",
"import",
"numpy",
"as",
"np",
"from",
".",
"auto",
"import",
"ftype_np",
",",
"gtype_np",
"from",
".",
"types",
"import",
"isint",
"if",
"dt",
".",
"dtype",
".",
"char",
"!=",
"ftype_np",
"or",
"dt2",
".",
"dtype",
".",
"char",
"!=",
"ftype_np",
":",
"raise",
"ValueError",
"(",
"'Wrong input dtype for gene expression data'",
")",
"if",
"len",
"(",
"dt",
".",
"shape",
")",
"!=",
"2",
"or",
"len",
"(",
"dt2",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Wrong input shape'",
")",
"if",
"not",
"isint",
"(",
"memlimit",
")",
":",
"raise",
"ValueError",
"(",
"'Wrong memlimit type'",
")",
"ng",
"=",
"dt",
".",
"shape",
"[",
"0",
"]",
"nt",
"=",
"dt2",
".",
"shape",
"[",
"0",
"]",
"ns",
"=",
"dt",
".",
"shape",
"[",
"1",
"]",
"if",
"dt2",
".",
"shape",
"[",
"1",
"]",
"!=",
"ns",
":",
"raise",
"ValueError",
"(",
"'Wrong input shape'",
")",
"if",
"np",
".",
"isnan",
"(",
"dt",
")",
".",
"sum",
"(",
")",
"+",
"np",
".",
"isnan",
"(",
"dt2",
")",
".",
"sum",
"(",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"'NaN found.'",
")",
"dp",
"=",
"np",
".",
"require",
"(",
"np",
".",
"zeros",
"(",
"(",
"ng",
",",
"nt",
")",
",",
"dtype",
"=",
"dt",
".",
"dtype",
")",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"dtr",
"=",
"np",
".",
"require",
"(",
"dt",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"dt2r",
"=",
"np",
".",
"require",
"(",
"dt2",
",",
"requirements",
"=",
"[",
"'A'",
",",
"'C'",
",",
"'O'",
",",
"'W'",
"]",
")",
"arglist",
"=",
"[",
"'const MATRIXF*'",
",",
"'const MATRIXF*'",
",",
"'MATRIXF*'",
",",
"'size_t'",
"]",
"args",
"=",
"[",
"dtr",
",",
"dt2r",
",",
"dp",
",",
"memlimit",
"]",
"func",
"=",
"self",
".",
"cfunc",
"(",
"'pij_rank_pv'",
",",
"rettype",
"=",
"'int'",
",",
"argtypes",
"=",
"arglist",
")",
"ret",
"=",
"func",
"(",
"*",
"args",
")",
"ans",
"=",
"{",
"'ret'",
":",
"ret",
",",
"'p'",
":",
"dp",
"}",
"return",
"ans"
] | Calculates p-values of gene i correlating with gene j by converting log likelihoods into probabilities per A for all B.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, a subset of, or a superset of dt.
memlimit: The approximate memory usage limit in bytes for the library. For datasets requiring a larger memory, calculation will fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). P-values for A--B.
ftype and gtype can be found in auto.py.
Example: see findr.examples.geuvadis1 (similar format) | [
"Calculates",
"p",
"-",
"values",
"of",
"gene",
"i",
"correlating",
"with",
"gene",
"j",
"by",
"converting",
"log",
"likelihoods",
"into",
"probabilities",
"per",
"A",
"for",
"all",
"B",
".",
"dt",
":",
"numpy",
".",
"ndarray",
"(",
"nt",
"ns",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
"Gene",
"expression",
"data",
"for",
"A",
"Entry",
"dt",
"[",
"i",
"j",
"]",
"is",
"gene",
"i",
"s",
"expression",
"level",
"for",
"sample",
"j",
".",
"dt2",
":",
"numpy",
".",
"ndarray",
"(",
"nt2",
"ns",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
"Gene",
"expression",
"data",
"for",
"B",
".",
"dt2",
"has",
"the",
"same",
"format",
"as",
"dt",
"and",
"can",
"be",
"identical",
"with",
"different",
"from",
"a",
"subset",
"of",
"or",
"a",
"superset",
"of",
"dt",
".",
"memlimit",
":",
"The",
"approximate",
"memory",
"usage",
"limit",
"in",
"bytes",
"for",
"the",
"library",
".",
"For",
"datasets",
"require",
"a",
"larger",
"memory",
"calculation",
"will",
"fail",
"with",
"an",
"error",
"message",
".",
"memlimit",
"=",
"0",
"defaults",
"to",
"unlimited",
"memory",
"usage",
".",
"Return",
":",
"dictionary",
"with",
"following",
"keys",
":",
"ret",
":",
"0",
"iff",
"execution",
"succeeded",
".",
"p",
":",
"numpy",
".",
"ndarray",
"((",
"nt",
"nt2",
")",
"dtype",
"=",
"ftype",
"(",
"=",
"=",
"f4",
"by",
"default",
"))",
".",
"P",
"-",
"values",
"for",
"A",
"--",
"B",
".",
"ftype",
"and",
"gtype",
"can",
"be",
"found",
"in",
"auto",
".",
"py",
".",
"Example",
":",
"see",
"findr",
".",
"examples",
".",
"geuvadis1",
"(",
"similar",
"format",
")"
] | train | https://github.com/lingfeiwang/findr-python/blob/417f163e658fee6ef311571f7048f96069a0cf1f/findr/pij.py#L606-L648 |
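Editor's note: for intuition about what a co-expression p-value means, here is an independent rank-correlation illustration with SciPy. This is not findr's likelihood-ratio test, just a familiar baseline for one gene pair:

import numpy as np
from scipy.stats import spearmanr

rng = np.random.default_rng(0)
x = rng.normal(size=50).astype('f4')            # gene x across 50 samples
y = 0.5 * x + rng.normal(size=50).astype('f4')  # gene y, partly driven by x
rho, p = spearmanr(x, y)
print(rho, p)  # a small p-value flags x--y co-variation across samples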
Duke-QCD/hic | hic/flow.py | qn | def qn(phi, *n):
"""
Calculate the complex flow vector `Q_n`.
:param array-like phi: Azimuthal angles.
:param int n: One or more harmonics to calculate.
:returns:
A single complex number if only one ``n`` was given or a complex array
for multiple ``n``.
"""
phi = np.ravel(phi)
n = np.asarray(n)
i_n_phi = np.zeros((n.size, phi.size), dtype=complex)
np.outer(n, phi, out=i_n_phi.imag)
qn = np.exp(i_n_phi, out=i_n_phi).sum(axis=1)
if qn.size == 1:
qn = qn[0]
return qn | python | def qn(phi, *n):
"""
Calculate the complex flow vector `Q_n`.
:param array-like phi: Azimuthal angles.
:param int n: One or more harmonics to calculate.
:returns:
A single complex number if only one ``n`` was given or a complex array
for multiple ``n``.
"""
phi = np.ravel(phi)
n = np.asarray(n)
i_n_phi = np.zeros((n.size, phi.size), dtype=complex)
np.outer(n, phi, out=i_n_phi.imag)
qn = np.exp(i_n_phi, out=i_n_phi).sum(axis=1)
if qn.size == 1:
qn = qn[0]
return qn | [
"def",
"qn",
"(",
"phi",
",",
"*",
"n",
")",
":",
"phi",
"=",
"np",
".",
"ravel",
"(",
"phi",
")",
"n",
"=",
"np",
".",
"asarray",
"(",
"n",
")",
"i_n_phi",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
".",
"size",
",",
"phi",
".",
"size",
")",
",",
"dtype",
"=",
"complex",
")",
"np",
".",
"outer",
"(",
"n",
",",
"phi",
",",
"out",
"=",
"i_n_phi",
".",
"imag",
")",
"qn",
"=",
"np",
".",
"exp",
"(",
"i_n_phi",
",",
"out",
"=",
"i_n_phi",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"if",
"qn",
".",
"size",
"==",
"1",
":",
"qn",
"=",
"qn",
"[",
"0",
"]",
"return",
"qn"
] | Calculate the complex flow vector `Q_n`.
:param array-like phi: Azimuthal angles.
:param int n: One or more harmonics to calculate.
:returns:
A single complex number if only one ``n`` was given or a complex array
for multiple ``n``. | [
"Calculate",
"the",
"complex",
"flow",
"vector",
"Q_n",
"."
] | train | https://github.com/Duke-QCD/hic/blob/9afb141735b1ac228d296a2349225d2bdcdb68f0/hic/flow.py#L13-L36 |
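Editor's note: a usage sketch for qn, assuming the package is importable as hic (as the repository layout suggests); the closed form Q_n = sum_j exp(i*n*phi_j) is checked directly:

import numpy as np
from hic import flow  # assumed import path

phi = np.random.uniform(-np.pi, np.pi, 1000)  # one isotropic toy event
q2 = flow.qn(phi, 2)                 # single harmonic -> complex scalar
q2_, q3, q4 = flow.qn(phi, 2, 3, 4)  # several harmonics -> complex array
assert np.isclose(q2, np.exp(2j * phi).sum())  # Q_2 by definition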
Duke-QCD/hic | hic/flow.py | Cumulant.correlation | def correlation(self, n, k, error=False):
r"""
Calculate `\langle k \rangle_n`,
the `k`-particle correlation function for `n`\ th-order anisotropy.
:param int n: Anisotropy order.
:param int k: Correlation order.
:param bool error:
Whether to calculate statistical error
(for `\langle 2 \rangle` only).
If true, return a tuple ``(corr, corr_error)``.
"""
self._calculate_corr(n, k)
corr_nk = self._corr[n][k]
if error:
self._calculate_corr_err(n, k)
return corr_nk, self._corr_err[n][k]
else:
return corr_nk | python | def correlation(self, n, k, error=False):
r"""
Calculate `\langle k \rangle_n`,
the `k`-particle correlation function for `n`\ th-order anisotropy.
:param int n: Anisotropy order.
:param int k: Correlation order.
:param bool error:
Whether to calculate statistical error
(for `\langle 2 \rangle` only).
If true, return a tuple ``(corr, corr_error)``.
"""
self._calculate_corr(n, k)
corr_nk = self._corr[n][k]
if error:
self._calculate_corr_err(n, k)
return corr_nk, self._corr_err[n][k]
else:
return corr_nk | [
"def",
"correlation",
"(",
"self",
",",
"n",
",",
"k",
",",
"error",
"=",
"False",
")",
":",
"self",
".",
"_calculate_corr",
"(",
"n",
",",
"k",
")",
"corr_nk",
"=",
"self",
".",
"_corr",
"[",
"n",
"]",
"[",
"k",
"]",
"if",
"error",
":",
"self",
".",
"_calculate_corr_err",
"(",
"n",
",",
"k",
")",
"return",
"corr_nk",
",",
"self",
".",
"_corr_err",
"[",
"n",
"]",
"[",
"k",
"]",
"else",
":",
"return",
"corr_nk"
] | r"""
Calculate `\langle k \rangle_n`,
the `k`-particle correlation function for `n`\ th-order anisotropy.
:param int n: Anisotropy order.
:param int k: Correlation order.
:param bool error:
Whether to calculate statistical error
(for `\langle 2 \rangle` only).
If true, return a tuple ``(corr, corr_error)``. | [
"r",
"Calculate",
"\\",
"langle",
"k",
"\\",
"rangle_n",
"the",
"k",
"-",
"particle",
"correlation",
"function",
"for",
"n",
"\\",
"th",
"-",
"order",
"anisotropy",
"."
] | train | https://github.com/Duke-QCD/hic/blob/9afb141735b1ac228d296a2349225d2bdcdb68f0/hic/flow.py#L154-L175 |
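Editor's note: the two-particle correlation has a simple closed form per event. A standalone sketch, written independently of the class internals (whose constructor is not shown here):

import numpy as np

def corr2(phi, n):
    # <2>_n = (|Q_n|^2 - M) / (M (M - 1)); subtracting M removes self-pairs
    M = len(phi)
    qn = np.exp(1j * n * np.asarray(phi)).sum()
    return (np.abs(qn) ** 2 - M) / (M * (M - 1))

phi = np.random.uniform(-np.pi, np.pi, 500)
print(corr2(phi, 2))  # fluctuates around 0 for an isotropic event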
Duke-QCD/hic | hic/flow.py | Cumulant.cumulant | def cumulant(self, n, k, error=False):
r"""
Calculate `c_n\{k\}`,
the `k`-particle cumulant for `n`\ th-order anisotropy.
:param int n: Anisotropy order.
:param int k: Correlation order.
:param bool error:
Whether to calculate statistical error (for `c_n\{2\}` only).
If true, return a tuple ``(cn2, cn2_error)``.
"""
corr_nk = self.correlation(n, k, error=error)
if k == 2:
return corr_nk
elif k == 4:
corr_n2 = self.correlation(n, 2)
return corr_nk - 2*corr_n2*corr_n2 | python | def cumulant(self, n, k, error=False):
r"""
Calculate `c_n\{k\}`,
the `k`-particle cumulant for `n`\ th-order anisotropy.
:param int n: Anisotropy order.
:param int k: Correlation order.
:param bool error:
Whether to calculate statistical error (for `c_n\{2\}` only).
If true, return a tuple ``(cn2, cn2_error)``.
"""
corr_nk = self.correlation(n, k, error=error)
if k == 2:
return corr_nk
elif k == 4:
corr_n2 = self.correlation(n, 2)
return corr_nk - 2*corr_n2*corr_n2 | [
"def",
"cumulant",
"(",
"self",
",",
"n",
",",
"k",
",",
"error",
"=",
"False",
")",
":",
"corr_nk",
"=",
"self",
".",
"correlation",
"(",
"n",
",",
"k",
",",
"error",
"=",
"error",
")",
"if",
"k",
"==",
"2",
":",
"return",
"corr_nk",
"elif",
"k",
"==",
"4",
":",
"corr_n2",
"=",
"self",
".",
"correlation",
"(",
"n",
",",
"2",
")",
"return",
"corr_nk",
"-",
"2",
"*",
"corr_n2",
"*",
"corr_n2"
] | r"""
Calculate `c_n\{k\}`,
the `k`-particle cumulant for `n`\ th-order anisotropy.
:param int n: Anisotropy order.
:param int k: Correlation order.
:param bool error:
Whether to calculate statistical error (for `c_n\{2\}` only).
If true, return a tuple ``(cn2, cn2_error)``. | [
"r",
"Calculate",
"c_n",
"\\",
"{",
"k",
"\\",
"}",
"the",
"k",
"-",
"particle",
"cumulant",
"for",
"n",
"\\",
"th",
"-",
"order",
"anisotropy",
"."
] | train | https://github.com/Duke-QCD/hic/blob/9afb141735b1ac228d296a2349225d2bdcdb68f0/hic/flow.py#L177-L196 |
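Editor's note: the k=4 branch above implements c_n{4} = <4>_n - 2<2>_n^2. For tiny events the correlators can be brute-forced over distinct particle tuples, which makes the combination concrete (illustrative only; O(M^k) and unusable at real multiplicities):

import itertools
import numpy as np

def corr_k(phi, n, k):
    # <k>_n averages exp(i n (phi_1 + ... + phi_{k/2} - ... - phi_k))
    # over ordered tuples of k distinct particles.
    half = k // 2
    vals = [np.exp(1j * n * (sum(p[:half]) - sum(p[half:])))
            for p in itertools.permutations(phi, k)]
    return np.mean(vals).real

phi = np.random.uniform(-np.pi, np.pi, 7)
c2 = corr_k(phi, 2, 2)
c4 = corr_k(phi, 2, 4) - 2 * c2 ** 2  # c_n{4}
print(c2, c4)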
Duke-QCD/hic | hic/flow.py | Cumulant.flow | def flow(self, n, k, error=False, imaginary='nan'):
r"""
Calculate `v_n\{k\}`,
the estimate of flow coefficient `v_n` from the `k`-particle cumulant.
:param int n: Anisotropy order.
:param int k: Correlation order.
:param bool error:
Whether to calculate statistical error (for `v_n\{2\}` only).
If true, return a tuple ``(vn2, vn2_error)``.
:param str imaginary: (optional)
Determines behavior when the computed flow is imaginary:
- ``'nan'`` (default) -- Return NaN and raise a ``RuntimeWarning``.
- ``'negative'`` -- Return the negative absolute value.
- ``'zero'`` -- Return ``0.0``.
"""
cnk = self.cumulant(n, k, error=error)
if error:
cnk, cnk_err = cnk
vnk_to_k = self._cnk_prefactor[k] * cnk
kinv = 1/k
if vnk_to_k >= 0:
vnk = vnk_to_k**kinv
else:
if imaginary == 'negative':
vnk = -1*(-vnk_to_k)**kinv
elif imaginary == 'zero':
vnk = 0.
else:
warnings.warn('Imaginary flow: returning NaN.', RuntimeWarning)
vnk = float('nan')
if k == 2 and error:
return vnk, .5/np.sqrt(abs(cnk)) * cnk_err
else:
return vnk | python | def flow(self, n, k, error=False, imaginary='nan'):
r"""
Calculate `v_n\{k\}`,
the estimate of flow coefficient `v_n` from the `k`-particle cumulant.
:param int n: Anisotropy order.
:param int k: Correlation order.
:param bool error:
Whether to calculate statistical error (for `v_n\{2\}` only).
If true, return a tuple ``(vn2, vn2_error)``.
:param str imaginary: (optional)
Determines behavior when the computed flow is imaginary:
- ``'nan'`` (default) -- Return NaN and raise a ``RuntimeWarning``.
- ``'negative'`` -- Return the negative absolute value.
- ``'zero'`` -- Return ``0.0``.
"""
cnk = self.cumulant(n, k, error=error)
if error:
cnk, cnk_err = cnk
vnk_to_k = self._cnk_prefactor[k] * cnk
kinv = 1/k
if vnk_to_k >= 0:
vnk = vnk_to_k**kinv
else:
if imaginary == 'negative':
vnk = -1*(-vnk_to_k)**kinv
elif imaginary == 'zero':
vnk = 0.
else:
warnings.warn('Imaginary flow: returning NaN.', RuntimeWarning)
vnk = float('nan')
if k == 2 and error:
return vnk, .5/np.sqrt(abs(cnk)) * cnk_err
else:
return vnk | [
"def",
"flow",
"(",
"self",
",",
"n",
",",
"k",
",",
"error",
"=",
"False",
",",
"imaginary",
"=",
"'nan'",
")",
":",
"cnk",
"=",
"self",
".",
"cumulant",
"(",
"n",
",",
"k",
",",
"error",
"=",
"error",
")",
"if",
"error",
":",
"cnk",
",",
"cnk_err",
"=",
"cnk",
"vnk_to_k",
"=",
"self",
".",
"_cnk_prefactor",
"[",
"k",
"]",
"*",
"cnk",
"kinv",
"=",
"1",
"/",
"k",
"if",
"vnk_to_k",
">=",
"0",
":",
"vnk",
"=",
"vnk_to_k",
"**",
"kinv",
"else",
":",
"if",
"imaginary",
"==",
"'negative'",
":",
"vnk",
"=",
"-",
"1",
"*",
"(",
"-",
"vnk_to_k",
")",
"**",
"kinv",
"elif",
"imaginary",
"==",
"'zero'",
":",
"vnk",
"=",
"0.",
"else",
":",
"warnings",
".",
"warn",
"(",
"'Imaginary flow: returning NaN.'",
",",
"RuntimeWarning",
")",
"vnk",
"=",
"float",
"(",
"'nan'",
")",
"if",
"k",
"==",
"2",
"and",
"error",
":",
"return",
"vnk",
",",
".5",
"/",
"np",
".",
"sqrt",
"(",
"abs",
"(",
"cnk",
")",
")",
"*",
"cnk_err",
"else",
":",
"return",
"vnk"
] | r"""
Calculate `v_n\{k\}`,
the estimate of flow coefficient `v_n` from the `k`-particle cumulant.
:param int n: Anisotropy order.
:param int k: Correlation order.
:param bool error:
Whether to calculate statistical error (for `v_n\{2\}` only).
If true, return a tuple ``(vn2, vn2_error)``.
:param str imaginary: (optional)
Determines behavior when the computed flow is imaginary:
- ``'nan'`` (default) -- Return NaN and raise a ``RuntimeWarning``.
- ``'negative'`` -- Return the negative absolute value.
- ``'zero'`` -- Return ``0.0``. | [
"r",
"Calculate",
"v_n",
"\\",
"{",
"k",
"\\",
"}",
"the",
"estimate",
"of",
"flow",
"coefficient",
"v_n",
"from",
"the",
"k",
"-",
"particle",
"cumulant",
"."
] | train | https://github.com/Duke-QCD/hic/blob/9afb141735b1ac228d296a2349225d2bdcdb68f0/hic/flow.py#L200-L242 |
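Editor's note: the textbook relations behind the k-th root above are v_n{2}^2 = c_n{2} and v_n{4}^4 = -c_n{4}; the class's prefactor table itself is not shown here, so treat the signs as the standard convention. A toy check of the imaginary-flow branch:

cn2 = 0.0025
print(cn2 ** (1 / 2))   # v_n{2} = 0.05
cn4 = 1.0e-6            # a *positive* c_n{4} makes (-c_n{4})^(1/4) imaginary
vn4_to_4 = -cn4
print(vn4_to_4 >= 0)    # False -> flow() warns and returns NaN by default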
Duke-QCD/hic | hic/flow.py | Sampler._pdf | def _pdf(self, phi):
"""
Evaluate the _unnormalized_ flow PDF.
"""
pdf = np.inner(self._vn, np.cos(np.outer(phi, self._n)))
pdf *= 2.
pdf += 1.
return pdf | python | def _pdf(self, phi):
"""
Evaluate the _unnormalized_ flow PDF.
"""
pdf = np.inner(self._vn, np.cos(np.outer(phi, self._n)))
pdf *= 2.
pdf += 1.
return pdf | [
"def",
"_pdf",
"(",
"self",
",",
"phi",
")",
":",
"pdf",
"=",
"np",
".",
"inner",
"(",
"self",
".",
"_vn",
",",
"np",
".",
"cos",
"(",
"np",
".",
"outer",
"(",
"phi",
",",
"self",
".",
"_n",
")",
")",
")",
"pdf",
"*=",
"2.",
"pdf",
"+=",
"1.",
"return",
"pdf"
] | Evaluate the _unnormalized_ flow PDF. | [
"Evaluate",
"the",
"_unnormalized_",
"flow",
"PDF",
"."
] | train | https://github.com/Duke-QCD/hic/blob/9afb141735b1ac228d296a2349225d2bdcdb68f0/hic/flow.py#L279-L288 |
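Editor's note: written out, the expression above is the unnormalized single-particle flow distribution 1 + 2 * sum_n v_n cos(n*phi), e.g.:

import numpy as np

vn = {2: 0.10, 3: 0.03}             # {harmonic: coefficient}
phi = np.linspace(-np.pi, np.pi, 5)
pdf = 1 + 2 * sum(v * np.cos(n * phi) for n, v in vn.items())
print(pdf)  # matches 1 + 2*v2*cos(2 phi) + 2*v3*cos(3 phi) pointwise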
Duke-QCD/hic | hic/flow.py | Sampler._uniform_phi | def _uniform_phi(M):
"""
Generate M random numbers in [-pi, pi).
"""
return np.random.uniform(-np.pi, np.pi, M) | python | def _uniform_phi(M):
"""
Generate M random numbers in [-pi, pi).
"""
return np.random.uniform(-np.pi, np.pi, M) | [
"def",
"_uniform_phi",
"(",
"M",
")",
":",
"return",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"np",
".",
"pi",
",",
"np",
".",
"pi",
",",
"M",
")"
] | Generate M random numbers in [-pi, pi). | [
"Generate",
"M",
"random",
"numbers",
"in",
"[",
"-",
"pi",
"pi",
")",
"."
] | train | https://github.com/Duke-QCD/hic/blob/9afb141735b1ac228d296a2349225d2bdcdb68f0/hic/flow.py#L291-L296 |
Duke-QCD/hic | hic/flow.py | Sampler.pdf | def pdf(self, phi):
r"""
Evaluate the flow PDF `dN/d\phi`.
:param array-like phi: Azimuthal angles.
:returns: The flow PDF evaluated at ``phi``.
"""
if self._n is None:
pdf = np.empty_like(phi)
pdf.fill(.5/np.pi)
return pdf
phi = np.asarray(phi)
pdf = self._pdf(phi)
pdf /= 2.*np.pi
return pdf | python | def pdf(self, phi):
r"""
Evaluate the flow PDF `dN/d\phi`.
:param array-like phi: Azimuthal angles.
:returns: The flow PDF evaluated at ``phi``.
"""
if self._n is None:
pdf = np.empty_like(phi)
pdf.fill(.5/np.pi)
return pdf
phi = np.asarray(phi)
pdf = self._pdf(phi)
pdf /= 2.*np.pi
return pdf | [
"def",
"pdf",
"(",
"self",
",",
"phi",
")",
":",
"if",
"self",
".",
"_n",
"is",
"None",
":",
"pdf",
"=",
"np",
".",
"empty_like",
"(",
"phi",
")",
"pdf",
".",
"fill",
"(",
".5",
"/",
"np",
".",
"pi",
")",
"return",
"pdf",
"phi",
"=",
"np",
".",
"asarray",
"(",
"phi",
")",
"pdf",
"=",
"self",
".",
"_pdf",
"(",
"phi",
")",
"pdf",
"/=",
"2.",
"*",
"np",
".",
"pi",
"return",
"pdf"
] | r"""
Evaluate the flow PDF `dN/d\phi`.
:param array-like phi: Azimuthal angles.
:returns: The flow PDF evaluated at ``phi``. | [
"r",
"Evaluate",
"the",
"flow",
"PDF",
"dN",
"/",
"d",
"\\",
"phi",
"."
] | train | https://github.com/Duke-QCD/hic/blob/9afb141735b1ac228d296a2349225d2bdcdb68f0/hic/flow.py#L298-L317 |
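Editor's note: dividing by 2*pi makes the distribution integrate to one over a full period; a quick numerical check against the explicit formula, independent of the class constructor (not shown here):

import numpy as np

phi = np.linspace(-np.pi, np.pi, 10001)
pdf = (1 + 2 * 0.1 * np.cos(2 * phi)) / (2 * np.pi)  # v2 = 0.1 only
print(np.trapz(pdf, phi))  # ~= 1.0: the cosine terms integrate to zero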
Duke-QCD/hic | hic/flow.py | Sampler.sample | def sample(self, multiplicity):
r"""
Randomly sample azimuthal angles `\phi`.
:param int multiplicity: Number to sample.
:returns: Array of sampled angles.
"""
if self._n is None:
return self._uniform_phi(multiplicity)
# Since the flow PDF does not have an analytic inverse CDF, I use a
# simple accept-reject sampling algorithm. This is reasonably
# efficient since for normal-sized vn, the PDF is close to flat. Now
# due to the overhead of Python functions, it's desirable to minimize
# the number of calls to the random number generator. Therefore I
# sample numbers in chunks; most of the time only one or two chunks
# should be needed. Eventually, I might rewrite this with Cython, but
# it's fast enough for now.
N = 0 # number of phi that have been sampled
phi = np.empty(multiplicity) # allocate array for phi
pdf_max = 1 + 2*self._vn.sum() # sampling efficiency ~ 1/pdf_max
while N < multiplicity:
n_remaining = multiplicity - N
n_to_sample = int(1.03*pdf_max*n_remaining)
phi_chunk = self._uniform_phi(n_to_sample)
phi_chunk = phi_chunk[self._pdf(phi_chunk) >
np.random.uniform(0, pdf_max, n_to_sample)]
K = min(phi_chunk.size, n_remaining) # number of phi to take
phi[N:N+K] = phi_chunk[:K]
N += K
return phi | python | def sample(self, multiplicity):
r"""
Randomly sample azimuthal angles `\phi`.
:param int multiplicity: Number to sample.
:returns: Array of sampled angles.
"""
if self._n is None:
return self._uniform_phi(multiplicity)
# Since the flow PDF does not have an analytic inverse CDF, I use a
# simple accept-reject sampling algorithm. This is reasonably
# efficient since for normal-sized vn, the PDF is close to flat. Now
# due to the overhead of Python functions, it's desirable to minimize
# the number of calls to the random number generator. Therefore I
# sample numbers in chunks; most of the time only one or two chunks
# should be needed. Eventually, I might rewrite this with Cython, but
# it's fast enough for now.
N = 0 # number of phi that have been sampled
phi = np.empty(multiplicity) # allocate array for phi
pdf_max = 1 + 2*self._vn.sum() # sampling efficiency ~ 1/pdf_max
while N < multiplicity:
n_remaining = multiplicity - N
n_to_sample = int(1.03*pdf_max*n_remaining)
phi_chunk = self._uniform_phi(n_to_sample)
phi_chunk = phi_chunk[self._pdf(phi_chunk) >
np.random.uniform(0, pdf_max, n_to_sample)]
K = min(phi_chunk.size, n_remaining) # number of phi to take
phi[N:N+K] = phi_chunk[:K]
N += K
return phi | [
"def",
"sample",
"(",
"self",
",",
"multiplicity",
")",
":",
"if",
"self",
".",
"_n",
"is",
"None",
":",
"return",
"self",
".",
"_uniform_phi",
"(",
"multiplicity",
")",
"# Since the flow PDF does not have an analytic inverse CDF, I use a",
"# simple accept-reject sampling algorithm. This is reasonably",
"# efficient since for normal-sized vn, the PDF is close to flat. Now",
"# due to the overhead of Python functions, it's desirable to minimize",
"# the number of calls to the random number generator. Therefore I",
"# sample numbers in chunks; most of the time only one or two chunks",
"# should be needed. Eventually, I might rewrite this with Cython, but",
"# it's fast enough for now.",
"N",
"=",
"0",
"# number of phi that have been sampled",
"phi",
"=",
"np",
".",
"empty",
"(",
"multiplicity",
")",
"# allocate array for phi",
"pdf_max",
"=",
"1",
"+",
"2",
"*",
"self",
".",
"_vn",
".",
"sum",
"(",
")",
"# sampling efficiency ~ 1/pdf_max",
"while",
"N",
"<",
"multiplicity",
":",
"n_remaining",
"=",
"multiplicity",
"-",
"N",
"n_to_sample",
"=",
"int",
"(",
"1.03",
"*",
"pdf_max",
"*",
"n_remaining",
")",
"phi_chunk",
"=",
"self",
".",
"_uniform_phi",
"(",
"n_to_sample",
")",
"phi_chunk",
"=",
"phi_chunk",
"[",
"self",
".",
"_pdf",
"(",
"phi_chunk",
")",
">",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"pdf_max",
",",
"n_to_sample",
")",
"]",
"K",
"=",
"min",
"(",
"phi_chunk",
".",
"size",
",",
"n_remaining",
")",
"# number of phi to take",
"phi",
"[",
"N",
":",
"N",
"+",
"K",
"]",
"=",
"phi_chunk",
"[",
":",
"K",
"]",
"N",
"+=",
"K",
"return",
"phi"
] | r"""
Randomly sample azimuthal angles `\phi`.
:param int multiplicity: Number to sample.
:returns: Array of sampled angles. | [
"r",
"Randomly",
"sample",
"azimuthal",
"angles",
"\\",
"phi",
"."
] | train | https://github.com/Duke-QCD/hic/blob/9afb141735b1ac228d296a2349225d2bdcdb68f0/hic/flow.py#L319-L354 |
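Editor's note: the chunked accept-reject loop described in the comments, as a standalone sketch detached from the class (its constructor and attribute layout are assumed away here):

import numpy as np

def sample_flow(vn, m, rng=None):
    # vn: {harmonic: v_n}; m: multiplicity to draw.
    rng = np.random.default_rng() if rng is None else rng
    pdf_max = 1 + 2 * sum(vn.values())  # upper bound of 1 + 2*sum of cosines
    out, filled = np.empty(m), 0
    while filled < m:
        need = m - filled
        cand = rng.uniform(-np.pi, np.pi, int(1.1 * pdf_max * need) + 1)
        pdf = 1 + 2 * sum(v * np.cos(n * cand) for n, v in vn.items())
        keep = cand[pdf > rng.uniform(0, pdf_max, cand.size)]  # accept step
        take = min(keep.size, need)
        out[filled:filled + take] = keep[:take]
        filled += take
    return out

phi = sample_flow({2: 0.1, 3: 0.02}, 10000)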
letuananh/chirptext | chirptext/anhxa.py | dumps | def dumps(obj, *args, **kwargs):
''' Typeless dump an object to json string '''
return json.dumps(obj, *args, cls=TypelessSONEncoder, ensure_ascii=False, **kwargs) | python | def dumps(obj, *args, **kwargs):
''' Typeless dump an object to json string '''
return json.dumps(obj, *args, cls=TypelessSONEncoder, ensure_ascii=False, **kwargs) | [
"def",
"dumps",
"(",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"obj",
",",
"*",
"args",
",",
"cls",
"=",
"TypelessSONEncoder",
",",
"ensure_ascii",
"=",
"False",
",",
"*",
"*",
"kwargs",
")"
] | Typeless dump an object to json string | [
"Typeless",
"dump",
"an",
"object",
"to",
"json",
"string"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/anhxa.py#L81-L83 |
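Editor's note: a minimal encoder in the same spirit, assuming the "typeless" idea means falling back to an object's __dict__; chirptext's TypelessSONEncoder is defined elsewhere in anhxa.py and is not reproduced here:

import json

class TypelessEncoder(json.JSONEncoder):
    def default(self, o):
        return o.__dict__  # serialize plain objects by their attributes

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

print(json.dumps(Point(1, 2), cls=TypelessEncoder, ensure_ascii=False))
# -> {"x": 1, "y": 2}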
letuananh/chirptext | chirptext/anhxa.py | flex_update_obj | def flex_update_obj(source, target, __silent, *fields, **field_map):
''' Pull data from source to target.
Target's __dict__ (object data) will be used by default. Otherwise, it'll be treated as a dictionary '''
source_dict = source.__dict__ if hasattr(source, '__dict__') else source
if not fields:
fields = source_dict.keys()
for f in fields:
if f not in source_dict and __silent:
continue
target_f = f if f not in field_map else field_map[f]
setattr(target, target_f, source_dict[f]) | python | def flex_update_obj(source, target, __silent, *fields, **field_map):
''' Pull data from source to target.
Target's __dict__ (object data) will be used by default. Otherwise, it'll be treated as a dictionary '''
source_dict = source.__dict__ if hasattr(source, '__dict__') else source
if not fields:
fields = source_dict.keys()
for f in fields:
if f not in source_dict and __silent:
continue
target_f = f if f not in field_map else field_map[f]
setattr(target, target_f, source_dict[f]) | [
"def",
"flex_update_obj",
"(",
"source",
",",
"target",
",",
"__silent",
",",
"*",
"fields",
",",
"*",
"*",
"field_map",
")",
":",
"source_dict",
"=",
"source",
".",
"__dict__",
"if",
"hasattr",
"(",
"source",
",",
"'__dict__'",
")",
"else",
"source",
"if",
"not",
"fields",
":",
"fields",
"=",
"source_dict",
".",
"keys",
"(",
")",
"for",
"f",
"in",
"fields",
":",
"if",
"f",
"not",
"in",
"source_dict",
"and",
"__silent",
":",
"continue",
"target_f",
"=",
"f",
"if",
"f",
"not",
"in",
"field_map",
"else",
"field_map",
"[",
"f",
"]",
"setattr",
"(",
"target",
",",
"target_f",
",",
"source_dict",
"[",
"f",
"]",
")"
] | Pull data from source to target.
Target's __dict__ (object data) will be used by default. Otherwise, it'll be treated as a dictionary | [
"Pull",
"data",
"from",
"source",
"to",
"target",
".",
"Target",
"s",
"__dict__",
"(",
"object",
"data",
")",
"will",
"be",
"used",
"by",
"default",
".",
"Otherwise",
"it",
"ll",
"be",
"treated",
"as",
"a",
"dictionary"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/anhxa.py#L90-L100 |
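Editor's note: a usage sketch with a hypothetical Tag class: copy two fields from a dict, renaming 'label' to 'name' through the field map, with the silent flag skipping absent fields:

# assumes: from chirptext.anhxa import flex_update_obj
class Tag:  # hypothetical target class
    pass

src = {'id': 1, 'label': 'noun', 'extra': 'ignored'}
tag = Tag()
flex_update_obj(src, tag, True, 'id', 'label', label='name')
print(tag.id, tag.name)  # -> 1 noun ('extra' was never requested)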
letuananh/chirptext | chirptext/anhxa.py | to_obj | def to_obj(cls, obj_data=None, *fields, **field_map):
''' Use obj_data (dict-like) to construct an object of type cls
prioritize obj_data when there are conflicts '''
if not fields:
fields = obj_data.keys()
try:
kwargs = {field(f, field_map): obj_data[f] for f in fields if f in obj_data}
obj = cls(**kwargs)
except:
getLogger().exception("Couldn't use kwargs to construct object")
# use default constructor
obj = cls()
update_obj(obj_data, obj, *fields, **field_map)
return obj | python | def to_obj(cls, obj_data=None, *fields, **field_map):
''' Use obj_data (dict-like) to construct an object of type cls
prioritize obj_data when there are conflicts '''
if not fields:
fields = obj_data.keys()
try:
kwargs = {field(f, field_map): obj_data[f] for f in fields if f in obj_data}
obj = cls(**kwargs)
except:
getLogger().exception("Couldn't use kwargs to construct object")
# use default constructor
obj = cls()
update_obj(obj_data, obj, *fields, **field_map)
return obj | [
"def",
"to_obj",
"(",
"cls",
",",
"obj_data",
"=",
"None",
",",
"*",
"fields",
",",
"*",
"*",
"field_map",
")",
":",
"if",
"not",
"fields",
":",
"fields",
"=",
"obj_data",
".",
"keys",
"(",
")",
"try",
":",
"kwargs",
"=",
"{",
"field",
"(",
"f",
",",
"field_map",
")",
":",
"obj_data",
"[",
"f",
"]",
"for",
"f",
"in",
"fields",
"if",
"f",
"in",
"obj_data",
"}",
"obj",
"=",
"cls",
"(",
"*",
"*",
"kwargs",
")",
"except",
":",
"getLogger",
"(",
")",
".",
"exception",
"(",
"\"Couldn't use kwargs to construct object\"",
")",
"# use default constructor",
"obj",
"=",
"cls",
"(",
")",
"update_obj",
"(",
"obj_data",
",",
"obj",
",",
"*",
"fields",
",",
"*",
"*",
"field_map",
")",
"return",
"obj"
] | Use obj_data (dict-like) to construct an object of type cls
prioritize obj_data when there are conflicts | [
"Use",
"obj_data",
"(",
"dict",
"-",
"like",
")",
"to",
"construct",
"an",
"object",
"of",
"type",
"cls",
"prioritize",
"obj_dict",
"when",
"there",
"are",
"conficts"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/anhxa.py#L113-L126 |
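Editor's note: a hedged round trip for to_obj with a hypothetical Person class; this assumes the field() and update_obj helpers (defined elsewhere in anhxa.py, not shown here) map source names to target names the way the field map suggests:

# assumes: from chirptext.anhxa import to_obj
class Person:  # hypothetical class with a keyword constructor
    def __init__(self, name=None, age=None):
        self.name, self.age = name, age

p = to_obj(Person, {'fullname': 'Ann', 'age': 30}, fullname='name')
print(p.name, p.age)  # -> Ann 30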
letuananh/chirptext | chirptext/deko.py | is_kana | def is_kana(text):
''' Check if a text is written in kana only (hiragana & katakana)
if text is empty then return True
'''
if text is None:
raise ValueError("text cannot be None")
for c in text:
if c not in HIRAGANA and c not in KATAKANA:
return False
return True | python | def is_kana(text):
''' Check if a text is written in kana only (hiragana & katakana)
if text is empty then return True
'''
if text is None:
raise ValueError("text cannot be None")
for c in text:
if c not in HIRAGANA and c not in KATAKANA:
return False
return True | [
"def",
"is_kana",
"(",
"text",
")",
":",
"if",
"text",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"text cannot be None\"",
")",
"for",
"c",
"in",
"text",
":",
"if",
"c",
"not",
"in",
"HIRAGANA",
"and",
"c",
"not",
"in",
"KATAKANA",
":",
"return",
"False",
"return",
"True"
] | Check if a text is written in kana only (hiragana & katakana)
if text is empty then return True | [
"Check",
"if",
"a",
"text",
"if",
"written",
"in",
"kana",
"only",
"(",
"hiragana",
"&",
"katakana",
")",
"if",
"text",
"is",
"empty",
"then",
"return",
"True"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/deko.py#L62-L71 |
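Editor's note: quick checks, relying only on the behavior spelled out above (empty text is vacuously kana):

# assumes: from chirptext.deko import is_kana
print(is_kana('ひらがな'))  # True
print(is_kana('カタカナ'))  # True
print(is_kana('漢字かな'))  # False: contains kanji
print(is_kana(''))          # True: the loop never runs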
letuananh/chirptext | chirptext/deko.py | txt2mecab | def txt2mecab(text, **kwargs):
''' Use mecab to parse one sentence '''
mecab_out = _internal_mecab_parse(text, **kwargs).splitlines()
tokens = [MeCabToken.parse(x) for x in mecab_out]
return MeCabSent(text, tokens) | python | def txt2mecab(text, **kwargs):
''' Use mecab to parse one sentence '''
mecab_out = _internal_mecab_parse(text, **kwargs).splitlines()
tokens = [MeCabToken.parse(x) for x in mecab_out]
return MeCabSent(text, tokens) | [
"def",
"txt2mecab",
"(",
"text",
",",
"*",
"*",
"kwargs",
")",
":",
"mecab_out",
"=",
"_internal_mecab_parse",
"(",
"text",
",",
"*",
"*",
"kwargs",
")",
".",
"splitlines",
"(",
")",
"tokens",
"=",
"[",
"MeCabToken",
".",
"parse",
"(",
"x",
")",
"for",
"x",
"in",
"mecab_out",
"]",
"return",
"MeCabSent",
"(",
"text",
",",
"tokens",
")"
] | Use mecab to parse one sentence | [
"Use",
"mecab",
"to",
"parse",
"one",
"sentence"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/deko.py#L250-L254 |
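Editor's note: a usage sketch (requires a working MeCab installation; POS values depend on the installed dictionary, so the output comment is illustrative only):

# assumes: from chirptext.deko import txt2mecab
sent = txt2mecab('猫が好きです。')
for token in sent:  # MeCabSent iterates over its tokens
    print(token.surface, token.pos)
# e.g. 猫 名詞 / が 助詞 / 好き 名詞 / です 助動詞 / 。 記号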
letuananh/chirptext | chirptext/deko.py | lines2mecab | def lines2mecab(lines, **kwargs):
''' Use mecab to parse many lines '''
sents = []
for line in lines:
sent = txt2mecab(line, **kwargs)
sents.append(sent)
return sents | python | def lines2mecab(lines, **kwargs):
''' Use mecab to parse many lines '''
sents = []
for line in lines:
sent = txt2mecab(line, **kwargs)
sents.append(sent)
return sents | [
"def",
"lines2mecab",
"(",
"lines",
",",
"*",
"*",
"kwargs",
")",
":",
"sents",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"sent",
"=",
"txt2mecab",
"(",
"line",
",",
"*",
"*",
"kwargs",
")",
"sents",
".",
"append",
"(",
"sent",
")",
"return",
"sents"
] | Use mecab to parse many lines | [
"Use",
"mecab",
"to",
"parse",
"many",
"lines"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/deko.py#L257-L263 |
letuananh/chirptext | chirptext/deko.py | tokenize_sent | def tokenize_sent(mtokens, raw='', auto_strip=True):
''' Tokenize a text to multiple sentences '''
sents = []
bucket = []
cfrom = 0
cto = 0
token_cfrom = 0
logger = getLogger()
logger.debug("raw text: {}".format(raw))
logger.debug("tokens: {}".format(mtokens))
for t in mtokens:
if t.is_eos:
continue
token_cfrom = raw.find(t.surface, cto)
cto = token_cfrom + len(t.surface) # also token_cto
logger.debug("processing token {} <{}:{}>".format(t, token_cfrom, cto))
bucket.append(t)
# sentence ending
if t.pos == '記号' and t.sc1 == '句点':
sent_text = raw[cfrom:cto]
getLogger().debug("sent_text = {} | <{}:{}>".format(sent_text, cfrom, cto))
if auto_strip:
sent_text = sent_text.strip()
sents.append(MeCabSent(sent_text, bucket))
logger.debug("Found a sentence: {}".format(sent_text))
cfrom = cto
bucket = []
if bucket:
logger.debug("Bucket is not empty: {}".format(bucket))
sent_text = raw[cfrom:cto]
logger.debug("remaining text: {} [{}:{}]".format(sent_text, cfrom, cto))
if auto_strip:
sent_text = sent_text.strip()
sents.append(MeCabSent(sent_text, bucket))
return sents | python | def tokenize_sent(mtokens, raw='', auto_strip=True):
''' Tokenize a text to multiple sentences '''
sents = []
bucket = []
cfrom = 0
cto = 0
token_cfrom = 0
logger = getLogger()
logger.debug("raw text: {}".format(raw))
logger.debug("tokens: {}".format(mtokens))
for t in mtokens:
if t.is_eos:
continue
token_cfrom = raw.find(t.surface, cto)
cto = token_cfrom + len(t.surface) # also token_cto
logger.debug("processing token {} <{}:{}>".format(t, token_cfrom, cto))
bucket.append(t)
# sentence ending
if t.pos == '記号' and t.sc1 == '句点':
sent_text = raw[cfrom:cto]
getLogger().debug("sent_text = {} | <{}:{}>".format(sent_text, cfrom, cto))
if auto_strip:
sent_text = sent_text.strip()
sents.append(MeCabSent(sent_text, bucket))
logger.debug("Found a sentence: {}".format(sent_text))
cfrom = cto
bucket = []
if bucket:
logger.debug("Bucket is not empty: {}".format(bucket))
sent_text = raw[cfrom:cto]
logger.debug("remaining text: {} [{}:{}]".format(sent_text, cfrom, cto))
if auto_strip:
sent_text = sent_text.strip()
sents.append(MeCabSent(sent_text, bucket))
return sents | [
"def",
"tokenize_sent",
"(",
"mtokens",
",",
"raw",
"=",
"''",
",",
"auto_strip",
"=",
"True",
")",
":",
"sents",
"=",
"[",
"]",
"bucket",
"=",
"[",
"]",
"cfrom",
"=",
"0",
"cto",
"=",
"0",
"token_cfrom",
"=",
"0",
"logger",
"=",
"getLogger",
"(",
")",
"logger",
".",
"debug",
"(",
"\"raw text: {}\"",
".",
"format",
"(",
"raw",
")",
")",
"logger",
".",
"debug",
"(",
"\"tokens: {}\"",
".",
"format",
"(",
"mtokens",
")",
")",
"for",
"t",
"in",
"mtokens",
":",
"if",
"t",
".",
"is_eos",
":",
"continue",
"token_cfrom",
"=",
"raw",
".",
"find",
"(",
"t",
".",
"surface",
",",
"cto",
")",
"cto",
"=",
"token_cfrom",
"+",
"len",
"(",
"t",
".",
"surface",
")",
"# also token_cto",
"logger",
".",
"debug",
"(",
"\"processing token {} <{}:{}>\"",
".",
"format",
"(",
"t",
",",
"token_cfrom",
",",
"cto",
")",
")",
"bucket",
".",
"append",
"(",
"t",
")",
"# sentence ending",
"if",
"t",
".",
"pos",
"==",
"'記号' and",
"t.s",
"1",
" ",
"== ",
"句点",
":",
"",
"sent_text",
"=",
"raw",
"[",
"cfrom",
":",
"cto",
"]",
"getLogger",
"(",
")",
".",
"debug",
"(",
"\"sent_text = {} | <{}:{}>\"",
".",
"format",
"(",
"sent_text",
",",
"cfrom",
",",
"cto",
")",
")",
"if",
"auto_strip",
":",
"sent_text",
"=",
"sent_text",
".",
"strip",
"(",
")",
"sents",
".",
"append",
"(",
"MeCabSent",
"(",
"sent_text",
",",
"bucket",
")",
")",
"logger",
".",
"debug",
"(",
"\"Found a sentence: {}\"",
".",
"format",
"(",
"sent_text",
")",
")",
"cfrom",
"=",
"cto",
"bucket",
"=",
"[",
"]",
"if",
"bucket",
":",
"logger",
".",
"debug",
"(",
"\"Bucket is not empty: {}\"",
".",
"format",
"(",
"bucket",
")",
")",
"sent_text",
"=",
"raw",
"[",
"cfrom",
":",
"cto",
"]",
"logger",
".",
"debug",
"(",
"\"remaining text: {} [{}:{}]\"",
".",
"format",
"(",
"sent_text",
",",
"cfrom",
",",
"cto",
")",
")",
"if",
"auto_strip",
":",
"sent_text",
"=",
"sent_text",
".",
"strip",
"(",
")",
"sents",
".",
"append",
"(",
"MeCabSent",
"(",
"sent_text",
",",
"bucket",
")",
")",
"return",
"sents"
] | Tokenize a text to multiple sentences | [
"Tokenize",
"a",
"text",
"to",
"multiple",
"sentences"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/deko.py#L274-L308 |
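Editor's note: an illustrative re-split of a two-sentence string (MeCab required; the MeCabSent returned by txt2mecab is iterated for its tokens, as analyse() in the next row does):

# assumes: from chirptext.deko import txt2mecab, tokenize_sent
text = '今日は晴れです。明日は雨です。'
tokens = list(txt2mecab(text))           # token stream, EOS markers included
sents = tokenize_sent(tokens, raw=text)  # EOS is skipped; cut after each 句点
print(len(sents))  # -> 2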
letuananh/chirptext | chirptext/deko.py | analyse | def analyse(content, splitlines=True, format=None, **kwargs):
''' Japanese text > tokenize/txt/html '''
sents = DekoText.parse(content, splitlines=splitlines, **kwargs)
doc = []
final = sents
# Generate output
if format == 'html':
for sent in sents:
doc.append(sent.to_ruby())
final = '<br/>\n'.join(doc)
elif format == 'csv':
for sent in sents:
doc.append('\n'.join([x.to_csv() for x in sent]) + '\n')
final = '\n'.join(doc)
elif format == 'txt':
final = '\n'.join([str(x) for x in sents])
return final | python | def analyse(content, splitlines=True, format=None, **kwargs):
''' Japanese text > tokenize/txt/html '''
sents = DekoText.parse(content, splitlines=splitlines, **kwargs)
doc = []
final = sents
# Generate output
if format == 'html':
for sent in sents:
doc.append(sent.to_ruby())
final = '<br/>\n'.join(doc)
elif format == 'csv':
for sent in sents:
doc.append('\n'.join([x.to_csv() for x in sent]) + '\n')
final = '\n'.join(doc)
elif format == 'txt':
final = '\n'.join([str(x) for x in sents])
return final | [
"def",
"analyse",
"(",
"content",
",",
"splitlines",
"=",
"True",
",",
"format",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"sents",
"=",
"DekoText",
".",
"parse",
"(",
"content",
",",
"splitlines",
"=",
"splitlines",
",",
"*",
"*",
"kwargs",
")",
"doc",
"=",
"[",
"]",
"final",
"=",
"sents",
"# Generate output",
"if",
"format",
"==",
"'html'",
":",
"for",
"sent",
"in",
"sents",
":",
"doc",
".",
"append",
"(",
"sent",
".",
"to_ruby",
"(",
")",
")",
"final",
"=",
"'<br/>\\n'",
".",
"join",
"(",
"doc",
")",
"elif",
"format",
"==",
"'csv'",
":",
"for",
"sent",
"in",
"sents",
":",
"doc",
".",
"append",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"x",
".",
"to_csv",
"(",
")",
"for",
"x",
"in",
"sent",
"]",
")",
"+",
"'\\n'",
")",
"final",
"=",
"'\\n'",
".",
"join",
"(",
"doc",
")",
"elif",
"format",
"==",
"'txt'",
":",
"final",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"sents",
"]",
")",
"return",
"final"
] | Japanese text > tokenize/txt/html | [
"Japanese",
"text",
">",
"tokenize",
"/",
"txt",
"/",
"html"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/deko.py#L311-L327 |
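A hedged example of `analyse` in use. The import path follows this record's file path; MeCab must be installed for parsing to work:

```python
from chirptext.deko import analyse  # path taken from this record

text = '猫が好きです。\n犬も好きです。'
print(analyse(text, format='txt'))   # one parsed sentence per line
print(analyse(text, format='csv'))   # one token per line, blank line between sentences
html = analyse(text, format='html')  # ruby-annotated sentences joined with '<br/>\n'
```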
letuananh/chirptext | chirptext/deko.py | MeCabToken.pos3 | def pos3(self):
''' Use pos-sc1-sc2 as POS '''
parts = [self.pos]
if self.sc1 and self.sc1 != '*':
parts.append(self.sc1)
if self.sc2 and self.sc2 != '*':
parts.append(self.sc2)
return '-'.join(parts) | python | def pos3(self):
''' Use pos-sc1-sc2 as POS '''
parts = [self.pos]
if self.sc1 and self.sc1 != '*':
parts.append(self.sc1)
if self.sc2 and self.sc2 != '*':
parts.append(self.sc2)
return '-'.join(parts) | [
"def",
"pos3",
"(",
"self",
")",
":",
"parts",
"=",
"[",
"self",
".",
"pos",
"]",
"if",
"self",
".",
"sc1",
"and",
"self",
".",
"sc1",
"!=",
"'*'",
":",
"parts",
".",
"append",
"(",
"self",
".",
"sc1",
")",
"if",
"self",
".",
"sc2",
"and",
"self",
".",
"sc2",
"!=",
"'*'",
":",
"parts",
".",
"append",
"(",
"self",
".",
"sc2",
")",
"return",
"'-'",
".",
"join",
"(",
"parts",
")"
] | Use pos-sc1-sc2 as POS | [
"Use",
"pos",
"-",
"sc1",
"-",
"sc2",
"as",
"POS"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/deko.py#L113-L120 |
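A standalone sketch of the `pos3` logic, useful for checking expected output without constructing a `MeCabToken` (whose constructor signature is not shown in this record):

```python
# Mirrors MeCabToken.pos3: join pos-sc1-sc2 with '-', skipping '*' placeholders.
def pos3(pos, sc1, sc2):
    parts = [pos]
    if sc1 and sc1 != '*':
        parts.append(sc1)
    if sc2 and sc2 != '*':
        parts.append(sc2)
    return '-'.join(parts)

print(pos3('名詞', '一般', '*'))  # -> 名詞-一般
print(pos3('動詞', '自立', '*'))  # -> 動詞-自立
print(pos3('記号', '句点', '*'))  # -> 記号-句点
```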
letuananh/chirptext | chirptext/deko.py | MeCabToken.to_ruby | def to_ruby(self):
''' Convert one MeCabToken into HTML '''
if self.need_ruby():
surface = self.surface
reading = self.reading_hira()
return '<ruby><rb>{sur}</rb><rt>{read}</rt></ruby>'.format(sur=surface, read=reading)
elif self.is_eos:
return ''
else:
return self.surface | python | def to_ruby(self):
''' Convert one MeCabToken into HTML '''
if self.need_ruby():
surface = self.surface
reading = self.reading_hira()
return '<ruby><rb>{sur}</rb><rt>{read}</rt></ruby>'.format(sur=surface, read=reading)
elif self.is_eos:
return ''
else:
return self.surface | [
"def",
"to_ruby",
"(",
"self",
")",
":",
"if",
"self",
".",
"need_ruby",
"(",
")",
":",
"surface",
"=",
"self",
".",
"surface",
"reading",
"=",
"self",
".",
"reading_hira",
"(",
")",
"return",
"'<ruby><rb>{sur}</rb><rt>{read}</rt></ruby>'",
".",
"format",
"(",
"sur",
"=",
"surface",
",",
"read",
"=",
"reading",
")",
"elif",
"self",
".",
"is_eos",
":",
"return",
"''",
"else",
":",
"return",
"self",
".",
"surface"
] | Convert one MeCabToken into HTML | [
"Convert",
"one",
"MeCabToken",
"into",
"HTML"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/deko.py#L122-L131 |
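The markup shape produced by `to_ruby` for a token that needs furigana, reconstructed from the format string above (surface 漢字 with hiragana reading かんじ used as sample values):

```python
# What to_ruby() emits for a ruby-annotated token.
markup = '<ruby><rb>{sur}</rb><rt>{read}</rt></ruby>'.format(sur='漢字', read='かんじ')
print(markup)  # <ruby><rb>漢字</rb><rt>かんじ</rt></ruby>
# EOS tokens yield '' and tokens needing no ruby fall back to their surface.
```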
letuananh/chirptext | chirptext/deko.py | DekoText.add | def add(self, sentence_text, **kwargs):
''' Parse a text string and add it to this doc '''
sent = MeCabSent.parse(sentence_text, **kwargs)
self.sents.append(sent)
return sent | python | def add(self, sentence_text, **kwargs):
''' Parse a text string and add it to this doc '''
sent = MeCabSent.parse(sentence_text, **kwargs)
self.sents.append(sent)
return sent | [
"def",
"add",
"(",
"self",
",",
"sentence_text",
",",
"*",
"*",
"kwargs",
")",
":",
"sent",
"=",
"MeCabSent",
".",
"parse",
"(",
"sentence_text",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"sents",
".",
"append",
"(",
"sent",
")",
"return",
"sent"
] | Parse a text string and add it to this doc | [
"Parse",
"a",
"text",
"string",
"and",
"add",
"it",
"to",
"this",
"doc"
] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/deko.py#L214-L218 |
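A sketch of building a document incrementally with `add`. The no-argument `DekoText()` construction is an assumption; the `sents` attribute is taken from the method body:

```python
from chirptext.deko import DekoText  # path taken from this record

doc = DekoText()                  # assumed: default construction
sent = doc.add('猫が好きです。')   # parses via MeCabSent.parse and appends
print(len(doc.sents))             # -> 1
```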
alefnula/tea | tea/utils/encoding.py | smart_text | def smart_text(s, encoding="utf-8", strings_only=False, errors="strict"):
"""Return a unicode object representing 's'.
Treats bytes using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, six.text_type):
return s
if strings_only and not isinstance(s, six.string_types):
return s
if not isinstance(s, six.string_types):
if hasattr(s, "__unicode__"):
s = s.__unicode__()
else:
if six.PY3:
if isinstance(s, six.binary_type):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
else:
s = six.text_type(six.binary_type(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
return s | python | def smart_text(s, encoding="utf-8", strings_only=False, errors="strict"):
"""Return a unicode object representing 's'.
Treats bytes using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, six.text_type):
return s
if strings_only and not isinstance(s, six.string_types):
return s
if not isinstance(s, six.string_types):
if hasattr(s, "__unicode__"):
s = s.__unicode__()
else:
if six.PY3:
if isinstance(s, six.binary_type):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
else:
s = six.text_type(six.binary_type(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
return s | [
"def",
"smart_text",
"(",
"s",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"strings_only",
"=",
"False",
",",
"errors",
"=",
"\"strict\"",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"text_type",
")",
":",
"return",
"s",
"if",
"strings_only",
"and",
"not",
"isinstance",
"(",
"s",
",",
"six",
".",
"string_types",
")",
":",
"return",
"s",
"if",
"not",
"isinstance",
"(",
"s",
",",
"six",
".",
"string_types",
")",
":",
"if",
"hasattr",
"(",
"s",
",",
"\"__unicode__\"",
")",
":",
"s",
"=",
"s",
".",
"__unicode__",
"(",
")",
"else",
":",
"if",
"six",
".",
"PY3",
":",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"binary_type",
")",
":",
"s",
"=",
"six",
".",
"text_type",
"(",
"s",
",",
"encoding",
",",
"errors",
")",
"else",
":",
"s",
"=",
"six",
".",
"text_type",
"(",
"s",
")",
"else",
":",
"s",
"=",
"six",
".",
"text_type",
"(",
"six",
".",
"binary_type",
"(",
"s",
")",
",",
"encoding",
",",
"errors",
")",
"else",
":",
"# Note: We use .decode() here, instead of six.text_type(s, encoding,\r",
"# errors), so that if s is a SafeBytes, it ends up being a\r",
"# SafeText at the end.\r",
"s",
"=",
"s",
".",
"decode",
"(",
"encoding",
",",
"errors",
")",
"return",
"s"
] | Return a unicode object representing 's'.
Treats bytes using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects. | [
"Return",
"a",
"unicode",
"object",
"representing",
"s",
".",
"Treats",
"bytes",
"using",
"the",
"encoding",
"codec",
".",
"If",
"strings_only",
"is",
"True",
"don",
"t",
"convert",
"(",
"some",
")",
"non",
"-",
"string",
"-",
"like",
"objects",
"."
] | train | https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/utils/encoding.py#L8-L35 |
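Behavior of `smart_text` on a few representative inputs, derived directly from the branches above:

```python
from tea.utils.encoding import smart_text  # path taken from this record

print(smart_text(b'caf\xc3\xa9'))         # bytes decoded as UTF-8 -> 'café'
print(smart_text('café'))                 # text passes through unchanged
print(smart_text(42))                     # non-string -> '42' on Python 3
print(smart_text(42, strings_only=True))  # returned untouched -> 42
```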
alefnula/tea | tea/utils/encoding.py | smart_bytes | def smart_bytes(s, encoding="utf-8", strings_only=False, errors="strict"):
"""Return a bytes version of 's' encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, six.binary_type):
if encoding == "utf-8":
return s
else:
return s.decode("utf-8", errors).encode(encoding, errors)
if strings_only and not isinstance(s, six.text_type):
return s
if not isinstance(s, six.string_types):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return six.binary_type(s)
except UnicodeEncodeError:
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors) | python | def smart_bytes(s, encoding="utf-8", strings_only=False, errors="strict"):
"""Return a bytes version of 's' encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, six.binary_type):
if encoding == "utf-8":
return s
else:
return s.decode("utf-8", errors).encode(encoding, errors)
if strings_only and not isinstance(s, six.text_type):
return s
if not isinstance(s, six.string_types):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return six.binary_type(s)
except UnicodeEncodeError:
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors) | [
"def",
"smart_bytes",
"(",
"s",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"strings_only",
"=",
"False",
",",
"errors",
"=",
"\"strict\"",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"binary_type",
")",
":",
"if",
"encoding",
"==",
"\"utf-8\"",
":",
"return",
"s",
"else",
":",
"return",
"s",
".",
"decode",
"(",
"\"utf-8\"",
",",
"errors",
")",
".",
"encode",
"(",
"encoding",
",",
"errors",
")",
"if",
"strings_only",
"and",
"not",
"isinstance",
"(",
"s",
",",
"six",
".",
"text_type",
")",
":",
"return",
"s",
"if",
"not",
"isinstance",
"(",
"s",
",",
"six",
".",
"string_types",
")",
":",
"try",
":",
"if",
"six",
".",
"PY3",
":",
"return",
"six",
".",
"text_type",
"(",
"s",
")",
".",
"encode",
"(",
"encoding",
")",
"else",
":",
"return",
"six",
".",
"binary_type",
"(",
"s",
")",
"except",
"UnicodeEncodeError",
":",
"return",
"six",
".",
"text_type",
"(",
"s",
")",
".",
"encode",
"(",
"encoding",
",",
"errors",
")",
"else",
":",
"return",
"s",
".",
"encode",
"(",
"encoding",
",",
"errors",
")"
] | Return a bytes version of 's' encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects. | [
"Return",
"a",
"bytes",
"version",
"of",
"s",
"encoded",
"as",
"specified",
"in",
"encoding",
".",
"If",
"strings_only",
"is",
"True",
"don",
"t",
"convert",
"(",
"some",
")",
"non",
"-",
"string",
"-",
"like",
"objects",
"."
] | train | https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/utils/encoding.py#L38-L59 |
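And the complementary `smart_bytes` behavior, again derived from the branches above:

```python
from tea.utils.encoding import smart_bytes  # path taken from this record

print(smart_bytes('café'))                      # -> b'caf\xc3\xa9' (UTF-8 default)
print(smart_bytes('café', encoding='latin-1'))  # -> b'caf\xe9' (re-encoded)
print(smart_bytes(3.14))                        # non-string -> b'3.14'
print(smart_bytes(3.14, strings_only=True))     # returned untouched -> 3.14
```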