def _repr_data_resource_(self):
    """
    Not a real Jupyter special repr method, but we use the same
    naming convention.
    """
    if config.get_option('display.html.table_schema'):
        data = self.head(config.get_option('display.max_rows'))
        payload = json.loads(data.to_json(orient='table'),
                             object_pairs_hook=collections.OrderedDict)
        return payload
pandas/core/generic.py
_repr_data_resource_
kapilepatel/pandas
python
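In normal use Jupyter's display machinery calls this hook, not user code. A minimal sketch of how it is exercised, calling the private method directly purely for illustration; it assumes a pandas build from this era of the repository:

import pandas as pd

# Enable the Table Schema repr; front-ends that understand the
# 'application/vnd.dataresource+json' MIME type can then render it.
pd.set_option('display.html.table_schema', True)

df = pd.DataFrame({'a': [1, 2, 3]})
payload = df._repr_data_resource_()  # dict parsed from orient='table' JSON
sorted(payload.keys())               # ['data', 'schema']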
def to_json(self, path_or_buf=None, orient=None, date_format=None,
            double_precision=10, force_ascii=True, date_unit='ms',
            default_handler=None, lines=False, compression='infer',
            index=True):
    """
    Convert the object to a JSON string.

    Note NaN's and None will be converted to null and datetime objects
    will be converted to UNIX timestamps.

    Parameters
    ----------
    path_or_buf : string or file handle, optional
        File path or object. If not specified, the result is returned as
        a string.
    orient : string
        Indication of expected JSON string format.

        * Series

          - default is 'index'
          - allowed values are: {'split','records','index','table'}

        * DataFrame

          - default is 'columns'
          - allowed values are:
            {'split','records','index','columns','values','table'}

        * The format of the JSON string

          - 'split' : dict like {'index' -> [index],
            'columns' -> [columns], 'data' -> [values]}
          - 'records' : list like
            [{column -> value}, ... , {column -> value}]
          - 'index' : dict like {index -> {column -> value}}
          - 'columns' : dict like {column -> {index -> value}}
          - 'values' : just the values array
          - 'table' : dict like {'schema': {schema}, 'data': {data}}
            describing the data, and the data component is
            like ``orient='records'``.

        .. versionchanged:: 0.20.0

    date_format : {None, 'epoch', 'iso'}
        Type of date conversion. 'epoch' = epoch milliseconds,
        'iso' = ISO8601. The default depends on the `orient`. For
        ``orient='table'``, the default is 'iso'. For all other orients,
        the default is 'epoch'.
    double_precision : int, default 10
        The number of decimal places to use when encoding
        floating point values.
    force_ascii : bool, default True
        Force encoded string to be ASCII.
    date_unit : string, default 'ms' (milliseconds)
        The time unit to encode to, governs timestamp and ISO8601
        precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
        microsecond, and nanosecond respectively.
    default_handler : callable, default None
        Handler to call if object cannot otherwise be converted to a
        suitable format for JSON. Should receive a single argument which
        is the object to convert and return a serialisable object.
    lines : bool, default False
        If 'orient' is 'records' write out line delimited json format.
        Will throw ValueError if incorrect 'orient' since others are not
        list like.

        .. versionadded:: 0.19.0

    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
        A string representing the compression to use in the output file,
        only used when the first argument is a filename. By default, the
        compression is inferred from the filename.

        .. versionadded:: 0.21.0
        .. versionchanged:: 0.24.0
           'infer' option added and set to default
    index : bool, default True
        Whether to include the index values in the JSON string. Not
        including the index (``index=False``) is only supported when
        orient is 'split' or 'table'.

        .. versionadded:: 0.23.0

    See Also
    --------
    read_json

    Examples
    --------
    >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
    ...                   index=['row 1', 'row 2'],
    ...                   columns=['col 1', 'col 2'])
    >>> df.to_json(orient='split')
    '{"columns":["col 1","col 2"],
      "index":["row 1","row 2"],
      "data":[["a","b"],["c","d"]]}'

    Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
    Note that index labels are not preserved with this encoding.

    >>> df.to_json(orient='records')
    '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'

    Encoding/decoding a Dataframe using ``'index'`` formatted JSON:

    >>> df.to_json(orient='index')
    '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'

    Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:

    >>> df.to_json(orient='columns')
    '{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'

    Encoding/decoding a Dataframe using ``'values'`` formatted JSON:

    >>> df.to_json(orient='values')
    '[["a","b"],["c","d"]]'

    Encoding with Table Schema

    >>> df.to_json(orient='table')
    '{"schema": {"fields": [{"name": "index", "type": "string"},
                            {"name": "col 1", "type": "string"},
                            {"name": "col 2", "type": "string"}],
                 "primaryKey": "index",
                 "pandas_version": "0.20.0"},
      "data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
               {"index": "row 2", "col 1": "c", "col 2": "d"}]}'
    """
    from pandas.io import json
    if date_format is None and orient == 'table':
        date_format = 'iso'
    elif date_format is None:
        date_format = 'epoch'
    return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,
                        date_format=date_format,
                        double_precision=double_precision,
                        force_ascii=force_ascii, date_unit=date_unit,
                        default_handler=default_handler,
                        lines=lines, compression=compression,
                        index=index)
pandas/core/generic.py
to_json
kapilepatel/pandas
python
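The ``lines`` flag is described in the parameters above but not shown in the docstring examples. A small sketch of the line-delimited form and its round trip, using only the public API documented above:

import pandas as pd

df = pd.DataFrame([['a', 'b'], ['c', 'd']],
                  index=['row 1', 'row 2'],
                  columns=['col 1', 'col 2'])

# One JSON object per line; only valid together with orient='records'.
out = df.to_json(orient='records', lines=True)
print(out)
# {"col 1":"a","col 2":"b"}
# {"col 1":"c","col 2":"d"}

# read_json mirrors the flag for the round trip; index labels are lost,
# as with any 'records'-oriented encoding.
df2 = pd.read_json(out, lines=True)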
def to_hdf(self, path_or_buf, key, **kwargs):
    """
    Write the contained data to an HDF5 file using HDFStore.

    Hierarchical Data Format (HDF) is self-describing, allowing an
    application to interpret the structure and contents of a file with
    no outside information. One HDF file can hold a mix of related
    objects which can be accessed as a group or as individual objects.

    In order to add another DataFrame or Series to an existing HDF file
    please use append mode and a different key.

    For more information see the :ref:`user guide <io.hdf5>`.

    Parameters
    ----------
    path_or_buf : str or pandas.HDFStore
        File path or HDFStore object.
    key : str
        Identifier for the group in the store.
    mode : {'a', 'w', 'r+'}, default 'a'
        Mode to open file:

        - 'w': write, a new file is created (an existing file with
          the same name would be deleted).
        - 'a': append, an existing file is opened for reading and
          writing, and if the file does not exist it is created.
        - 'r+': similar to 'a', but the file must already exist.
    format : {'fixed', 'table'}, default 'fixed'
        Possible values:

        - 'fixed': Fixed format. Fast writing/reading. Not-appendable,
          nor searchable.
        - 'table': Table format. Write as a PyTables Table structure
          which may perform worse but allow more flexible operations
          like searching / selecting subsets of the data.
    append : bool, default False
        For Table formats, append the input data to the existing.
    data_columns : list of columns or True, optional
        List of columns to create as indexed data columns for on-disk
        queries, or True to use all columns. By default only the axes
        of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
        Applicable only to format='table'.
    complevel : {0-9}, optional
        Specifies a compression level for data.
        A value of 0 disables compression.
    complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
        Specifies the compression library to be used.
        As of v0.20.2 these additional compressors for Blosc are
        supported (default if no compressor specified: 'blosc:blosclz'):
        {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
        'blosc:zlib', 'blosc:zstd'}.
        Specifying a compression library which is not available issues
        a ValueError.
    fletcher32 : bool, default False
        If applying compression use the fletcher32 checksum.
    dropna : bool, default False
        If true, ALL nan rows will not be written to store.
    errors : str, default 'strict'
        Specifies how encoding and decoding errors are to be handled.
        See the errors argument for :func:`open` for a full list
        of options.

    See Also
    --------
    DataFrame.read_hdf : Read from HDF file.
    DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
    DataFrame.to_sql : Write to a sql table.
    DataFrame.to_feather : Write out feather-format for DataFrames.
    DataFrame.to_csv : Write out to a csv file.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
    ...                   index=['a', 'b', 'c'])
    >>> df.to_hdf('data.h5', key='df', mode='w')

    We can add another object to the same file:

    >>> s = pd.Series([1, 2, 3, 4])
    >>> s.to_hdf('data.h5', key='s')

    Reading from HDF file:

    >>> pd.read_hdf('data.h5', 'df')
       A  B
    a  1  4
    b  2  5
    c  3  6
    >>> pd.read_hdf('data.h5', 's')
    0    1
    1    2
    2    3
    3    4
    dtype: int64

    Deleting file with data:

    >>> import os
    >>> os.remove('data.h5')
    """
    from pandas.io import pytables
    return pytables.to_hdf(path_or_buf, key, self, **kwargs)
pandas/core/generic.py
to_hdf
kapilepatel/pandas
python
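The docstring demonstrates the default fixed format; a short sketch of the appendable ``format='table'`` path described in the parameters, assuming the optional PyTables (``tables``) package is installed:

import os
import pandas as pd

df1 = pd.DataFrame({'A': [1, 2]})
df2 = pd.DataFrame({'A': [3, 4]})

# 'table' format is appendable and queryable; data_columns=True makes
# every column usable in a where clause, not just the index.
df1.to_hdf('store.h5', key='df', mode='w', format='table',
           data_columns=True)
df2.to_hdf('store.h5', key='df', append=True)

pd.read_hdf('store.h5', 'df', where='A > 2')  # rows where A is 3 or 4

os.remove('store.h5')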
def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
    """
    Serialize object to input file path using msgpack format.

    THIS IS AN EXPERIMENTAL LIBRARY and the storage format
    may not be stable until a future release.

    Parameters
    ----------
    path_or_buf : string, buffer-like, or None
        File path or buffer to write to. If None, the packed
        representation is returned instead.
    append : bool, default False
        Whether to append to an existing msgpack.
    compress : {'zlib', 'blosc', None}, default None
        Type of compressor (None means no compression).
    """
    from pandas.io import packers
    return packers.to_msgpack(path_or_buf, self, encoding=encoding,
                              **kwargs)
pandas/core/generic.py
to_msgpack
kapilepatel/pandas
python
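This docstring has no example, so a minimal round-trip sketch for the pandas version in this snapshot; note the format was experimental, as the docstring warns, and both functions were removed in later pandas releases:

import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3]})

# With path_or_buf=None the packed representation is returned
# instead of being written to a file.
packed = df.to_msgpack()
df2 = pd.read_msgpack(packed)  # the reading counterpart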
def to_sql(self, name, con, schema=None, if_exists='fail', index=True,
           index_label=None, chunksize=None, dtype=None, method=None):
    """
    Write records stored in a DataFrame to a SQL database.

    Databases supported by SQLAlchemy [1]_ are supported. Tables can be
    newly created, appended to, or overwritten.

    Parameters
    ----------
    name : string
        Name of SQL table.
    con : sqlalchemy.engine.Engine or sqlite3.Connection
        Using SQLAlchemy makes it possible to use any DB supported by
        that library. Legacy support is provided for sqlite3.Connection
        objects.
    schema : string, optional
        Specify the schema (if database flavor supports this). If None,
        use default schema.
    if_exists : {'fail', 'replace', 'append'}, default 'fail'
        How to behave if the table already exists.

        * fail: Raise a ValueError.
        * replace: Drop the table before inserting new values.
        * append: Insert new values to the existing table.

    index : bool, default True
        Write DataFrame index as a column. Uses `index_label` as the
        column name in the table.
    index_label : string or sequence, default None
        Column label for index column(s). If None is given (default) and
        `index` is True, then the index names are used.
        A sequence should be given if the DataFrame uses MultiIndex.
    chunksize : int, optional
        Rows will be written in batches of this size at a time. By
        default, all rows will be written at once.
    dtype : dict, optional
        Specifying the datatype for columns. The keys should be the
        column names and the values should be the SQLAlchemy types or
        strings for the sqlite3 legacy mode.
    method : {None, 'multi', callable}, default None
        Controls the SQL insertion clause used:

        * None : Uses standard SQL ``INSERT`` clause (one per row).
        * 'multi': Pass multiple values in a single ``INSERT`` clause.
        * callable with signature ``(pd_table, conn, keys, data_iter)``.

        Details and a sample callable implementation can be found in the
        section :ref:`insert method <io.sql.method>`.

        .. versionadded:: 0.24.0

    Raises
    ------
    ValueError
        When the table already exists and `if_exists` is 'fail' (the
        default).

    See Also
    --------
    read_sql : Read a DataFrame from a table.

    Notes
    -----
    Timezone aware datetime columns will be written as
    ``Timestamp with timezone`` type with SQLAlchemy if supported by the
    database. Otherwise, the datetimes will be stored as timezone unaware
    timestamps local to the original timezone.

    .. versionadded:: 0.24.0

    References
    ----------
    .. [1] http://docs.sqlalchemy.org
    .. [2] https://www.python.org/dev/peps/pep-0249/

    Examples
    --------
    Create an in-memory SQLite database.

    >>> from sqlalchemy import create_engine
    >>> engine = create_engine('sqlite://', echo=False)

    Create a table from scratch with 3 rows.

    >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
    >>> df
         name
    0  User 1
    1  User 2
    2  User 3

    >>> df.to_sql('users', con=engine)
    >>> engine.execute("SELECT * FROM users").fetchall()
    [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]

    >>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
    >>> df1.to_sql('users', con=engine, if_exists='append')
    >>> engine.execute("SELECT * FROM users").fetchall()
    [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
     (0, 'User 4'), (1, 'User 5')]

    Overwrite the table with just ``df1``.

    >>> df1.to_sql('users', con=engine, if_exists='replace',
    ...            index_label='id')
    >>> engine.execute("SELECT * FROM users").fetchall()
    [(0, 'User 4'), (1, 'User 5')]

    Specify the dtype (especially useful for integers with missing
    values). Notice that while pandas is forced to store the data as
    floating point, the database supports nullable integers. When
    fetching the data with Python, we get back integer scalars.

    >>> df = pd.DataFrame({"A": [1, None, 2]})
    >>> df
         A
    0  1.0
    1  NaN
    2  2.0

    >>> from sqlalchemy.types import Integer
    >>> df.to_sql('integers', con=engine, index=False,
    ...           dtype={"A": Integer()})

    >>> engine.execute("SELECT * FROM integers").fetchall()
    [(1,), (None,), (2,)]
    """
    from pandas.io import sql
    sql.to_sql(self, name, con, schema=schema, if_exists=if_exists,
               index=index, index_label=index_label, chunksize=chunksize,
               dtype=dtype, method=method)
pandas/core/generic.py
to_sql
kapilepatel/pandas
python
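The docstring examples use a SQLAlchemy engine; a short sketch of the documented legacy sqlite3 path, also demonstrating the ``chunksize`` parameter, which the examples above do not cover:

import sqlite3
import pandas as pd

con = sqlite3.connect(':memory:')
df = pd.DataFrame({'name': ['User 1', 'User 2', 'User 3']})

# chunksize batches the INSERTs instead of writing all rows at once;
# useful for large frames or databases with statement-size limits.
df.to_sql('users', con, index=False, chunksize=2)
con.execute('SELECT * FROM users').fetchall()
# [('User 1',), ('User 2',), ('User 3',)]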
def to_pickle(self, path, compression='infer',
              protocol=pkl.HIGHEST_PROTOCOL):
    """
    Pickle (serialize) object to file.

    Parameters
    ----------
    path : str
        File path where the pickled object will be stored.
    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
        A string representing the compression to use in the output file.
        By default, infers from the file extension in specified path.

        .. versionadded:: 0.20.0
    protocol : int
        Int which indicates which protocol should be used by the pickler,
        default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
        values for this parameter depend on the version of Python. For
        Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a
        valid value. For Python >= 3.4, 4 is a valid value. A negative
        value for the protocol parameter is equivalent to setting its
        value to HIGHEST_PROTOCOL.

        .. [1] https://docs.python.org/3/library/pickle.html
        .. versionadded:: 0.21.0

    See Also
    --------
    read_pickle : Load pickled pandas object (or any object) from file.
    DataFrame.to_hdf : Write DataFrame to an HDF5 file.
    DataFrame.to_sql : Write DataFrame to a SQL database.
    DataFrame.to_parquet : Write a DataFrame to the binary parquet format.

    Examples
    --------
    >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
    >>> original_df
       foo  bar
    0    0    5
    1    1    6
    2    2    7
    3    3    8
    4    4    9
    >>> original_df.to_pickle("./dummy.pkl")

    >>> unpickled_df = pd.read_pickle("./dummy.pkl")
    >>> unpickled_df
       foo  bar
    0    0    5
    1    1    6
    2    2    7
    3    3    8
    4    4    9

    >>> import os
    >>> os.remove("./dummy.pkl")
    """
    from pandas.io.pickle import to_pickle
    return to_pickle(self, path, compression=compression,
                     protocol=protocol)
pandas/core/generic.py
to_pickle
kapilepatel/pandas
python
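A small sketch of the ``compression='infer'`` behaviour described in the parameters, which the docstring example does not exercise:

import os
import pandas as pd

df = pd.DataFrame({'foo': range(5)})

# compression='infer' (the default) picks gzip from the '.gz' suffix;
# read_pickle applies the same inference on the way back in.
df.to_pickle('frame.pkl.gz')
df2 = pd.read_pickle('frame.pkl.gz')

os.remove('frame.pkl.gz')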
def to_clipboard(self, excel=True, sep=None, **kwargs):
    """
    Copy object to the system clipboard.

    Write a text representation of object to the system clipboard.
    This can be pasted into Excel, for example.

    Parameters
    ----------
    excel : bool, default True
        - True, use the provided separator, writing in a csv format for
          allowing easy pasting into excel.
        - False, write a string representation of the object to the
          clipboard.

    sep : str, default ``'\\t'``
        Field delimiter.
    **kwargs
        These parameters will be passed to DataFrame.to_csv.

    See Also
    --------
    DataFrame.to_csv : Write a DataFrame to a comma-separated values
        (csv) file.
    read_clipboard : Read text from clipboard and pass to read_table.

    Notes
    -----
    Requirements for your platform.

    - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
    - Windows : none
    - OS X : none

    Examples
    --------
    Copy the contents of a DataFrame to the clipboard.

    >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
    >>> df.to_clipboard(sep=',')
    ... # Wrote the following to the system clipboard:
    ... # ,A,B,C
    ... # 0,1,2,3
    ... # 1,4,5,6

    We can omit the index by passing the keyword `index` and setting
    it to false.

    >>> df.to_clipboard(sep=',', index=False)
    ... # Wrote the following to the system clipboard:
    ... # A,B,C
    ... # 1,2,3
    ... # 4,5,6
    """
    from pandas.io import clipboards
    clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
pandas/core/generic.py
to_clipboard
kapilepatel/pandas
python
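A sketch of the ``excel=False`` branch, which the docstring describes but does not demonstrate; it assumes a working clipboard backend (on Linux, xclip or xsel as noted above):

import pandas as pd

df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])

# excel=False copies the plain string repr (as printed at the console)
# rather than separator-delimited, Excel-pasteable text.
df.to_clipboard(excel=False)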
def to_xarray(self):
    """
    Return an xarray object from the pandas object.

    Returns
    -------
    xarray.DataArray or xarray.Dataset
        Data in the pandas structure converted to Dataset if the object
        is a DataFrame, or a DataArray if the object is a Series.

    See Also
    --------
    DataFrame.to_hdf : Write DataFrame to an HDF5 file.
    DataFrame.to_parquet : Write a DataFrame to the binary parquet format.

    Notes
    -----
    See the `xarray docs <http://xarray.pydata.org/en/stable/>`__

    Examples
    --------
    >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
    ...                    ('parrot', 'bird', 24.0, 2),
    ...                    ('lion', 'mammal', 80.5, 4),
    ...                    ('monkey', 'mammal', np.nan, 4)],
    ...                   columns=['name', 'class', 'max_speed',
    ...                            'num_legs'])
    >>> df
         name   class  max_speed  num_legs
    0  falcon    bird      389.0         2
    1  parrot    bird       24.0         2
    2    lion  mammal       80.5         4
    3  monkey  mammal        NaN         4

    >>> df.to_xarray()
    <xarray.Dataset>
    Dimensions:    (index: 4)
    Coordinates:
      * index      (index) int64 0 1 2 3
    Data variables:
        name       (index) object 'falcon' 'parrot' 'lion' 'monkey'
        class      (index) object 'bird' 'bird' 'mammal' 'mammal'
        max_speed  (index) float64 389.0 24.0 80.5 nan
        num_legs   (index) int64 2 2 4 4

    >>> df['max_speed'].to_xarray()
    <xarray.DataArray 'max_speed' (index: 4)>
    array([389. ,  24. ,  80.5,   nan])
    Coordinates:
      * index    (index) int64 0 1 2 3

    >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
    ...                         '2018-01-02', '2018-01-02'])
    >>> df_multiindex = pd.DataFrame({'date': dates,
    ...                               'animal': ['falcon', 'parrot',
    ...                                          'falcon', 'parrot'],
    ...                               'speed': [350, 18, 361, 15]
    ...                               }).set_index(['date', 'animal'])
    >>> df_multiindex
                       speed
    date       animal
    2018-01-01 falcon    350
               parrot     18
    2018-01-02 falcon    361
               parrot     15

    >>> df_multiindex.to_xarray()
    <xarray.Dataset>
    Dimensions:  (animal: 2, date: 2)
    Coordinates:
      * date     (date) datetime64[ns] 2018-01-01 2018-01-02
      * animal   (animal) object 'falcon' 'parrot'
    Data variables:
        speed    (date, animal) int64 350 18 361 15
    """
    try:
        import xarray
    except ImportError:
        raise ImportError('the xarray library is not installed\n'
                          'you can install via conda\n'
                          'conda install xarray\n'
                          'or via pip\n'
                          'pip install xarray\n')

    if self.ndim == 1:
        return xarray.DataArray.from_series(self)
    elif self.ndim == 2:
        return xarray.Dataset.from_dataframe(self)

    coords = [(a, self._get_axis(a)) for a in self._AXIS_ORDERS]
    return xarray.DataArray(self, coords=coords)
pandas/core/generic.py
to_xarray
kapilepatel/pandas
python
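The docstring covers the DataFrame-to-Dataset path in detail; a brief sketch of the Series branch (the ``ndim == 1`` case above) and its inverse, assuming xarray is installed:

import pandas as pd

s = pd.Series([389.0, 24.0], index=['falcon', 'parrot'],
              name='max_speed')

da = s.to_xarray()   # 1-D xarray.DataArray named 'max_speed'
s2 = da.to_series()  # xarray's converse, back to a pandas Series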
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
             index=True, na_rep='NaN', formatters=None, float_format=None,
             sparsify=None, index_names=True, bold_rows=False,
             column_format=None, longtable=None, escape=None,
             encoding=None, decimal='.', multicolumn=None,
             multicolumn_format=None, multirow=None):
    r"""
    Render an object to a LaTeX tabular environment table.

    Render an object to a tabular environment table. You can splice
    this into a LaTeX document. Requires \usepackage{booktabs}.

    .. versionchanged:: 0.20.2
       Added to Series

    Parameters
    ----------
    buf : file descriptor or None
        Buffer to write to. If None, the output is returned as a string.
    columns : list of label, optional
        The subset of columns to write. Writes all columns by default.
    col_space : int, optional
        The minimum width of each column.
    header : bool or list of str, default True
        Write out the column names. If a list of strings is given,
        it is assumed to be aliases for the column names.
    index : bool, default True
        Write row names (index).
    na_rep : str, default 'NaN'
        Missing data representation.
    formatters : list of functions or dict of {str: function}, optional
        Formatter functions to apply to columns' elements by position or
        name. The result of each function must be a unicode string.
        List must be of length equal to the number of columns.
    float_format : str, optional
        Format string for floating point numbers.
    sparsify : bool, optional
        Set to False for a DataFrame with a hierarchical index to print
        every multiindex key at each row. By default, the value will be
        read from the config module.
    index_names : bool, default True
        Prints the names of the indexes.
    bold_rows : bool, default False
        Make the row labels bold in the output.
    column_format : str, optional
        The columns format as specified in `LaTeX table format
        <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
        columns. By default, 'l' will be used for all columns except
        columns of numbers, which default to 'r'.
    longtable : bool, optional
        By default, the value will be read from the pandas config
        module. Use a longtable environment instead of tabular. Requires
        adding a \usepackage{longtable} to your LaTeX preamble.
    escape : bool, optional
        By default, the value will be read from the pandas config
        module. When set to False prevents from escaping latex special
        characters in column names.
    encoding : str, optional
        A string representing the encoding to use in the output file,
        defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
    decimal : str, default '.'
        Character recognized as decimal separator, e.g. ',' in Europe.

        .. versionadded:: 0.18.0
    multicolumn : bool, default True
        Use \multicolumn to enhance MultiIndex columns.
        The default will be read from the config module.

        .. versionadded:: 0.20.0
    multicolumn_format : str, default 'l'
        The alignment for multicolumns, similar to `column_format`.
        The default will be read from the config module.

        .. versionadded:: 0.20.0
    multirow : bool, default False
        Use \multirow to enhance MultiIndex rows. Requires adding a
        \usepackage{multirow} to your LaTeX preamble. Will print
        centered labels (instead of top-aligned) across the contained
        rows, separating groups via clines. The default will be read
        from the pandas config module.

        .. versionadded:: 0.20.0

    Returns
    -------
    str or None
        If buf is None, returns the resulting LaTeX representation as a
        string. Otherwise returns None.

    See Also
    --------
    DataFrame.to_string : Render a DataFrame to a console-friendly
        tabular output.
    DataFrame.to_html : Render a DataFrame as an HTML table.

    Examples
    --------
    >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
    ...                    'mask': ['red', 'purple'],
    ...                    'weapon': ['sai', 'bo staff']})
    >>> df.to_latex(index=False)  # doctest: +NORMALIZE_WHITESPACE
    '\\begin{tabular}{lll}\n\\toprule\n      name &   mask &    weapon
    \\\\\n\\midrule\n   Raphael &    red &       sai \\\\\n Donatello &
    purple &  bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
    """
    if self.ndim == 1:
        self = self.to_frame()
    if longtable is None:
        longtable = config.get_option('display.latex.longtable')
    if escape is None:
        escape = config.get_option('display.latex.escape')
    if multicolumn is None:
        multicolumn = config.get_option('display.latex.multicolumn')
    if multicolumn_format is None:
        multicolumn_format = config.get_option(
            'display.latex.multicolumn_format')
    if multirow is None:
        multirow = config.get_option('display.latex.multirow')

    formatter = DataFrameFormatter(self, buf=buf, columns=columns,
                                   col_space=col_space, na_rep=na_rep,
                                   header=header, index=index,
                                   formatters=formatters,
                                   float_format=float_format,
                                   bold_rows=bold_rows, sparsify=sparsify,
                                   index_names=index_names, escape=escape,
                                   decimal=decimal)
    formatter.to_latex(column_format=column_format, longtable=longtable,
                       encoding=encoding, multicolumn=multicolumn,
                       multicolumn_format=multicolumn_format,
                       multirow=multirow)
    if buf is None:
        return formatter.buf.getvalue()
7,407,435,962,317,687,000
Render an object to a LaTeX tabular environment table. Render an object to a tabular environment table. You can splice this into a LaTeX document. Requires \usepackage{booktabs}. .. versionchanged:: 0.20.2 Added to Series Parameters ---------- buf : file descriptor or None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default 'NaN' Missing data representation. formatters : list of functions or dict of {str: function}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : str, optional Format string for floating point numbers. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns. By default, 'l' will be used for all columns except columns of numbers, which default to 'r'. longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires adding a \usepackage{longtable} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False, prevents escaping of LaTeX special characters in column names. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. .. versionadded:: 0.18.0 multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. The default will be read from the config module. .. versionadded:: 0.20.0 multicolumn_format : str, default 'l' The alignment for multicolumns, similar to `column_format`. The default will be read from the config module. .. versionadded:: 0.20.0 multirow : bool, default False Use \multirow to enhance MultiIndex rows. Requires adding a \usepackage{multirow} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. .. versionadded:: 0.20.0 Returns ------- str or None If buf is None, returns the resulting LaTeX format as a string. Otherwise returns None. See Also -------- DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Examples -------- >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 
'weapon': ['sai', 'bo staff']}) >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE '\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon \\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
pandas/core/generic.py
to_latex
kapilepatel/pandas
python
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None, decimal='.', multicolumn=None, multicolumn_format=None, multirow=None): "\n Render an object to a LaTeX tabular environment table.\n\n Render an object to a tabular environment table. You can splice\n this into a LaTeX document. Requires \\usepackage{booktabs}.\n\n .. versionchanged:: 0.20.2\n Added to Series\n\n Parameters\n ----------\n buf : file descriptor or None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given,\n it is assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default 'NaN'\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns' elements by position or\n name. The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : str, optional\n Format string for floating point numbers.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row. By default, the value will be\n read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in `LaTeX table format\n <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3\n columns. By default, 'l' will be used for all columns except\n columns of numbers, which default to 'r'.\n longtable : bool, optional\n By default, the value will be read from the pandas config\n module. Use a longtable environment instead of tabular. Requires\n adding a \\usepackage{longtable} to your LaTeX preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config\n module. When set to False prevents from escaping latex special\n characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n\n .. versionadded:: 0.18.0\n multicolumn : bool, default True\n Use \\multicolumn to enhance MultiIndex columns.\n The default will be read from the config module.\n\n .. versionadded:: 0.20.0\n multicolumn_format : str, default 'l'\n The alignment for multicolumns, similar to `column_format`\n The default will be read from the config module.\n\n .. versionadded:: 0.20.0\n multirow : bool, default False\n Use \\multirow to enhance MultiIndex rows. Requires adding a\n \\usepackage{multirow} to your LaTeX preamble. Will print\n centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read\n from the pandas config module.\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n str or None\n If buf is None, returns the resulting LateX format as a\n string. 
Otherwise returns None.\n\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']})\n >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE\n '\\\\begin{tabular}{lll}\\n\\\\toprule\\n name & mask & weapon\n \\\\\\\\\\n\\\\midrule\\n Raphael & red & sai \\\\\\\\\\n Donatello &\n purple & bo staff \\\\\\\\\\n\\\\bottomrule\\n\\\\end{tabular}\\n'\n " if (self.ndim == 1): self = self.to_frame() if (longtable is None): longtable = config.get_option('display.latex.longtable') if (escape is None): escape = config.get_option('display.latex.escape') if (multicolumn is None): multicolumn = config.get_option('display.latex.multicolumn') if (multicolumn_format is None): multicolumn_format = config.get_option('display.latex.multicolumn_format') if (multirow is None): multirow = config.get_option('display.latex.multirow') formatter = DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, index_names=index_names, escape=escape, decimal=decimal) formatter.to_latex(column_format=column_format, longtable=longtable, encoding=encoding, multicolumn=multicolumn, multicolumn_format=multicolumn_format, multirow=multirow) if (buf is None): return formatter.buf.getvalue()
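A minimal usage sketch for the to_latex record above. The output assumes \usepackage{booktabs} in the consuming LaTeX document; the frame is illustrative:

import pandas as pd

df = pd.DataFrame({'name': ['Raphael', 'Donatello'], 'mask': ['red', 'purple']})
# buf=None returns the rendered table as a string instead of writing a file.
tex = df.to_latex(index=False, column_format='ll')
print(tex)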
def to_csv(self, path_or_buf=None, sep=',', na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, mode='w', encoding=None, compression='infer', quoting=None, quotechar='"', line_terminator=None, chunksize=None, tupleize_cols=None, date_format=None, doublequote=True, escapechar=None, decimal='.'): '\n Write object to a comma-separated values (csv) file.\n\n .. versionchanged:: 0.24.0\n The order of arguments for Series was changed.\n\n Parameters\n ----------\n path_or_buf : str or file handle, default None\n File path or object, if None is provided the result is returned as\n a string.\n\n .. versionchanged:: 0.24.0\n\n Was previously named "path" for Series.\n\n sep : str, default \',\'\n String of length 1. Field delimiter for the output file.\n na_rep : str, default \'\'\n Missing data representation.\n float_format : str, default None\n Format string for floating point numbers.\n columns : sequence, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given it is\n assumed to be aliases for the column names.\n\n .. versionchanged:: 0.24.0\n\n Previously defaulted to False for Series.\n\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, or False, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the object uses MultiIndex. If\n False do not print fields for index names. Use index_label=False\n for easier importing in R.\n mode : str\n Python write mode, default \'w\'.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to \'ascii\' on Python 2 and \'utf-8\' on Python 3.\n compression : str, default \'infer\'\n Compression mode among the following possible values: {\'infer\',\n \'gzip\', \'bz2\', \'zip\', \'xz\', None}. If \'infer\' and `path_or_buf`\n is path-like, then detect compression from the following\n extensions: \'.gz\', \'.bz2\', \'.zip\' or \'.xz\'. (otherwise no\n compression).\n\n .. versionchanged:: 0.24.0\n\n \'infer\' option added and set to default.\n\n quoting : optional constant from csv module\n Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`\n then floats are converted to strings and thus csv.QUOTE_NONNUMERIC\n will treat them as non-numeric.\n quotechar : str, default \'\\"\'\n String of length 1. Character used to quote fields.\n line_terminator : str, optional\n The newline character or character sequence to use in the output\n file. Defaults to `os.linesep`, which depends on the OS in which\n this method is called (\'\\n\' for linux, \'\\r\\n\' for Windows, i.e.).\n\n .. versionchanged:: 0.24.0\n chunksize : int or None\n Rows to write at a time.\n tupleize_cols : bool, default False\n Write MultiIndex columns as a list of tuples (if True) or in\n the new, expanded format, where each MultiIndex column is a row\n in the CSV (if False).\n\n .. deprecated:: 0.21.0\n This argument will be removed and will always write each row\n of the multi-index as a separate row in the CSV file.\n date_format : str, default None\n Format string for datetime objects.\n doublequote : bool, default True\n Control quoting of `quotechar` inside a field.\n escapechar : str, default None\n String of length 1. Character used to escape `sep` and `quotechar`\n when appropriate.\n decimal : str, default \'.\'\n Character recognized as decimal separator. 
E.g. use \',\' for\n European data.\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting csv format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_csv : Load a CSV file into a DataFrame.\n to_excel : Write object to an Excel file.\n\n Examples\n --------\n >>> df = pd.DataFrame({\'name\': [\'Raphael\', \'Donatello\'],\n ... \'mask\': [\'red\', \'purple\'],\n ... \'weapon\': [\'sai\', \'bo staff\']})\n >>> df.to_csv(index=False)\n \'name,mask,weapon\\nRaphael,red,sai\\nDonatello,purple,bo staff\\n\'\n ' df = (self if isinstance(self, ABCDataFrame) else self.to_frame()) if (tupleize_cols is not None): warnings.warn("The 'tupleize_cols' parameter is deprecated and will be removed in a future version", FutureWarning, stacklevel=2) else: tupleize_cols = False from pandas.io.formats.csvs import CSVFormatter formatter = CSVFormatter(df, path_or_buf, line_terminator=line_terminator, sep=sep, encoding=encoding, compression=compression, quoting=quoting, na_rep=na_rep, float_format=float_format, cols=columns, header=header, index=index, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, tupleize_cols=tupleize_cols, date_format=date_format, doublequote=doublequote, escapechar=escapechar, decimal=decimal) formatter.save() if (path_or_buf is None): return formatter.path_or_buf.getvalue()
3,406,690,401,346,995,000
Write object to a comma-separated values (csv) file. .. versionchanged:: 0.24.0 The order of arguments for Series was changed. Parameters ---------- path_or_buf : str or file handle, default None File path or object, if None is provided the result is returned as a string. .. versionchanged:: 0.24.0 Was previously named "path" for Series. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. float_format : str, default None Format string for floating point numbers. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. .. versionchanged:: 0.24.0 Previously defaulted to False for Series. index : bool, default True Write row names (index). index_label : str or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the object uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R. mode : str Python write mode, default 'w'. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. compression : str, default 'infer' Compression mode among the following possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If 'infer' and `path_or_buf` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip' or '.xz'. (otherwise no compression). .. versionchanged:: 0.24.0 'infer' option added and set to default. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. line_terminator : str, optional The newline character or character sequence to use in the output file. Defaults to `os.linesep`, which depends on the OS in which this method is called (e.g. '\n' for Linux, '\r\n' for Windows). .. versionchanged:: 0.24.0 chunksize : int or None Rows to write at a time. tupleize_cols : bool, default False Write MultiIndex columns as a list of tuples (if True) or in the new, expanded format, where each MultiIndex column is a row in the CSV (if False). .. deprecated:: 0.21.0 This argument will be removed and will always write each row of the multi-index as a separate row in the CSV file. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write object to an Excel file. Examples -------- >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
pandas/core/generic.py
to_csv
kapilepatel/pandas
python
def to_csv(self, path_or_buf=None, sep=',', na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, mode='w', encoding=None, compression='infer', quoting=None, quotechar='"', line_terminator=None, chunksize=None, tupleize_cols=None, date_format=None, doublequote=True, escapechar=None, decimal='.'): '\n Write object to a comma-separated values (csv) file.\n\n .. versionchanged:: 0.24.0\n The order of arguments for Series was changed.\n\n Parameters\n ----------\n path_or_buf : str or file handle, default None\n File path or object, if None is provided the result is returned as\n a string.\n\n .. versionchanged:: 0.24.0\n\n Was previously named "path" for Series.\n\n sep : str, default \',\'\n String of length 1. Field delimiter for the output file.\n na_rep : str, default \'\'\n Missing data representation.\n float_format : str, default None\n Format string for floating point numbers.\n columns : sequence, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given it is\n assumed to be aliases for the column names.\n\n .. versionchanged:: 0.24.0\n\n Previously defaulted to False for Series.\n\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, or False, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the object uses MultiIndex. If\n False do not print fields for index names. Use index_label=False\n for easier importing in R.\n mode : str\n Python write mode, default \'w\'.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to \'ascii\' on Python 2 and \'utf-8\' on Python 3.\n compression : str, default \'infer\'\n Compression mode among the following possible values: {\'infer\',\n \'gzip\', \'bz2\', \'zip\', \'xz\', None}. If \'infer\' and `path_or_buf`\n is path-like, then detect compression from the following\n extensions: \'.gz\', \'.bz2\', \'.zip\' or \'.xz\'. (otherwise no\n compression).\n\n .. versionchanged:: 0.24.0\n\n \'infer\' option added and set to default.\n\n quoting : optional constant from csv module\n Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`\n then floats are converted to strings and thus csv.QUOTE_NONNUMERIC\n will treat them as non-numeric.\n quotechar : str, default \'\\"\'\n String of length 1. Character used to quote fields.\n line_terminator : str, optional\n The newline character or character sequence to use in the output\n file. Defaults to `os.linesep`, which depends on the OS in which\n this method is called (e.g. \'\\n\' for Linux, \'\\r\\n\' for Windows).\n\n .. versionchanged:: 0.24.0\n chunksize : int or None\n Rows to write at a time.\n tupleize_cols : bool, default False\n Write MultiIndex columns as a list of tuples (if True) or in\n the new, expanded format, where each MultiIndex column is a row\n in the CSV (if False).\n\n .. deprecated:: 0.21.0\n This argument will be removed and will always write each row\n of the multi-index as a separate row in the CSV file.\n date_format : str, default None\n Format string for datetime objects.\n doublequote : bool, default True\n Control quoting of `quotechar` inside a field.\n escapechar : str, default None\n String of length 1. Character used to escape `sep` and `quotechar`\n when appropriate.\n decimal : str, default \'.\'\n Character recognized as decimal separator. E.g. 
use \',\' for\n European data.\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting csv format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_csv : Load a CSV file into a DataFrame.\n to_excel : Write object to an Excel file.\n\n Examples\n --------\n >>> df = pd.DataFrame({\'name\': [\'Raphael\', \'Donatello\'],\n ... \'mask\': [\'red\', \'purple\'],\n ... \'weapon\': [\'sai\', \'bo staff\']})\n >>> df.to_csv(index=False)\n \'name,mask,weapon\\nRaphael,red,sai\\nDonatello,purple,bo staff\\n\'\n ' df = (self if isinstance(self, ABCDataFrame) else self.to_frame()) if (tupleize_cols is not None): warnings.warn("The 'tupleize_cols' parameter is deprecated and will be removed in a future version", FutureWarning, stacklevel=2) else: tupleize_cols = False from pandas.io.formats.csvs import CSVFormatter formatter = CSVFormatter(df, path_or_buf, line_terminator=line_terminator, sep=sep, encoding=encoding, compression=compression, quoting=quoting, na_rep=na_rep, float_format=float_format, cols=columns, header=header, index=index, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, tupleize_cols=tupleize_cols, date_format=date_format, doublequote=doublequote, escapechar=escapechar, decimal=decimal) formatter.save() if (path_or_buf is None): return formatter.path_or_buf.getvalue()
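A minimal usage sketch for the to_csv record above; the filename in the comment is illustrative:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3.5, None]})
# path_or_buf=None returns the CSV text; na_rep fills missing values.
print(df.to_csv(index=False, na_rep='NA', float_format='%.1f'))
# With a path ending in '.gz', compression='infer' (the default) would
# gzip the output automatically:
# df.to_csv('out.csv.gz', index=False)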
@classmethod def _create_indexer(cls, name, indexer): 'Create an indexer like _name in the class.' if (getattr(cls, name, None) is None): _indexer = functools.partial(indexer, name) setattr(cls, name, property(_indexer, doc=indexer.__doc__))
-3,187,923,885,487,062,500
Create an indexer like _name in the class.
pandas/core/generic.py
_create_indexer
kapilepatel/pandas
python
@classmethod def _create_indexer(cls, name, indexer): if (getattr(cls, name, None) is None): _indexer = functools.partial(indexer, name) setattr(cls, name, property(_indexer, doc=indexer.__doc__))
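A standalone sketch of the property-factory pattern that _create_indexer uses; the names here are hypothetical stand-ins, not pandas internals:

import functools

def make_indexer(name, obj):
    """Stand-in for an indexer constructor bound to an attribute name."""
    return '{}-indexer for {}'.format(name, type(obj).__name__)

class Container(object):
    pass

# Attach a read-only property whose getter is the factory partially
# applied over the attribute name -- the same shape as _create_indexer.
if getattr(Container, 'demo', None) is None:
    setattr(Container, 'demo',
            property(functools.partial(make_indexer, 'demo'),
                     doc=make_indexer.__doc__))

print(Container().demo)  # demo-indexer for Container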
def get(self, key, default=None): '\n Get item from object for given key (DataFrame column, Panel slice,\n etc.). Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n ' try: return self[key] except (KeyError, ValueError, IndexError): return default
7,196,264,157,167,940,000
Get item from object for given key (DataFrame column, Panel slice, etc.). Returns default value if not found. Parameters ---------- key : object Returns ------- value : same type as items contained in object
pandas/core/generic.py
get
kapilepatel/pandas
python
def get(self, key, default=None): '\n Get item from object for given key (DataFrame column, Panel slice,\n etc.). Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n ' try: return self[key] except (KeyError, ValueError, IndexError): return default
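.get mirrors dict.get, as a short runnable sketch (data illustrative):

import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
print(df.get('a').tolist())    # [1, 2] -- the column as a Series
print(df.get('missing', -1))   # -1 -- the KeyError is swallowed, default returned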
def _get_item_cache(self, item): 'Return the cached item, item represents a label indexer.' cache = self._item_cache res = cache.get(item) if (res is None): values = self._data.get(item) res = self._box_item_values(item, values) cache[item] = res res._set_as_cached(item, self) res._is_copy = self._is_copy return res
-6,555,597,053,920,934,000
Return the cached item, item represents a label indexer.
pandas/core/generic.py
_get_item_cache
kapilepatel/pandas
python
def _get_item_cache(self, item): cache = self._item_cache res = cache.get(item) if (res is None): values = self._data.get(item) res = self._box_item_values(item, values) cache[item] = res res._set_as_cached(item, self) res._is_copy = self._is_copy return res
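A standalone sketch of the label-keyed cache idea in _get_item_cache; the class and method names are hypothetical, not pandas internals:

class ColumnStore(object):
    def __init__(self, data):
        self._data = data        # label -> raw values
        self._item_cache = {}    # label -> previously boxed result

    def get_item(self, label):
        # Same shape as _get_item_cache: try the cache first, box and
        # remember the result on a miss so repeated lookups return the
        # same object.
        res = self._item_cache.get(label)
        if res is None:
            res = list(self._data[label])   # stand-in for _box_item_values
            self._item_cache[label] = res
        return res

store = ColumnStore({'a': (1, 2, 3)})
assert store.get_item('a') is store.get_item('a')   # cache hit, same object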
def _set_as_cached(self, item, cacher): 'Set the _cacher attribute on the calling object with a weakref to\n cacher.\n ' self._cacher = (item, weakref.ref(cacher))
-5,891,535,431,011,866,000
Set the _cacher attribute on the calling object with a weakref to cacher.
pandas/core/generic.py
_set_as_cached
kapilepatel/pandas
python
def _set_as_cached(self, item, cacher): 'Set the _cacher attribute on the calling object with a weakref to\n cacher.\n ' self._cacher = (item, weakref.ref(cacher))
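A sketch of the weakref bookkeeping above; on CPython the parent is collected as soon as its last strong reference goes away (class names are illustrative):

import weakref

class Parent(object):
    pass

class Child(object):
    def set_as_cached(self, item, cacher):
        # As in _set_as_cached: a weak reference, so the child never
        # keeps its parent alive on its own.
        self._cacher = (item, weakref.ref(cacher))

parent, child = Parent(), Child()
child.set_as_cached('col', parent)
print(child._cacher[1]())   # the Parent instance while a strong ref exists
del parent
print(child._cacher[1]())   # None after collection (immediate on CPython)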
def _reset_cacher(self): 'Reset the cacher.' if hasattr(self, '_cacher'): del self._cacher
-5,268,877,552,582,349,000
Reset the cacher.
pandas/core/generic.py
_reset_cacher
kapilepatel/pandas
python
def _reset_cacher(self): if hasattr(self, '_cacher'): del self._cacher
def _iget_item_cache(self, item): 'Return the cached item, item represents a positional indexer.' ax = self._info_axis if ax.is_unique: lower = self._get_item_cache(ax[item]) else: lower = self._take(item, axis=self._info_axis_number) return lower
-1,188,085,687,366,115,600
Return the cached item, item represents a positional indexer.
pandas/core/generic.py
_iget_item_cache
kapilepatel/pandas
python
def _iget_item_cache(self, item): ax = self._info_axis if ax.is_unique: lower = self._get_item_cache(ax[item]) else: lower = self._take(item, axis=self._info_axis_number) return lower
def _maybe_cache_changed(self, item, value): 'The object has called back to us saying maybe it has changed.\n ' self._data.set(item, value)
-3,534,873,743,098,020,000
The object has called back to us saying maybe it has changed.
pandas/core/generic.py
_maybe_cache_changed
kapilepatel/pandas
python
def _maybe_cache_changed(self, item, value): '\n ' self._data.set(item, value)
@property def _is_cached(self): 'Return boolean indicating if self is cached or not.' return (getattr(self, '_cacher', None) is not None)
-4,355,322,062,463,915,000
Return boolean indicating if self is cached or not.
pandas/core/generic.py
_is_cached
kapilepatel/pandas
python
@property def _is_cached(self): return (getattr(self, '_cacher', None) is not None)
def _get_cacher(self): 'return my cacher or None' cacher = getattr(self, '_cacher', None) if (cacher is not None): cacher = cacher[1]() return cacher
4,695,654,946,970,992,000
return my cacher or None
pandas/core/generic.py
_get_cacher
kapilepatel/pandas
python
def _get_cacher(self): cacher = getattr(self, '_cacher', None) if (cacher is not None): cacher = cacher[1]() return cacher
@property def _is_view(self): 'Return boolean indicating if self is view of another array ' return self._data.is_view
-2,310,442,685,064,172,000
Return a boolean indicating if self is a view of another array.
pandas/core/generic.py
_is_view
kapilepatel/pandas
python
@property def _is_view(self): ' ' return self._data.is_view
def _maybe_update_cacher(self, clear=False, verify_is_copy=True): '\n See if we need to update our parent cacher if clear, then clear our\n cache.\n\n Parameters\n ----------\n clear : boolean, default False\n clear the item cache\n verify_is_copy : boolean, default True\n provide is_copy checks\n\n ' cacher = getattr(self, '_cacher', None) if (cacher is not None): ref = cacher[1]() if (ref is None): del self._cacher else: try: ref._maybe_cache_changed(cacher[0], self) except Exception: pass if verify_is_copy: self._check_setitem_copy(stacklevel=5, t='referant') if clear: self._clear_item_cache()
-3,125,249,544,881,261,600
See if we need to update our parent cacher; if `clear`, then clear our cache. Parameters ---------- clear : boolean, default False clear the item cache verify_is_copy : boolean, default True provide is_copy checks
pandas/core/generic.py
_maybe_update_cacher
kapilepatel/pandas
python
def _maybe_update_cacher(self, clear=False, verify_is_copy=True): '\n See if we need to update our parent cacher if clear, then clear our\n cache.\n\n Parameters\n ----------\n clear : boolean, default False\n clear the item cache\n verify_is_copy : boolean, default True\n provide is_copy checks\n\n ' cacher = getattr(self, '_cacher', None) if (cacher is not None): ref = cacher[1]() if (ref is None): del self._cacher else: try: ref._maybe_cache_changed(cacher[0], self) except Exception: pass if verify_is_copy: self._check_setitem_copy(stacklevel=5, t='referant') if clear: self._clear_item_cache()
def _slice(self, slobj, axis=0, kind=None): '\n Construct a slice of this container.\n\n kind parameter is maintained for compatibility with Series slicing.\n ' axis = self._get_block_manager_axis(axis) result = self._constructor(self._data.get_slice(slobj, axis=axis)) result = result.__finalize__(self) is_copy = ((axis != 0) or result._is_view) result._set_is_copy(self, copy=is_copy) return result
-6,803,673,270,064,336,000
Construct a slice of this container. kind parameter is maintained for compatibility with Series slicing.
pandas/core/generic.py
_slice
kapilepatel/pandas
python
def _slice(self, slobj, axis=0, kind=None): '\n Construct a slice of this container.\n\n kind parameter is maintained for compatibility with Series slicing.\n ' axis = self._get_block_manager_axis(axis) result = self._constructor(self._data.get_slice(slobj, axis=axis)) result = result.__finalize__(self) is_copy = ((axis != 0) or result._is_view) result._set_is_copy(self, copy=is_copy) return result
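A small demonstration of why _slice flags results for copy tracking: under pandas' classic (pre-copy-on-write) block semantics, row slices typically share memory with the parent, so the exact output may vary by version:

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': np.arange(4.0)})
sliced = df.iloc[0:2]
# True under classic block semantics: the slice views the same buffer,
# so writes through it could alias the parent -- hence _set_is_copy.
print(np.shares_memory(df['a'].values, sliced['a'].values))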
def _check_is_chained_assignment_possible(self): '\n Check if we are a view, have a cacher, and are of mixed type.\n If so, then force a setitem_copy check.\n\n Should be called just near setting a value\n\n Will return a boolean if it we are a view and are cached, but a\n single-dtype meaning that the cacher should be updated following\n setting.\n ' if (self._is_view and self._is_cached): ref = self._get_cacher() if ((ref is not None) and ref._is_mixed_type): self._check_setitem_copy(stacklevel=4, t='referant', force=True) return True elif self._is_copy: self._check_setitem_copy(stacklevel=4, t='referant') return False
7,285,224,792,968,785,000
Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. Should be called just prior to setting a value. Will return a boolean if we are a view and are cached, but single-dtype, meaning that the cacher should be updated following the setting.
pandas/core/generic.py
_check_is_chained_assignment_possible
kapilepatel/pandas
python
def _check_is_chained_assignment_possible(self): '\n Check if we are a view, have a cacher, and are of mixed type.\n If so, then force a setitem_copy check.\n\n Should be called just near setting a value\n\n Will return a boolean if it we are a view and are cached, but a\n single-dtype meaning that the cacher should be updated following\n setting.\n ' if (self._is_view and self._is_cached): ref = self._get_cacher() if ((ref is not None) and ref._is_mixed_type): self._check_setitem_copy(stacklevel=4, t='referant', force=True) return True elif self._is_copy: self._check_setitem_copy(stacklevel=4, t='referant') return False
def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): "\n\n Parameters\n ----------\n stacklevel : integer, default 4\n the level to show of the stack when the error is output\n t : string, the type of setting error\n force : boolean, default False\n if True, then force showing an error\n\n validate if we are doing a settitem on a chained copy.\n\n If you call this function, be sure to set the stacklevel such that the\n user will see the error *at the level of setting*\n\n It is technically possible to figure out that we are setting on\n a copy even WITH a multi-dtyped pandas object. In other words, some\n blocks may be views while other are not. Currently _is_view will ALWAYS\n return False for multi-blocks to avoid having to handle this case.\n\n df = DataFrame(np.arange(0,9), columns=['count'])\n df['group'] = 'b'\n\n # This technically need not raise SettingWithCopy if both are view\n # (which is not # generally guaranteed but is usually True. However,\n # this is in general not a good practice and we recommend using .loc.\n df.iloc[0:5]['group'] = 'a'\n\n " if (force or self._is_copy): value = config.get_option('mode.chained_assignment') if (value is None): return try: gc.collect(2) if (not gc.get_referents(self._is_copy())): self._is_copy = None return except Exception: pass try: if (self._is_copy().shape == self.shape): self._is_copy = None return except Exception: pass if isinstance(self._is_copy, string_types): t = self._is_copy elif (t == 'referant'): t = '\nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy' else: t = '\nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy' if (value == 'raise'): raise com.SettingWithCopyError(t) elif (value == 'warn'): warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
6,389,951,531,743,159,000
Parameters ---------- stacklevel : integer, default 4 the level of the stack to show when the error is output t : string, the type of setting error force : boolean, default False if True, then force showing an error Validate if we are doing a setitem on a chained copy. If you call this function, be sure to set the stacklevel such that the user will see the error *at the level of setting*. It is technically possible to figure out that we are setting on a copy even WITH a multi-dtyped pandas object. In other words, some blocks may be views while others are not. Currently _is_view will ALWAYS return False for multi-blocks to avoid having to handle this case. df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' # This technically need not raise SettingWithCopy if both are views # (which is not generally guaranteed but is usually True). However, # this is in general not a good practice and we recommend using .loc. df.iloc[0:5]['group'] = 'a'
pandas/core/generic.py
_check_setitem_copy
kapilepatel/pandas
python
def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): "\n\n Parameters\n ----------\n stacklevel : integer, default 4\n the level to show of the stack when the error is output\n t : string, the type of setting error\n force : boolean, default False\n if True, then force showing an error\n\n validate if we are doing a settitem on a chained copy.\n\n If you call this function, be sure to set the stacklevel such that the\n user will see the error *at the level of setting*\n\n It is technically possible to figure out that we are setting on\n a copy even WITH a multi-dtyped pandas object. In other words, some\n blocks may be views while other are not. Currently _is_view will ALWAYS\n return False for multi-blocks to avoid having to handle this case.\n\n df = DataFrame(np.arange(0,9), columns=['count'])\n df['group'] = 'b'\n\n # This technically need not raise SettingWithCopy if both are view\n # (which is not # generally guaranteed but is usually True. However,\n # this is in general not a good practice and we recommend using .loc.\n df.iloc[0:5]['group'] = 'a'\n\n " if (force or self._is_copy): value = config.get_option('mode.chained_assignment') if (value is None): return try: gc.collect(2) if (not gc.get_referents(self._is_copy())): self._is_copy = None return except Exception: pass try: if (self._is_copy().shape == self.shape): self._is_copy = None return except Exception: pass if isinstance(self._is_copy, string_types): t = self._is_copy elif (t == 'referant'): t = '\nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy' else: t = '\nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy' if (value == 'raise'): raise com.SettingWithCopyError(t) elif (value == 'warn'): warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
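The docstring's own scenario, made runnable: chained assignment on a mixed-dtype frame may emit SettingWithCopyWarning and silently fail, while a single .loc indexer sets reliably (exact behavior varies by pandas version):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(0, 9), columns=['count'])
df['group'] = 'b'
# Chained: operates on a possible copy; may raise SettingWithCopyWarning.
df.iloc[0:5]['group'] = 'a'
# Recommended: one indexer, one setitem.
df.loc[0:4, 'group'] = 'a'
print(df['group'].tolist())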
def __delitem__(self, key): '\n Delete item\n ' deleted = False maybe_shortcut = False if (hasattr(self, 'columns') and isinstance(self.columns, MultiIndex)): try: maybe_shortcut = (key not in self.columns._engine) except TypeError: pass if maybe_shortcut: if (not isinstance(key, tuple)): key = (key,) for col in self.columns: if (isinstance(col, tuple) and (col[:len(key)] == key)): del self[col] deleted = True if (not deleted): self._data.delete(key) try: del self._item_cache[key] except KeyError: pass
1,421,860,525,802,677,000
Delete item
pandas/core/generic.py
__delitem__
kapilepatel/pandas
python
def __delitem__(self, key): '\n \n ' deleted = False maybe_shortcut = False if (hasattr(self, 'columns') and isinstance(self.columns, MultiIndex)): try: maybe_shortcut = (key not in self.columns._engine) except TypeError: pass if maybe_shortcut: if (not isinstance(key, tuple)): key = (key,) for col in self.columns: if (isinstance(col, tuple) and (col[:len(key)] == key)): del self[col] deleted = True if (not deleted): self._data.delete(key) try: del self._item_cache[key] except KeyError: pass
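A sketch of the MultiIndex shortcut in __delitem__ above: a partial key deletes every column whose tuple starts with it (data illustrative):

import pandas as pd

cols = pd.MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'x')])
df = pd.DataFrame([[1, 2, 3]], columns=cols)
del df['a']                 # removes ('a', 'x') and ('a', 'y')
print(list(df.columns))     # [('b', 'x')]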
def _take(self, indices, axis=0, is_copy=True): '\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n This is the internal version of ``.take()`` and will contain a wider\n selection of parameters useful for internal use but not as suitable\n for public usage.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : int, default 0\n The axis on which to select elements. "0" means that we are\n selecting rows, "1" means that we are selecting columns, etc.\n is_copy : bool, default True\n Whether to return a copy of the original object or not.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n numpy.ndarray.take\n numpy.take\n ' self._consolidate_inplace() new_data = self._data.take(indices, axis=self._get_block_manager_axis(axis), verify=True) result = self._constructor(new_data).__finalize__(self) if is_copy: if (not result._get_axis(axis).equals(self._get_axis(axis))): result._set_is_copy(self) return result
-1,342,954,698,798,398,500
Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. This is the internal version of ``.take()`` and will contain a wider selection of parameters useful for internal use but not as suitable for public usage. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : int, default 0 The axis on which to select elements. "0" means that we are selecting rows, "1" means that we are selecting columns, etc. is_copy : bool, default True Whether to return a copy of the original object or not. Returns ------- taken : same type as caller An array-like containing the elements taken from the object. See Also -------- numpy.ndarray.take numpy.take
pandas/core/generic.py
_take
kapilepatel/pandas
python
def _take(self, indices, axis=0, is_copy=True): '\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n This is the internal version of ``.take()`` and will contain a wider\n selection of parameters useful for internal use but not as suitable\n for public usage.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : int, default 0\n The axis on which to select elements. "0" means that we are\n selecting rows, "1" means that we are selecting columns, etc.\n is_copy : bool, default True\n Whether to return a copy of the original object or not.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n numpy.ndarray.take\n numpy.take\n ' self._consolidate_inplace() new_data = self._data.take(indices, axis=self._get_block_manager_axis(axis), verify=True) result = self._constructor(new_data).__finalize__(self) if is_copy: if (not result._get_axis(axis).equals(self._get_axis(axis))): result._set_is_copy(self) return result
def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs): "\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. ``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n convert : bool, default True\n Whether to convert negative indices into positive ones.\n For example, ``-1`` would map to the ``len(axis) - 1``.\n The conversions are similar to the behavior of indexing a\n regular Python list.\n\n .. deprecated:: 0.21.0\n In the future, negative indices will always be converted.\n\n is_copy : bool, default True\n Whether to return a copy of the original object or not.\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3])\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2])\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n " if (convert is not None): msg = "The 'convert' parameter is deprecated and will be removed in a future version." warnings.warn(msg, FutureWarning, stacklevel=2) nv.validate_take(tuple(), kwargs) return self._take(indices, axis=axis, is_copy=is_copy)
3,953,250,733,073,923,000
Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. convert : bool, default True Whether to convert negative indices into positive ones. For example, ``-1`` would map to the ``len(axis) - 1``. The conversions are similar to the behavior of indexing a regular Python list. .. deprecated:: 0.21.0 In the future, negative indices will always be converted. is_copy : bool, default True Whether to return a copy of the original object or not. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- taken : same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]) name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]) name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5
pandas/core/generic.py
take
kapilepatel/pandas
python
def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs): "\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. ``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n convert : bool, default True\n Whether to convert negative indices into positive ones.\n For example, ``-1`` would map to the ``len(axis) - 1``.\n The conversions are similar to the behavior of indexing a\n regular Python list.\n\n .. deprecated:: 0.21.0\n In the future, negative indices will always be converted.\n\n is_copy : bool, default True\n Whether to return a copy of the original object or not.\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3])\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2])\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n " if (convert is not None): msg = "The 'convert' parameter is deprecated and will be removed in a future version." warnings.warn(msg, FutureWarning, stacklevel=2) nv.validate_take(tuple(), kwargs) return self._take(indices, axis=axis, is_copy=is_copy)
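The positional semantics of take, runnable, with index labels chosen to differ from positions:

import pandas as pd

df = pd.DataFrame({'name': ['falcon', 'parrot', 'lion'],
                   'max_speed': [389.0, 24.0, 80.5]},
                  index=[10, 20, 30])
print(df.take([0, 2]))        # positions 0 and 2, not labels 0 and 2
print(df.take([-1]))          # negative positions count from the end
print(df.take([1], axis=1))   # axis=1 takes columns by position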
def xs(self, key, axis=0, level=None, drop_level=True): "\n Return cross-section from the Series/DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to retrieve cross-section on.\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. Levels can be referred by label or position.\n drop_level : bool, default True\n If False, returns object with same levels as self.\n\n Returns\n -------\n Series or DataFrame\n Cross-section from the original Series or DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Notes\n -----\n `xs` can not be used to set values.\n\n MultiIndex Slicers is a generic way to get/set values on\n any level or levels.\n It is a superset of `xs` functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = pd.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal')\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog'))\n num_legs num_wings\n locomotion\n walks 4 0\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1)\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n\n Get values at several indexes and levels\n\n >>> df.xs(('bird', 'walks'),\n ... 
level=[0, 'locomotion'])\n num_legs num_wings\n animal\n penguin 2 2\n\n Get values at specified column and axis\n\n >>> df.xs('num_wings', axis=1)\n class animal locomotion\n mammal cat walks 0\n dog walks 0\n bat flies 2\n bird penguin walks 2\n Name: num_wings, dtype: int64\n " axis = self._get_axis_number(axis) labels = self._get_axis(axis) if (level is not None): (loc, new_ax) = labels.get_loc_level(key, level=level, drop_level=drop_level) indexer = ([slice(None)] * self.ndim) indexer[axis] = loc indexer = tuple(indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if (axis == 1): return self[key] self._consolidate_inplace() index = self.index if isinstance(index, MultiIndex): (loc, new_index) = self.index.get_loc_level(key, drop_level=drop_level) else: loc = self.index.get_loc(key) if isinstance(loc, np.ndarray): if (loc.dtype == np.bool_): (inds,) = loc.nonzero() return self._take(inds, axis=axis) else: return self._take(loc, axis=axis) if (not is_scalar(loc)): new_index = self.index[loc] if is_scalar(loc): new_values = self._data.fast_xs(loc) if ((not is_list_like(new_values)) or (self.ndim == 1)): return com.maybe_box_datetimelike(new_values) result = self._constructor_sliced(new_values, index=self.columns, name=self.index[loc], dtype=new_values.dtype) else: result = self.iloc[loc] result.index = new_index result._set_is_copy(self, copy=(not result._is_view)) return result
699,185,656,904,247,600
Return cross-section from the Series/DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to retrieve cross-section on. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. drop_level : bool, default True If False, returns object with same levels as self. Returns ------- Series or DataFrame Cross-section from the original Series or DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Notes ----- `xs` can not be used to set values. MultiIndex Slicers is a generic way to get/set values on any level or levels. It is a superset of `xs` functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = pd.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog')) num_legs num_wings locomotion walks 4 0 Get values at specified index and level >>> df.xs('cat', level=1) num_legs num_wings class locomotion mammal walks 4 0 Get values at several indexes and levels >>> df.xs(('bird', 'walks'), ... level=[0, 'locomotion']) num_legs num_wings animal penguin 2 2 Get values at specified column and axis >>> df.xs('num_wings', axis=1) class animal locomotion mammal cat walks 0 dog walks 0 bat flies 2 bird penguin walks 2 Name: num_wings, dtype: int64
pandas/core/generic.py
xs
kapilepatel/pandas
python
def xs(self, key, axis=0, level=None, drop_level=True): "\n Return cross-section from the Series/DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to retrieve cross-section on.\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. Levels can be referred by label or position.\n drop_level : bool, default True\n If False, returns object with same levels as self.\n\n Returns\n -------\n Series or DataFrame\n Cross-section from the original Series or DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Notes\n -----\n `xs` can not be used to set values.\n\n MultiIndex Slicers is a generic way to get/set values on\n any level or levels.\n It is a superset of `xs` functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = pd.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal')\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog'))\n num_legs num_wings\n locomotion\n walks 4 0\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1)\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n\n Get values at several indexes and levels\n\n >>> df.xs(('bird', 'walks'),\n ... 
level=[0, 'locomotion'])\n num_legs num_wings\n animal\n penguin 2 2\n\n Get values at specified column and axis\n\n >>> df.xs('num_wings', axis=1)\n class animal locomotion\n mammal cat walks 0\n dog walks 0\n bat flies 2\n bird penguin walks 2\n Name: num_wings, dtype: int64\n " axis = self._get_axis_number(axis) labels = self._get_axis(axis) if (level is not None): (loc, new_ax) = labels.get_loc_level(key, level=level, drop_level=drop_level) indexer = ([slice(None)] * self.ndim) indexer[axis] = loc indexer = tuple(indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if (axis == 1): return self[key] self._consolidate_inplace() index = self.index if isinstance(index, MultiIndex): (loc, new_index) = self.index.get_loc_level(key, drop_level=drop_level) else: loc = self.index.get_loc(key) if isinstance(loc, np.ndarray): if (loc.dtype == np.bool_): (inds,) = loc.nonzero() return self._take(inds, axis=axis) else: return self._take(loc, axis=axis) if (not is_scalar(loc)): new_index = self.index[loc] if is_scalar(loc): new_values = self._data.fast_xs(loc) if ((not is_list_like(new_values)) or (self.ndim == 1)): return com.maybe_box_datetimelike(new_values) result = self._constructor_sliced(new_values, index=self.columns, name=self.index[loc], dtype=new_values.dtype) else: result = self.iloc[loc] result.index = new_index result._set_is_copy(self, copy=(not result._is_view)) return result
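A minimal sketch of the one `xs` option the docstring examples skip: `drop_level=False`, which keeps the selected level in the result. The frame mirrors the docstring's animal example.

import pandas as pd

d = {'num_legs': [4, 4, 2, 2],
     'num_wings': [0, 0, 2, 2],
     'class': ['mammal', 'mammal', 'mammal', 'bird'],
     'animal': ['cat', 'dog', 'bat', 'penguin'],
     'locomotion': ['walks', 'walks', 'flies', 'walks']}
df = pd.DataFrame(d).set_index(['class', 'animal', 'locomotion'])

# drop_level=False keeps the selected 'class' level, so the result
# still carries a three-level MultiIndex instead of the usual two.
kept = df.xs('mammal', level='class', drop_level=False)
assert kept.index.nlevels == 3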
def select(self, crit, axis=0): '\n Return data corresponding to axis labels matching criteria.\n\n .. deprecated:: 0.21.0\n Use df.loc[df.index.map(crit)] to select via labels\n\n Parameters\n ----------\n crit : function\n To be called on each index (label). Should return True or False\n axis : int\n\n Returns\n -------\n selection : same type as caller\n ' warnings.warn("'select' is deprecated and will be removed in a future release. You can use .loc[labels.map(crit)] as a replacement", FutureWarning, stacklevel=2) axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) axis_values = self._get_axis(axis) if (len(axis_values) > 0): new_axis = axis_values[np.asarray([bool(crit(label)) for label in axis_values])] else: new_axis = axis_values return self.reindex(**{axis_name: new_axis})
-8,151,492,177,665,365,000
Return data corresponding to axis labels matching criteria. .. deprecated:: 0.21.0 Use df.loc[df.index.map(crit)] to select via labels. Parameters ---------- crit : function To be called on each index (label); should return True or False. axis : int Returns ------- selection : same type as caller
pandas/core/generic.py
select
kapilepatel/pandas
python
def select(self, crit, axis=0): '\n Return data corresponding to axis labels matching criteria.\n\n .. deprecated:: 0.21.0\n Use df.loc[df.index.map(crit)] to select via labels\n\n Parameters\n ----------\n crit : function\n To be called on each index (label). Should return True or False\n axis : int\n\n Returns\n -------\n selection : same type as caller\n ' warnings.warn("'select' is deprecated and will be removed in a future release. You can use .loc[labels.map(crit)] as a replacement", FutureWarning, stacklevel=2) axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) axis_values = self._get_axis(axis) if (len(axis_values) > 0): new_axis = axis_values[np.asarray([bool(crit(label)) for label in axis_values])] else: new_axis = axis_values return self.reindex(**{axis_name: new_axis})
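Since `select` is deprecated, a minimal sketch of the replacement its warning recommends; the frame and predicate are illustrative.

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]}, index=['apple', 'banana', 'cherry'])

# Deprecated spelling: df.select(lambda label: label.startswith('b'))
# Replacement recommended by the FutureWarning above:
result = df.loc[df.index.map(lambda label: label.startswith('b'))]
assert list(result.index) == ['banana']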
def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None): "\n Return an object with matching indices as other object.\n\n Conform the object to the same index on all axes. Optional\n filling logic, placing NaN in locations having no value\n in the previous index. A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : Object of the same data type\n Its row and column indices are used to define the new indices\n of this object.\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n limit : int, default None\n Maximum number of consecutive labels to fill for inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n Series or DataFrame\n Same type as caller, but with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'))\n\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = pd.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... '2014-02-15']))\n\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1)\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN NaN\n 2014-02-15 35.1 NaN medium\n " d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance) return self.reindex(**d)
-2,882,357,771,167,848,400
Return an object with matching indices as other object. Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. .. versionadded:: 0.21.0 (list-like tolerance) Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... '2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1) temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN NaN 2014-02-15 35.1 NaN medium
pandas/core/generic.py
reindex_like
kapilepatel/pandas
python
def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None): "\n Return an object with matching indices as other object.\n\n Conform the object to the same index on all axes. Optional\n filling logic, placing NaN in locations having no value\n in the previous index. A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : Object of the same data type\n Its row and column indices are used to define the new indices\n of this object.\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n limit : int, default None\n Maximum number of consecutive labels to fill for inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n Series or DataFrame\n Same type as caller, but with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'))\n\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = pd.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... '2014-02-15']))\n\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1)\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN NaN\n 2014-02-15 35.1 NaN medium\n " d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance) return self.reindex(**d)
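A minimal sketch of the fill behaviour described above, assuming a monotonically increasing shared index; the call should be equivalent to `s2.reindex(index=s1.index, method='ffill')`.

import pandas as pd

dates = pd.date_range('2014-02-12', periods=4, freq='D')
s1 = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)
s2 = pd.Series([10.0, 30.0], index=dates[[0, 2]])

# Conform s2 to s1's index, forward-filling the holes.
filled = s2.reindex_like(s1, method='ffill')
assert filled.tolist() == [10.0, 10.0, 30.0, 30.0]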
def _drop_axis(self, labels, axis, level=None, errors='raise'): "\n Drop labels from specified axis. Used in the ``drop`` method\n internally.\n\n Parameters\n ----------\n labels : single label or list-like\n axis : int or axis name\n level : int or level name, default None\n For MultiIndex\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n " axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) axis = self._get_axis(axis) if axis.is_unique: if (level is not None): if (not isinstance(axis, MultiIndex)): raise AssertionError('axis must be a MultiIndex') new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) result = self.reindex(**{axis_name: new_axis}) else: labels = ensure_object(com.index_labels_to_array(labels)) if (level is not None): if (not isinstance(axis, MultiIndex)): raise AssertionError('axis must be a MultiIndex') indexer = (~ axis.get_level_values(level).isin(labels)) if ((errors == 'raise') and indexer.all()): raise KeyError('{} not found in axis'.format(labels)) else: indexer = (~ axis.isin(labels)) labels_missing = (axis.get_indexer_for(labels) == (- 1)).any() if ((errors == 'raise') and labels_missing): raise KeyError('{} not found in axis'.format(labels)) slicer = ([slice(None)] * self.ndim) slicer[self._get_axis_number(axis_name)] = indexer result = self.loc[tuple(slicer)] return result
8,765,291,950,766,307,000
Drop labels from specified axis. Used in the ``drop`` method internally. Parameters ---------- labels : single label or list-like axis : int or axis name level : int or level name, default None For MultiIndex errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress the error and drop only the labels that exist.
pandas/core/generic.py
_drop_axis
kapilepatel/pandas
python
def _drop_axis(self, labels, axis, level=None, errors='raise'): "\n Drop labels from specified axis. Used in the ``drop`` method\n internally.\n\n Parameters\n ----------\n labels : single label or list-like\n axis : int or axis name\n level : int or level name, default None\n For MultiIndex\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n " axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) axis = self._get_axis(axis) if axis.is_unique: if (level is not None): if (not isinstance(axis, MultiIndex)): raise AssertionError('axis must be a MultiIndex') new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) result = self.reindex(**{axis_name: new_axis}) else: labels = ensure_object(com.index_labels_to_array(labels)) if (level is not None): if (not isinstance(axis, MultiIndex)): raise AssertionError('axis must be a MultiIndex') indexer = (~ axis.get_level_values(level).isin(labels)) if ((errors == 'raise') and indexer.all()): raise KeyError('{} not found in axis'.format(labels)) else: indexer = (~ axis.isin(labels)) labels_missing = (axis.get_indexer_for(labels) == (- 1)).any() if ((errors == 'raise') and labels_missing): raise KeyError('{} not found in axis'.format(labels)) slicer = ([slice(None)] * self.ndim) slicer[self._get_axis_number(axis_name)] = indexer result = self.loc[tuple(slicer)] return result
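`_drop_axis` backs the public `drop`; a sketch, through the public API, of the `errors` behaviour it implements. The frame is illustrative.

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])

# errors='raise' (the default) fails when any label is missing;
# errors='ignore' silently drops only the labels that exist.
try:
    df.drop(['y', 'z'])
except KeyError:
    pass
trimmed = df.drop(['y', 'z'], errors='ignore')
assert list(trimmed.index) == ['x']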
def _update_inplace(self, result, verify_is_copy=True): '\n Replace self internals with result.\n\n Parameters\n ----------\n verify_is_copy : bool, default True\n If True, run the is_copy checks after replacing the internals.\n\n ' self._reset_cache() self._clear_item_cache() self._data = getattr(result, '_data', result) self._maybe_update_cacher(verify_is_copy=verify_is_copy)
4,356,424,455,077,415,400
Replace self internals with result. Parameters ---------- verify_is_copy : bool, default True If True, run the is_copy checks after replacing the internals.
pandas/core/generic.py
_update_inplace
kapilepatel/pandas
python
def _update_inplace(self, result, verify_is_copy=True): '\n Replace self internals with result.\n\n Parameters\n ----------\n verify_is_copy : bool, default True\n If True, run the is_copy checks after replacing the internals.\n\n ' self._reset_cache() self._clear_item_cache() self._data = getattr(result, '_data', result) self._maybe_update_cacher(verify_is_copy=verify_is_copy)
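`_update_inplace` is internal; its user-visible effect is what `inplace=True` does on public methods. A tiny illustrative sketch:

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, np.nan]})

# fillna computes a result and hands it to _update_inplace, so df
# itself is modified and the method returns None.
out = df.fillna(0.0, inplace=True)
assert out is None
assert df['a'].tolist() == [1.0, 0.0]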
def add_prefix(self, prefix): "\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_prefix('item_')\n item_0 1\n item_1 2\n item_2 3\n item_3 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n " f = functools.partial('{prefix}{}'.format, prefix=prefix) mapper = {self._info_axis_name: f} return self.rename(**mapper)
-2,298,415,598,138,723,300
Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6
pandas/core/generic.py
add_prefix
kapilepatel/pandas
python
def add_prefix(self, prefix): "\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_prefix('item_')\n item_0 1\n item_1 2\n item_2 3\n item_3 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n " f = functools.partial('{prefix}{}'.format, prefix=prefix) mapper = {self._info_axis_name: f} return self.rename(**mapper)
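Because `add_prefix` routes through `rename` on the info axis (the `mapper` dict above), the two spellings below should produce the same columns; a minimal sketch.

import pandas as pd

df = pd.DataFrame({'A': [1], 'B': [2]})

# add_prefix packages up rename(columns=...) with a format function.
left = df.add_prefix('col_')
right = df.rename(columns='col_{}'.format)
assert list(left.columns) == list(right.columns) == ['col_A', 'col_B']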
def add_suffix(self, suffix): "\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_suffix('_item')\n 0_item 1\n 1_item 2\n 2_item 3\n 3_item 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n " f = functools.partial('{}{suffix}'.format, suffix=suffix) mapper = {self._info_axis_name: f} return self.rename(**mapper)
6,699,270,932,651,102,000
Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6
pandas/core/generic.py
add_suffix
kapilepatel/pandas
python
def add_suffix(self, suffix): "\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_suffix('_item')\n 0_item 1\n 1_item 2\n 2_item 3\n 3_item 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n " f = functools.partial('{}{suffix}'.format, suffix=suffix) mapper = {self._info_axis_name: f} return self.rename(**mapper)
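A companion sketch for the Series case: the info axis is the index, so the row labels are suffixed.

import pandas as pd

s = pd.Series([1, 2], index=['x', 'y'])

# Row labels gain the suffix; values are untouched.
assert list(s.add_suffix('_item').index) == ['x_item', 'y_item']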
def sort_values(self, by=None, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): "\n Sort by the values along either axis.\n\n Parameters\n ----------%(optional_by)s\n axis : %(axes_single_arg)s, default 0\n Axis to be sorted.\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the\n end.\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted values if inplace=False, None otherwise.\n\n Examples\n --------\n >>> df = pd.DataFrame({\n ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... })\n >>> df\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort by multiple columns\n\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n 3 NaN 8 4\n\n Putting NAs first\n\n >>> df.sort_values(by='col1', ascending=False, na_position='first')\n col1 col2 col3\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n " raise NotImplementedError('sort_values has not been implemented on Panel or Panel4D objects.')
-6,910,440,778,229,990,000
Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. Returns ------- sorted_obj : DataFrame or None DataFrame with sorted values if inplace=False, None otherwise. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }) >>> df col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 3 NaN 8 4 4 D 7 2 5 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1 3 NaN 8 4 Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 3 NaN 8 4 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1
pandas/core/generic.py
sort_values
kapilepatel/pandas
python
def sort_values(self, by=None, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): "\n Sort by the values along either axis.\n\n Parameters\n ----------%(optional_by)s\n axis : %(axes_single_arg)s, default 0\n Axis to be sorted.\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the\n end.\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted values if inplace=False, None otherwise.\n\n Examples\n --------\n >>> df = pd.DataFrame({\n ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... })\n >>> df\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort by multiple columns\n\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n 3 NaN 8 4\n\n Putting NAs first\n\n >>> df.sort_values(by='col1', ascending=False, na_position='first')\n col1 col2 col3\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n " raise NotImplementedError('sort_values has not been implemented on Panel or Panel4D objects.')
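The generic stub above only raises; the working implementations live on Series and DataFrame. A sketch of the list-of-bools form of `ascending`, one flag per sort key; the frame is illustrative.

import pandas as pd

df = pd.DataFrame({'col1': ['A', 'A', 'B'], 'col2': [2, 1, 9]})

# col1 ascending, col2 descending within each col1 group.
out = df.sort_values(by=['col1', 'col2'], ascending=[True, False])
assert out['col2'].tolist() == [2, 1, 9]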
def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True): "\n Sort object by labels (along an axis).\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis along which to sort. The value 0 identifies the rows,\n and 1 identifies the columns.\n level : int or level name or list of ints or list of level names\n If not None, sort on values in specified index level(s).\n ascending : bool, default True\n Sort ascending vs. descending.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.\n Not implemented for MultiIndex.\n sort_remaining : bool, default True\n If True and sorting by level and index is multilevel, sort by other\n levels too (in order) after sorting by specified level.\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted index if inplace=False, None otherwise.\n " inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) labels = self._get_axis(axis) if (level is not None): raise NotImplementedError('level is not implemented') if inplace: raise NotImplementedError('inplace is not implemented') sort_index = labels.argsort() if (not ascending): sort_index = sort_index[::(- 1)] new_axis = labels.take(sort_index) return self.reindex(**{axis_name: new_axis})
4,111,235,535,888,698,000
Sort object by labels (along an axis). Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis along which to sort. The value 0 identifies the rows, and 1 identifies the columns. level : int or level name or list of ints or list of level names If not None, sort on values in specified index level(s). ascending : bool, default True Sort ascending vs. descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. Returns ------- sorted_obj : DataFrame or None DataFrame with sorted index if inplace=False, None otherwise.
pandas/core/generic.py
sort_index
kapilepatel/pandas
python
def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True): "\n Sort object by labels (along an axis).\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis along which to sort. The value 0 identifies the rows,\n and 1 identifies the columns.\n level : int or level name or list of ints or list of level names\n If not None, sort on values in specified index level(s).\n ascending : bool, default True\n Sort ascending vs. descending.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.\n Not implemented for MultiIndex.\n sort_remaining : bool, default True\n If True and sorting by level and index is multilevel, sort by other\n levels too (in order) after sorting by specified level.\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted index if inplace=False, None otherwise.\n " inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) labels = self._get_axis(axis) if (level is not None): raise NotImplementedError('level is not implemented') if inplace: raise NotImplementedError('inplace is not implemented') sort_index = labels.argsort() if (not ascending): sort_index = sort_index[::(- 1)] new_axis = labels.take(sort_index) return self.reindex(**{axis_name: new_axis})
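A minimal sketch of the `axis` handling above: `axis=1` sorts the column labels rather than the rows.

import pandas as pd

df = pd.DataFrame([[1, 2, 3]], columns=['b', 'c', 'a'])

out = df.sort_index(axis=1)
assert list(out.columns) == ['a', 'b', 'c']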
def reindex(self, *args, **kwargs): '\n Conform %(klass)s to new index with optional filling logic, placing\n NA/NaN in locations having no value in the previous index. A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n %(optional_labels)s\n %(axes)s : array-like, optional\n New labels / index to conform to, should be specified using\n keywords. Preferably an Index object to avoid duplicating data\n %(optional_axis)s\n method : {None, \'backfill\'/\'bfill\', \'pad\'/\'ffill\', \'nearest\'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don\'t fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n level : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n "compatible" value.\n limit : int, default None\n Maximum number of consecutive elements to forward or backward fill.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index\'s type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n %(klass)s with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={\'index\', \'columns\'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = [\'Firefox\', \'Chrome\', \'Safari\', \'IE10\', \'Konqueror\']\n >>> df = pd.DataFrame({\n ... \'http_status\': [200,200,404,404,301],\n ... \'response_time\': [0.04, 0.02, 0.07, 0.08, 1.0]},\n ... index=index)\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index= [\'Safari\', \'Iceweasel\', \'Comodo Dragon\', \'IE10\',\n ... \'Chrome\']\n >>> df.reindex(new_index)\n http_status response_time\n Safari 404.0 0.07\n Iceweasel NaN NaN\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Chrome 200.0 0.02\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``. 
Because the index is not monotonically\n increasing or decreasing, we cannot use arguments to the keyword\n ``method`` to fill the ``NaN`` values.\n\n >>> df.reindex(new_index, fill_value=0)\n http_status response_time\n Safari 404 0.07\n Iceweasel 0 0.00\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Chrome 200 0.02\n\n >>> df.reindex(new_index, fill_value=\'missing\')\n http_status response_time\n Safari 404 0.07\n Iceweasel missing missing\n Comodo Dragon missing missing\n IE10 404 0.08\n Chrome 200 0.02\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=[\'http_status\', \'user_agent\'])\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n Or we can use "axis-style" keyword arguments\n\n >>> df.reindex([\'http_status\', \'user_agent\'], axis="columns")\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range(\'1/1/2010\', periods=6, freq=\'D\')\n >>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},\n ... index=date_index)\n >>> df2\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range(\'12/29/2009\', periods=10, freq=\'D\')\n >>> df2.reindex(date_index2)\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n The index entries that did not have a value in the original data frame\n (for example, \'2009-12-29\') are by default filled with ``NaN``.\n If desired, we can fill in the missing values using one of several\n options.\n\n For example, to back-propagate the last valid value to fill the ``NaN``\n values, pass ``bfill`` as an argument to the ``method`` keyword.\n\n >>> df2.reindex(date_index2, method=\'bfill\')\n prices\n 2009-12-29 100.0\n 2009-12-30 100.0\n 2009-12-31 100.0\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n Please note that the ``NaN`` value present in the original dataframe\n (at index value 2010-01-03) will not be filled by any of the\n value propagation schemes. This is because filling while reindexing\n does not look at dataframe values, but only compares the original and\n desired indexes. 
If you do want to fill in the ``NaN`` values present\n in the original dataframe, use the ``fillna()`` method.\n\n See the :ref:`user guide <basics.reindexing>` for more.\n ' (axes, kwargs) = self._construct_axes_from_arguments(args, kwargs) method = missing.clean_reindex_fill_method(kwargs.pop('method', None)) level = kwargs.pop('level', None) copy = kwargs.pop('copy', True) limit = kwargs.pop('limit', None) tolerance = kwargs.pop('tolerance', None) fill_value = kwargs.pop('fill_value', None) kwargs.pop('axis', None) if kwargs: raise TypeError('reindex() got an unexpected keyword argument "{0}"'.format(list(kwargs.keys())[0])) self._consolidate_inplace() if all((self._get_axis(axis).identical(ax) for (axis, ax) in axes.items() if (ax is not None))): if copy: return self.copy() return self if self._needs_reindex_multi(axes, method, level): try: return self._reindex_multi(axes, copy, fill_value) except Exception: pass return self._reindex_axes(axes, level, limit, tolerance, method, fill_value, copy).__finalize__(self)
34,584,110,546,635,700
Conform %(klass)s to new index with optional filling logic, placing NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- %(optional_labels)s %(axes)s : array-like, optional New labels / index to conform to, should be specified using keywords. Preferably an Index object to avoid duplicating data %(optional_axis)s method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap copy : bool, default True Return a new object, even if the passed indexes are the same. level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. limit : int, default None Maximum number of consecutive elements to forward or backward fill. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. .. versionadded:: 0.21.0 (list-like tolerance) Returns ------- %(klass)s with changed index. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = pd.DataFrame({ ... 'http_status': [200,200,404,404,301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}, ... index=index) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index) http_status response_time Safari 404.0 0.07 Iceweasel NaN NaN Comodo Dragon NaN NaN IE10 404.0 0.08 Chrome 200.0 0.02 We can fill in the missing values by passing a value to the keyword ``fill_value``. Because the index is not monotonically increasing or decreasing, we cannot use arguments to the keyword ``method`` to fill the ``NaN`` values. 
>>> df.reindex(new_index, fill_value=0) http_status response_time Safari 404 0.07 Iceweasel 0 0.00 Comodo Dragon 0 0.00 IE10 404 0.08 Chrome 200 0.02 >>> df.reindex(new_index, fill_value='missing') http_status response_time Safari 404 0.07 Iceweasel missing missing Comodo Dragon missing missing IE10 404 0.08 Chrome 200 0.02 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']) http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns") http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]}, ... index=date_index) >>> df2 prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2) prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN The index entries that did not have a value in the original data frame (for example, '2009-12-29') are by default filled with ``NaN``. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. >>> df2.reindex(date_index2, method='bfill') prices 2009-12-29 100.0 2009-12-30 100.0 2009-12-31 100.0 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN Please note that the ``NaN`` value present in the original dataframe (at index value 2010-01-03) will not be filled by any of the value propagation schemes. This is because filling while reindexing does not look at dataframe values, but only compares the original and desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. See the :ref:`user guide <basics.reindexing>` for more.
pandas/core/generic.py
reindex
kapilepatel/pandas
python
def reindex(self, *args, **kwargs): '\n Conform %(klass)s to new index with optional filling logic, placing\n NA/NaN in locations having no value in the previous index. A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n %(optional_labels)s\n %(axes)s : array-like, optional\n New labels / index to conform to, should be specified using\n keywords. Preferably an Index object to avoid duplicating data\n %(optional_axis)s\n method : {None, \'backfill\'/\'bfill\', \'pad\'/\'ffill\', \'nearest\'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don\'t fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n level : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n "compatible" value.\n limit : int, default None\n Maximum number of consecutive elements to forward or backward fill.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index\'s type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n %(klass)s with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={\'index\', \'columns\'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = [\'Firefox\', \'Chrome\', \'Safari\', \'IE10\', \'Konqueror\']\n >>> df = pd.DataFrame({\n ... \'http_status\': [200,200,404,404,301],\n ... \'response_time\': [0.04, 0.02, 0.07, 0.08, 1.0]},\n ... index=index)\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index= [\'Safari\', \'Iceweasel\', \'Comodo Dragon\', \'IE10\',\n ... \'Chrome\']\n >>> df.reindex(new_index)\n http_status response_time\n Safari 404.0 0.07\n Iceweasel NaN NaN\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Chrome 200.0 0.02\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``. 
Because the index is not monotonically\n increasing or decreasing, we cannot use arguments to the keyword\n ``method`` to fill the ``NaN`` values.\n\n >>> df.reindex(new_index, fill_value=0)\n http_status response_time\n Safari 404 0.07\n Iceweasel 0 0.00\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Chrome 200 0.02\n\n >>> df.reindex(new_index, fill_value=\'missing\')\n http_status response_time\n Safari 404 0.07\n Iceweasel missing missing\n Comodo Dragon missing missing\n IE10 404 0.08\n Chrome 200 0.02\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=[\'http_status\', \'user_agent\'])\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n Or we can use "axis-style" keyword arguments\n\n >>> df.reindex([\'http_status\', \'user_agent\'], axis="columns")\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range(\'1/1/2010\', periods=6, freq=\'D\')\n >>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},\n ... index=date_index)\n >>> df2\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range(\'12/29/2009\', periods=10, freq=\'D\')\n >>> df2.reindex(date_index2)\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n The index entries that did not have a value in the original data frame\n (for example, \'2009-12-29\') are by default filled with ``NaN``.\n If desired, we can fill in the missing values using one of several\n options.\n\n For example, to back-propagate the last valid value to fill the ``NaN``\n values, pass ``bfill`` as an argument to the ``method`` keyword.\n\n >>> df2.reindex(date_index2, method=\'bfill\')\n prices\n 2009-12-29 100.0\n 2009-12-30 100.0\n 2009-12-31 100.0\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n Please note that the ``NaN`` value present in the original dataframe\n (at index value 2010-01-03) will not be filled by any of the\n value propagation schemes. This is because filling while reindexing\n does not look at dataframe values, but only compares the original and\n desired indexes. 
If you do want to fill in the ``NaN`` values present\n in the original dataframe, use the ``fillna()`` method.\n\n See the :ref:`user guide <basics.reindexing>` for more.\n ' (axes, kwargs) = self._construct_axes_from_arguments(args, kwargs) method = missing.clean_reindex_fill_method(kwargs.pop('method', None)) level = kwargs.pop('level', None) copy = kwargs.pop('copy', True) limit = kwargs.pop('limit', None) tolerance = kwargs.pop('tolerance', None) fill_value = kwargs.pop('fill_value', None) kwargs.pop('axis', None) if kwargs: raise TypeError('reindex() got an unexpected keyword argument "{0}"'.format(list(kwargs.keys())[0])) self._consolidate_inplace() if all((self._get_axis(axis).identical(ax) for (axis, ax) in axes.items() if (ax is not None))): if copy: return self.copy() return self if self._needs_reindex_multi(axes, method, level): try: return self._reindex_multi(axes, copy, fill_value) except Exception: pass return self._reindex_axes(axes, level, limit, tolerance, method, fill_value, copy).__finalize__(self)
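A short sketch combining `method='nearest'` with `tolerance`, which the docstring describes but does not demonstrate together; labels and values are illustrative.

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0],
              index=pd.date_range('2010-01-01', periods=3, freq='D'))

# 'nearest' matches each new label to the closest existing one;
# tolerance caps how far away that match may be.
target = pd.DatetimeIndex(['2010-01-01 06:00', '2010-01-02 18:00'])
out = s.reindex(target, method='nearest', tolerance=pd.Timedelta('12h'))
assert out.tolist() == [1.0, 3.0]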
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): 'Perform the reindex for all the axes.' obj = self for a in self._AXIS_ORDERS: labels = axes[a] if (labels is None): continue ax = self._get_axis(a) (new_index, indexer) = ax.reindex(labels, level=level, limit=limit, tolerance=tolerance, method=method) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers({axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False) return obj
4,721,573,882,537,585,000
Perform the reindex for all the axes.
pandas/core/generic.py
_reindex_axes
kapilepatel/pandas
python
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): obj = self for a in self._AXIS_ORDERS: labels = axes[a] if (labels is None): continue ax = self._get_axis(a) (new_index, indexer) = ax.reindex(labels, level=level, limit=limit, tolerance=tolerance, method=method) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers({axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False) return obj
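`_reindex_axes` applies the requested axes one at a time, so a two-axis `reindex` should agree with chained single-axis calls; a sketch of that equivalence.

import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(4).reshape(2, 2),
                  index=['r1', 'r2'], columns=['c1', 'c2'])

one_shot = df.reindex(index=['r2', 'r3'], columns=['c2'])
chained = df.reindex(index=['r2', 'r3']).reindex(columns=['c2'])
assert one_shot.equals(chained)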
def _needs_reindex_multi(self, axes, method, level): 'Check whether we need a multi-axis reindex.' return ((com.count_not_none(*axes.values()) == self._AXIS_LEN) and (method is None) and (level is None) and (not self._is_mixed_type))
2,577,472,411,366,709,000
Check if we do need a multi reindex.
pandas/core/generic.py
_needs_reindex_multi
kapilepatel/pandas
python
def _needs_reindex_multi(self, axes, method, level): return ((com.count_not_none(*axes.values()) == self._AXIS_LEN) and (method is None) and (level is None) and (not self._is_mixed_type))
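For reference, com.count_not_none simply counts the non-None arguments; a hedged re-implementation (a stand-in for the pandas.core.common helper, not its actual source) makes the gate readable:

def count_not_none(*args):
    # Stand-in for pandas.core.common.count_not_none.
    return sum(arg is not None for arg in args)

# On a 2-axis DataFrame the fast multi path also needs method=None,
# level=None and homogeneous dtypes; the label counts alone look like:
print(count_not_none(['a', 'b'], None))        # 1: only one axis supplied
print(count_not_none(['a', 'b'], ['x', 'y']))  # 2: candidate for multi path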
def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False, allow_dups=False): 'allow_dups indicates an internal call here ' new_data = self._data for axis in sorted(reindexers.keys()): (index, indexer) = reindexers[axis] baxis = self._get_block_manager_axis(axis) if (index is None): continue index = ensure_index(index) if (indexer is not None): indexer = ensure_int64(indexer) new_data = new_data.reindex_indexer(index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy) if (copy and (new_data is self._data)): new_data = new_data.copy() return self._constructor(new_data).__finalize__(self)
6,166,168,766,132,388,000
allow_dups indicates an internal call here
pandas/core/generic.py
_reindex_with_indexers
kapilepatel/pandas
python
def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False, allow_dups=False): ' ' new_data = self._data for axis in sorted(reindexers.keys()): (index, indexer) = reindexers[axis] baxis = self._get_block_manager_axis(axis) if (index is None): continue index = ensure_index(index) if (indexer is not None): indexer = ensure_int64(indexer) new_data = new_data.reindex_indexer(index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy) if (copy and (new_data is self._data)): new_data = new_data.copy() return self._constructor(new_data).__finalize__(self)
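_reindex_with_indexers is the internal funnel behind public reindexing; its fill_value parameter is what surfaces in calls like the following short usage sketch:

import pandas as pd

s = pd.Series([1, 2], index=['a', 'b'])
# 'c' gets indexer -1 internally, so its new position is filled with 0.
print(s.reindex(['a', 'b', 'c'], fill_value=0))
# a    1
# b    2
# c    0
# dtype: int64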
def filter(self, items=None, like=None, regex=None, axis=None): '\n Subset rows or columns of dataframe according to labels in\n the specified index.\n\n Note that this routine does not filter a dataframe on its\n contents. The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n List of axis to restrict to (must not all be present).\n like : string\n Keep axis where "arg in col == True".\n regex : string (regular expression)\n Keep axis with re.search(regex, col) == True.\n axis : int or string axis name\n The axis to filter on. By default this is the info axis,\n \'index\' for Series, \'columns\' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array(([1,2,3], [4,5,6])),\n ... index=[\'mouse\', \'rabbit\'],\n ... columns=[\'one\', \'two\', \'three\'])\n\n >>> # select columns by name\n >>> df.filter(items=[\'one\', \'three\'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex=\'e$\', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing \'bbi\'\n >>> df.filter(like=\'bbi\', axis=0)\n one two three\n rabbit 4 5 6\n ' import re nkw = com.count_not_none(items, like, regex) if (nkw > 1): raise TypeError('Keyword arguments `items`, `like`, or `regex` are mutually exclusive') if (axis is None): axis = self._info_axis_name labels = self._get_axis(axis) if (items is not None): name = self._get_axis_name(axis) return self.reindex(**{name: [r for r in items if (r in labels)]}) elif like: def f(x): return (like in to_str(x)) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x): return (matcher.search(to_str(x)) is not None) matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError('Must pass either `items`, `like`, or `regex`')
-4,145,134,183,278,041,000
Subset rows or columns of dataframe according to labels in the specified index. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like List of axis to restrict to (must not all be present). like : string Keep axis where "arg in col == True". regex : string (regular expression) Keep axis with re.search(regex, col) == True. axis : int or string axis name The axis to filter on. By default this is the info axis, 'index' for Series, 'columns' for DataFrame. Returns ------- same type as input object See Also -------- DataFrame.loc Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1,2,3], [4,5,6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6
pandas/core/generic.py
filter
kapilepatel/pandas
python
def filter(self, items=None, like=None, regex=None, axis=None): '\n Subset rows or columns of dataframe according to labels in\n the specified index.\n\n Note that this routine does not filter a dataframe on its\n contents. The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n List of axis to restrict to (must not all be present).\n like : string\n Keep axis where "arg in col == True".\n regex : string (regular expression)\n Keep axis with re.search(regex, col) == True.\n axis : int or string axis name\n The axis to filter on. By default this is the info axis,\n \'index\' for Series, \'columns\' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array(([1,2,3], [4,5,6])),\n ... index=[\'mouse\', \'rabbit\'],\n ... columns=[\'one\', \'two\', \'three\'])\n\n >>> # select columns by name\n >>> df.filter(items=[\'one\', \'three\'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex=\'e$\', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing \'bbi\'\n >>> df.filter(like=\'bbi\', axis=0)\n one two three\n rabbit 4 5 6\n ' import re nkw = com.count_not_none(items, like, regex) if (nkw > 1): raise TypeError('Keyword arguments `items`, `like`, or `regex` are mutually exclusive') if (axis is None): axis = self._info_axis_name labels = self._get_axis(axis) if (items is not None): name = self._get_axis_name(axis) return self.reindex(**{name: [r for r in items if (r in labels)]}) elif like: def f(x): return (like in to_str(x)) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x): return (matcher.search(to_str(x)) is not None) matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError('Must pass either `items`, `like`, or `regex`')
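A quick demonstration of the selectors and of the mutual-exclusivity check at the top of filter():

import pandas as pd

df = pd.DataFrame({'one': [1, 4], 'two': [2, 5]}, index=['mouse', 'rabbit'])
print(df.filter(like='bbi', axis=0))    # keeps only the 'rabbit' row
try:
    df.filter(items=['one'], like='o')  # two selectors at once
except TypeError as exc:
    print(exc)  # the keywords are mutually exclusive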
def head(self, n=5): "\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n obj_head : same type as caller\n The first `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.tail: Returns the last `n` rows.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n " return self.iloc[:n]
2,804,147,561,031,767,000
Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- obj_head : same type as caller The first `n` rows of the caller object. See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon
pandas/core/generic.py
head
kapilepatel/pandas
python
def head(self, n=5): "\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n obj_head : same type as caller\n The first `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.tail: Returns the last `n` rows.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n " return self.iloc[:n]
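Because head() is literally self.iloc[:n], a negative n falls out of ordinary slice semantics and returns everything except the last abs(n) rows:

import pandas as pd

s = pd.Series(range(5))
print(list(s.head(3)))   # [0, 1, 2]
print(list(s.head(-2)))  # [0, 1, 2]: iloc[:-2] drops the last two rows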
def tail(self, n=5): "\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail()\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3)\n animal\n 6 shark\n 7 whale\n 8 zebra\n " if (n == 0): return self.iloc[0:0] return self.iloc[(- n):]
8,234,679,230,865,114,000
Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra
pandas/core/generic.py
tail
kapilepatel/pandas
python
def tail(self, n=5): "\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail()\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3)\n animal\n 6 shark\n 7 whale\n 8 zebra\n " if (n == 0): return self.iloc[0:0] return self.iloc[(- n):]
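The n == 0 guard in tail() exists because -0 == 0 in Python, so the naive slice self.iloc[-0:] would return the whole object instead of an empty one:

import pandas as pd

s = pd.Series([1, 2, 3])
print(len(s.iloc[-0:]))  # 3: the unguarded slice keeps everything
print(len(s.tail(0)))    # 0: the guard returns an empty object instead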
def sample(self, n=None, frac=None, replace=False, weights=None, random_state=None, axis=None): "\n Return a random sample of items from an axis of object.\n\n You can use `random_state` for reproducibility.\n\n Parameters\n ----------\n n : int, optional\n Number of items from axis to return. Cannot be used with `frac`.\n Default = 1 if `frac` = None.\n frac : float, optional\n Fraction of axis items to return. Cannot be used with `n`.\n replace : bool, default False\n Sample with or without replacement.\n weights : str or ndarray-like, optional\n Default 'None' results in equal probability weighting.\n If passed a Series, will align with target object on index. Index\n values in weights not found in sampled object will be ignored and\n index values in sampled object not in weights will be assigned\n weights of zero.\n If called on a DataFrame, will accept the name of a column\n when axis = 0.\n Unless weights are a Series, weights must be same length as axis\n being sampled.\n If weights do not sum to 1, they will be normalized to sum to 1.\n Missing values in the weights column will be treated as zero.\n Infinite values not allowed.\n random_state : int or numpy.random.RandomState, optional\n Seed for the random number generator (if int), or numpy RandomState\n object.\n axis : int or string, optional\n Axis to sample. Accepts axis number or name. Default is stat axis\n for given data type (0 for Series and DataFrames, 1 for Panels).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing `n` items randomly\n sampled from the caller object.\n\n See Also\n --------\n numpy.random.choice: Generates a random sample from a given 1-D numpy\n array.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'])\n >>> df\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n Extract 3 random elements from the ``Series`` ``df['num_legs']``:\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df['num_legs'].sample(n=3, random_state=1)\n fish 0\n spider 8\n falcon 2\n Name: num_legs, dtype: int64\n\n A random 50% sample of the ``DataFrame`` with replacement:\n\n >>> df.sample(frac=0.5, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n\n Using a DataFrame column as weights. Rows with larger value in the\n `num_specimen_seen` column are more likely to be sampled.\n\n >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n " if (axis is None): axis = self._stat_axis_number axis = self._get_axis_number(axis) axis_length = self.shape[axis] rs = com.random_state(random_state) if (weights is not None): if isinstance(weights, pd.Series): weights = weights.reindex(self.axes[axis]) if isinstance(weights, string_types): if isinstance(self, pd.DataFrame): if (axis == 0): try: weights = self[weights] except KeyError: raise KeyError('String passed to weights not a valid column') else: raise ValueError('Strings can only be passed to weights when sampling from rows on a DataFrame') else: raise ValueError('Strings cannot be passed as weights when sampling from a Series or Panel.') weights = pd.Series(weights, dtype='float64') if (len(weights) != axis_length): raise ValueError('Weights and axis to be sampled must be of same length') if ((weights == np.inf).any() or (weights == (- np.inf)).any()): raise ValueError('weight vector may not include `inf` values') if (weights < 0).any(): raise ValueError('weight vector may not include negative values') weights = weights.fillna(0) if (weights.sum() != 1): if (weights.sum() != 0): weights = (weights / weights.sum()) else: raise ValueError('Invalid weights: weights sum to zero') weights = weights.values if ((n is None) and (frac is None)): n = 1 elif ((n is not None) and (frac is None) and ((n % 1) != 0)): raise ValueError('Only integers accepted as `n` values') elif ((n is None) and (frac is not None)): n = int(round((frac * axis_length))) elif ((n is not None) and (frac is not None)): raise ValueError('Please enter a value for `frac` OR `n`, not both') if (n < 0): raise ValueError('A negative number of rows requested. Please provide a positive value.') locs = rs.choice(axis_length, size=n, replace=replace, p=weights) return self.take(locs, axis=axis, is_copy=False)
2,898,805,098,321,995,300
Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Sample with or without replacement. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int or numpy.random.RandomState, optional Seed for the random number generator (if int), or numpy RandomState object. axis : int or string, optional Axis to sample. Accepts axis number or name. Default is stat axis for given data type (0 for Series and DataFrames, 1 for Panels). Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- numpy.random.choice: Generates a random sample from a given 1-D numpy array. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8
pandas/core/generic.py
sample
kapilepatel/pandas
python
def sample(self, n=None, frac=None, replace=False, weights=None, random_state=None, axis=None): "\n Return a random sample of items from an axis of object.\n\n You can use `random_state` for reproducibility.\n\n Parameters\n ----------\n n : int, optional\n Number of items from axis to return. Cannot be used with `frac`.\n Default = 1 if `frac` = None.\n frac : float, optional\n Fraction of axis items to return. Cannot be used with `n`.\n replace : bool, default False\n Sample with or without replacement.\n weights : str or ndarray-like, optional\n Default 'None' results in equal probability weighting.\n If passed a Series, will align with target object on index. Index\n values in weights not found in sampled object will be ignored and\n index values in sampled object not in weights will be assigned\n weights of zero.\n If called on a DataFrame, will accept the name of a column\n when axis = 0.\n Unless weights are a Series, weights must be same length as axis\n being sampled.\n If weights do not sum to 1, they will be normalized to sum to 1.\n Missing values in the weights column will be treated as zero.\n Infinite values not allowed.\n random_state : int or numpy.random.RandomState, optional\n Seed for the random number generator (if int), or numpy RandomState\n object.\n axis : int or string, optional\n Axis to sample. Accepts axis number or name. Default is stat axis\n for given data type (0 for Series and DataFrames, 1 for Panels).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing `n` items randomly\n sampled from the caller object.\n\n See Also\n --------\n numpy.random.choice: Generates a random sample from a given 1-D numpy\n array.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'])\n >>> df\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n Extract 3 random elements from the ``Series`` ``df['num_legs']``:\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df['num_legs'].sample(n=3, random_state=1)\n fish 0\n spider 8\n falcon 2\n Name: num_legs, dtype: int64\n\n A random 50% sample of the ``DataFrame`` with replacement:\n\n >>> df.sample(frac=0.5, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n\n Using a DataFrame column as weights. Rows with larger value in the\n `num_specimen_seen` column are more likely to be sampled.\n\n >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n " if (axis is None): axis = self._stat_axis_number axis = self._get_axis_number(axis) axis_length = self.shape[axis] rs = com.random_state(random_state) if (weights is not None): if isinstance(weights, pd.Series): weights = weights.reindex(self.axes[axis]) if isinstance(weights, string_types): if isinstance(self, pd.DataFrame): if (axis == 0): try: weights = self[weights] except KeyError: raise KeyError('String passed to weights not a valid column') else: raise ValueError('Strings can only be passed to weights when sampling from rows on a DataFrame') else: raise ValueError('Strings cannot be passed as weights when sampling from a Series or Panel.') weights = pd.Series(weights, dtype='float64') if (len(weights) != axis_length): raise ValueError('Weights and axis to be sampled must be of same length') if ((weights == np.inf).any() or (weights == (- np.inf)).any()): raise ValueError('weight vector may not include `inf` values') if (weights < 0).any(): raise ValueError('weight vector may not include negative values') weights = weights.fillna(0) if (weights.sum() != 1): if (weights.sum() != 0): weights = (weights / weights.sum()) else: raise ValueError('Invalid weights: weights sum to zero') weights = weights.values if ((n is None) and (frac is None)): n = 1 elif ((n is not None) and (frac is None) and ((n % 1) != 0)): raise ValueError('Only integers accepted as `n` values') elif ((n is None) and (frac is not None)): n = int(round((frac * axis_length))) elif ((n is not None) and (frac is not None)): raise ValueError('Please enter a value for `frac` OR `n`, not both') if (n < 0): raise ValueError('A negative number of rows requested. Please provide a positive value.') locs = rs.choice(axis_length, size=n, replace=replace, p=weights) return self.take(locs, axis=axis, is_copy=False)
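Two behaviours of the body above are worth demonstrating: weights need not sum to 1 (they are renormalized internally), and frac is converted to n by rounding. Which rows come back depends only on the seed:

import pandas as pd

df = pd.DataFrame({'x': [1, 2, 3, 4]})
# Weights [8, 1, 1, 0] sum to 10; sample() rescales them to probabilities.
print(len(df.sample(n=2, weights=[8, 1, 1, 0], random_state=0)))  # 2
# frac=0.5 on 4 rows gives n = int(round(0.5 * 4)) = 2.
print(len(df.sample(frac=0.5, random_state=0)))                   # 2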
def __finalize__(self, other, method=None, **kwargs): '\n Propagate metadata from other to self.\n\n Parameters\n ----------\n other : the object from which to get the attributes that we are going\n to propagate\n method : optional, a passed method name ; possibly to take different\n types of propagation actions based on this\n\n ' if isinstance(other, NDFrame): for name in self._metadata: object.__setattr__(self, name, getattr(other, name, None)) return self
3,008,792,019,042,377,000
Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : optional, a passed method name ; possibly to take different types of propagation actions based on this
pandas/core/generic.py
__finalize__
kapilepatel/pandas
python
def __finalize__(self, other, method=None, **kwargs): '\n Propagate metadata from other to self.\n\n Parameters\n ----------\n other : the object from which to get the attributes that we are going\n to propagate\n method : optional, a passed method name ; possibly to take different\n types of propagation actions based on this\n\n ' if isinstance(other, NDFrame): for name in self._metadata: object.__setattr__(self, name, getattr(other, name, None)) return self
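__finalize__ only copies attribute names listed in _metadata, which is the documented hook for pandas subclasses. A sketch with an illustrative TaggedFrame subclass (the class and its tag attribute are invented for this example):

import pandas as pd

class TaggedFrame(pd.DataFrame):
    _metadata = ['tag']  # names that __finalize__ will propagate

    @property
    def _constructor(self):
        return TaggedFrame

src = TaggedFrame({'a': [1, 2]})
src.tag = 'experiment-1'
dst = TaggedFrame({'a': [3]}).__finalize__(src)
print(dst.tag)  # 'experiment-1'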
def __getattr__(self, name): 'After regular attribute access, try looking up the name\n This allows simpler access to columns for interactive use.\n ' if ((name in self._internal_names_set) or (name in self._metadata) or (name in self._accessors)): return object.__getattribute__(self, name) else: if self._info_axis._can_hold_identifiers_and_holds_name(name): return self[name] return object.__getattribute__(self, name)
1,779,493,466,214,910,700
After regular attribute access, try looking up the name This allows simpler access to columns for interactive use.
pandas/core/generic.py
__getattr__
kapilepatel/pandas
python
def __getattr__(self, name): 'After regular attribute access, try looking up the name\n This allows simpler access to columns for interactive use.\n ' if ((name in self._internal_names_set) or (name in self._metadata) or (name in self._accessors)): return object.__getattribute__(self, name) else: if self._info_axis._can_hold_identifiers_and_holds_name(name): return self[name] return object.__getattribute__(self, name)
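The fallback branch is what makes attribute-style column access work interactively; it only fires for labels the info axis can hold as identifiers:

import pandas as pd

df = pd.DataFrame({'speed': [1, 2], 'not an identifier': [3, 4]})
print(df.speed.equals(df['speed']))   # True: resolved via __getattr__
print(df['not an identifier'].sum())  # 7: non-identifiers need brackets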
def __setattr__(self, name, value): 'After regular attribute access, try setting the name\n This allows simpler access to columns for interactive use.\n ' try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass if (name in self._internal_names_set): object.__setattr__(self, name, value) elif (name in self._metadata): object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif (name in self._info_axis): self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if (isinstance(self, ABCDataFrame) and is_list_like(value)): warnings.warn("Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access", stacklevel=2) object.__setattr__(self, name, value)
-2,991,854,804,421,538,000
After regular attribute access, try setting the name This allows simpler access to columns for interactive use.
pandas/core/generic.py
__setattr__
kapilepatel/pandas
python
def __setattr__(self, name, value): 'After regular attribute access, try setting the name\n This allows simpler access to columns for interactive use.\n ' try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass if (name in self._internal_names_set): object.__setattr__(self, name, value) elif (name in self._metadata): object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif (name in self._info_axis): self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if (isinstance(self, ABCDataFrame) and is_list_like(value)): warnings.warn("Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access", stacklevel=2) object.__setattr__(self, name, value)
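The except branch above is why assigning a list-like to a brand-new attribute name warns instead of creating a column, a classic interactive pitfall:

import warnings
import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    df.b = [3, 4]  # new attribute name with a list-like value
print('b' in df.columns)            # False: no column was created
print(caught[0].category.__name__)  # UserWarning, from the warn() above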
def _dir_additions(self): " add the string-like attributes from the info_axis.\n If info_axis is a MultiIndex, its first level values are used.\n " additions = {c for c in self._info_axis.unique(level=0)[:100] if (isinstance(c, string_types) and isidentifier(c))} return super(NDFrame, self)._dir_additions().union(additions)
3,693,849,406,929,341,000
add the string-like attributes from the info_axis. If info_axis is a MultiIndex, its first level values are used.
pandas/core/generic.py
_dir_additions
kapilepatel/pandas
python
def _dir_additions(self): " add the string-like attributes from the info_axis.\n If info_axis is a MultiIndex, its first level values are used.\n " additions = {c for c in self._info_axis.unique(level=0)[:100] if (isinstance(c, string_types) and isidentifier(c))} return super(NDFrame, self)._dir_additions().union(additions)
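The isidentifier filter means tab completion only advertises labels that can actually be used with attribute access:

import pandas as pd

df = pd.DataFrame({'valid_name': [1], 'not valid': [2]})
listing = dir(df)  # __dir__ pulls in _dir_additions()
print('valid_name' in listing)  # True: identifier-like label is added
print('not valid' in listing)   # False: rejected by isidentifier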
def _protect_consolidate(self, f): 'Consolidate _data -- if the blocks have changed, then clear the\n cache\n ' blocks_before = len(self._data.blocks) result = f() if (len(self._data.blocks) != blocks_before): self._clear_item_cache() return result
-8,495,753,483,392,909,000
Consolidate _data -- if the blocks have changed, then clear the cache
pandas/core/generic.py
_protect_consolidate
kapilepatel/pandas
python
def _protect_consolidate(self, f): 'Consolidate _data -- if the blocks have changed, then clear the\n cache\n ' blocks_before = len(self._data.blocks) result = f() if (len(self._data.blocks) != blocks_before): self._clear_item_cache() return result
def _consolidate_inplace(self): 'Consolidate data in place and return None' def f(): self._data = self._data.consolidate() self._protect_consolidate(f)
-4,123,879,335,490,513,000
Consolidate data in place and return None
pandas/core/generic.py
_consolidate_inplace
kapilepatel/pandas
python
def _consolidate_inplace(self): def f(): self._data = self._data.consolidate() self._protect_consolidate(f)
def _consolidate(self, inplace=False): '\n Compute NDFrame with "consolidated" internals (data of each dtype\n grouped together in a single ndarray).\n\n Parameters\n ----------\n inplace : boolean, default False\n If False return new object, otherwise modify existing object\n\n Returns\n -------\n consolidated : same type as caller\n ' inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: self._consolidate_inplace() else: f = (lambda : self._data.consolidate()) cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self)
5,301,103,510,948,307,000
Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Parameters ---------- inplace : boolean, default False If False return new object, otherwise modify existing object Returns ------- consolidated : same type as caller
pandas/core/generic.py
_consolidate
kapilepatel/pandas
python
def _consolidate(self, inplace=False): '\n Compute NDFrame with "consolidated" internals (data of each dtype\n grouped together in a single ndarray).\n\n Parameters\n ----------\n inplace : boolean, default False\n If False return new object, otherwise modify existing object\n\n Returns\n -------\n consolidated : same type as caller\n ' inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: self._consolidate_inplace() else: f = (lambda : self._data.consolidate()) cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self)
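A hedged illustration of the effect, poking at internal attributes (._data and .blocks are implementation details of this pandas version and may change): columns added one at a time live in separate blocks until consolidation merges them per dtype:

import pandas as pd

df = pd.DataFrame({'a': [1.0, 2.0]})
df['b'] = [3.0, 4.0]  # inserted later, so it sits in its own float64 block
print(len(df._data.blocks))                 # 2 before consolidation
print(len(df._consolidate()._data.blocks))  # 1: one ndarray per dtype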
def _check_inplace_setting(self, value): ' check whether we allow in-place setting with this type of value ' if self._is_mixed_type: if (not self._is_numeric_mixed_type): try: if np.isnan(value): return True except Exception: pass raise TypeError('Cannot do inplace boolean setting on mixed-types with a non np.nan value') return True
7,890,420,370,806,967,000
check whether we allow in-place setting with this type of value
pandas/core/generic.py
_check_inplace_setting
kapilepatel/pandas
python
def _check_inplace_setting(self, value): ' ' if self._is_mixed_type: if (not self._is_numeric_mixed_type): try: if np.isnan(value): return True except Exception: pass raise TypeError('Cannot do inplace boolean setting on mixed-types with a non np.nan value') return True
def as_matrix(self, columns=None): "\n Convert the frame to its Numpy-array representation.\n\n .. deprecated:: 0.23.0\n Use :meth:`DataFrame.values` instead.\n\n Parameters\n ----------\n columns : list, optional, default:None\n If None, return all columns, otherwise, returns specified columns.\n\n Returns\n -------\n values : ndarray\n If the caller is heterogeneous and contains booleans or objects,\n the result will be of dtype=object. See Notes.\n\n See Also\n --------\n DataFrame.values\n\n Notes\n -----\n Return is NOT a Numpy-matrix, rather, a Numpy-array.\n\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcast to\n int32. By numpy.find_common_type convention, mixing int64 and uint64\n will result in a float64 dtype.\n\n This method is provided for backwards compatibility. Generally,\n it is recommended to use '.values'.\n " warnings.warn('Method .as_matrix will be removed in a future version. Use .values instead.', FutureWarning, stacklevel=2) self._consolidate_inplace() return self._data.as_array(transpose=self._AXIS_REVERSED, items=columns)
5,670,880,082,385,726,000
Convert the frame to its Numpy-array representation. .. deprecated:: 0.23.0 Use :meth:`DataFrame.values` instead. Parameters ---------- columns : list, optional, default:None If None, return all columns, otherwise, returns specified columns. Returns ------- values : ndarray If the caller is heterogeneous and contains booleans or objects, the result will be of dtype=object. See Notes. See Also -------- DataFrame.values Notes ----- Return is NOT a Numpy-matrix, rather, a Numpy-array. The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcast to int32. By numpy.find_common_type convention, mixing int64 and uint64 will result in a float64 dtype. This method is provided for backwards compatibility. Generally, it is recommended to use '.values'.
pandas/core/generic.py
as_matrix
kapilepatel/pandas
python
def as_matrix(self, columns=None): "\n Convert the frame to its Numpy-array representation.\n\n .. deprecated:: 0.23.0\n Use :meth:`DataFrame.values` instead.\n\n Parameters\n ----------\n columns : list, optional, default:None\n If None, return all columns, otherwise, returns specified columns.\n\n Returns\n -------\n values : ndarray\n If the caller is heterogeneous and contains booleans or objects,\n the result will be of dtype=object. See Notes.\n\n See Also\n --------\n DataFrame.values\n\n Notes\n -----\n Return is NOT a Numpy-matrix, rather, a Numpy-array.\n\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcast to\n int32. By numpy.find_common_type convention, mixing int64 and uint64\n will result in a float64 dtype.\n\n This method is provided for backwards compatibility. Generally,\n it is recommended to use '.values'.\n " warnings.warn('Method .as_matrix will be removed in a future version. Use .values instead.', FutureWarning, stacklevel=2) self._consolidate_inplace() return self._data.as_array(transpose=self._AXIS_REVERSED, items=columns)
@property def values(self): "\n Return a Numpy representation of the DataFrame.\n\n .. warning::\n\n We recommend using :meth:`DataFrame.to_numpy` instead.\n\n Only the values in the DataFrame will be returned, the axes labels\n will be removed.\n\n Returns\n -------\n numpy.ndarray\n The values of the DataFrame.\n\n See Also\n --------\n DataFrame.to_numpy : Recommended alternative to this method.\n DataFrame.index : Retrieve the index labels.\n DataFrame.columns : Retrieving the column names.\n\n Notes\n -----\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcast to\n int32. By :func:`numpy.find_common_type` convention, mixing int64\n and uint64 will result in a float64 dtype.\n\n Examples\n --------\n A DataFrame where all columns are the same type (e.g., int64) results\n in an array of the same type.\n\n >>> df = pd.DataFrame({'age': [ 3, 29],\n ... 'height': [94, 170],\n ... 'weight': [31, 115]})\n >>> df\n age height weight\n 0 3 94 31\n 1 29 170 115\n >>> df.dtypes\n age int64\n height int64\n weight int64\n dtype: object\n >>> df.values\n array([[ 3, 94, 31],\n [ 29, 170, 115]], dtype=int64)\n\n A DataFrame with mixed type columns(e.g., str/object, int64, float32)\n results in an ndarray of the broadest type that accommodates these\n mixed types (e.g., object).\n\n >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),\n ... ('lion', 80.5, 1),\n ... ('monkey', np.nan, None)],\n ... columns=('name', 'max_speed', 'rank'))\n >>> df2.dtypes\n name object\n max_speed float64\n rank object\n dtype: object\n >>> df2.values\n array([['parrot', 24.0, 'second'],\n ['lion', 80.5, 1],\n ['monkey', nan, None]], dtype=object)\n " self._consolidate_inplace() return self._data.as_array(transpose=self._AXIS_REVERSED)
8,607,021,873,946,367,000
Return a Numpy representation of the DataFrame. .. warning:: We recommend using :meth:`DataFrame.to_numpy` instead. Only the values in the DataFrame will be returned, the axes labels will be removed. Returns ------- numpy.ndarray The values of the DataFrame. See Also -------- DataFrame.to_numpy : Recommended alternative to this method. DataFrame.index : Retrieve the index labels. DataFrame.columns : Retrieving the column names. Notes ----- The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcast to int32. By :func:`numpy.find_common_type` convention, mixing int64 and uint64 will result in a float64 dtype. Examples -------- A DataFrame where all columns are the same type (e.g., int64) results in an array of the same type. >>> df = pd.DataFrame({'age': [ 3, 29], ... 'height': [94, 170], ... 'weight': [31, 115]}) >>> df age height weight 0 3 94 31 1 29 170 115 >>> df.dtypes age int64 height int64 weight int64 dtype: object >>> df.values array([[ 3, 94, 31], [ 29, 170, 115]], dtype=int64) A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray of the broadest type that accommodates these mixed types (e.g., object). >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'), ... ('lion', 80.5, 1), ... ('monkey', np.nan, None)], ... columns=('name', 'max_speed', 'rank')) >>> df2.dtypes name object max_speed float64 rank object dtype: object >>> df2.values array([['parrot', 24.0, 'second'], ['lion', 80.5, 1], ['monkey', nan, None]], dtype=object)
pandas/core/generic.py
values
kapilepatel/pandas
python
@property def values(self): "\n Return a Numpy representation of the DataFrame.\n\n .. warning::\n\n We recommend using :meth:`DataFrame.to_numpy` instead.\n\n Only the values in the DataFrame will be returned, the axes labels\n will be removed.\n\n Returns\n -------\n numpy.ndarray\n The values of the DataFrame.\n\n See Also\n --------\n DataFrame.to_numpy : Recommended alternative to this method.\n DataFrame.index : Retrieve the index labels.\n DataFrame.columns : Retrieving the column names.\n\n Notes\n -----\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcast to\n int32. By :func:`numpy.find_common_type` convention, mixing int64\n and uint64 will result in a float64 dtype.\n\n Examples\n --------\n A DataFrame where all columns are the same type (e.g., int64) results\n in an array of the same type.\n\n >>> df = pd.DataFrame({'age': [ 3, 29],\n ... 'height': [94, 170],\n ... 'weight': [31, 115]})\n >>> df\n age height weight\n 0 3 94 31\n 1 29 170 115\n >>> df.dtypes\n age int64\n height int64\n weight int64\n dtype: object\n >>> df.values\n array([[ 3, 94, 31],\n [ 29, 170, 115]], dtype=int64)\n\n A DataFrame with mixed type columns(e.g., str/object, int64, float32)\n results in an ndarray of the broadest type that accommodates these\n mixed types (e.g., object).\n\n >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),\n ... ('lion', 80.5, 1),\n ... ('monkey', np.nan, None)],\n ... columns=('name', 'max_speed', 'rank'))\n >>> df2.dtypes\n name object\n max_speed float64\n rank object\n dtype: object\n >>> df2.values\n array([['parrot', 24.0, 'second'],\n ['lion', 80.5, 1],\n ['monkey', nan, None]], dtype=object)\n " self._consolidate_inplace() return self._data.as_array(transpose=self._AXIS_REVERSED)
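The upcasting rules from the Notes are easy to verify directly: int32 with uint8 upcasts to int32, while int64 with uint64 has no common integer type and falls back to float64:

import numpy as np
import pandas as pd

df = pd.DataFrame({'i': np.array([1, 2], dtype='int32'),
                   'u': np.array([1, 2], dtype='uint8')})
print(df.values.dtype)   # int32

df2 = pd.DataFrame({'i': np.array([1], dtype='int64'),
                    'u': np.array([1], dtype='uint64')})
print(df2.values.dtype)  # float64, per numpy.find_common_type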
@property def _values(self): 'internal implementation' return self.values
-4,509,188,480,570,620,400
internal implementation
pandas/core/generic.py
_values
kapilepatel/pandas
python
@property def _values(self): return self.values
def get_values(self): '\n Return an ndarray after converting sparse values to dense.\n\n This is the same as ``.values`` for non-sparse data. For sparse\n data contained in a `SparseArray`, the data are first\n converted to a dense representation.\n\n Returns\n -------\n numpy.ndarray\n Numpy representation of DataFrame.\n\n See Also\n --------\n values : Numpy representation of DataFrame.\n SparseArray : Container for sparse data.\n\n Examples\n --------\n >>> df = pd.DataFrame({\'a\': [1, 2], \'b\': [True, False],\n ... \'c\': [1.0, 2.0]})\n >>> df\n a b c\n 0 1 True 1.0\n 1 2 False 2.0\n\n >>> df.get_values()\n array([[1, True, 1.0], [2, False, 2.0]], dtype=object)\n\n >>> df = pd.DataFrame({"a": pd.SparseArray([1, None, None]),\n ... "c": [1.0, 2.0, 3.0]})\n >>> df\n a c\n 0 1.0 1.0\n 1 NaN 2.0\n 2 NaN 3.0\n\n >>> df.get_values()\n array([[ 1., 1.],\n [nan, 2.],\n [nan, 3.]])\n ' return self.values
4,427,257,201,389,257,000
Return an ndarray after converting sparse values to dense. This is the same as ``.values`` for non-sparse data. For sparse data contained in a `SparseArray`, the data are first converted to a dense representation. Returns ------- numpy.ndarray Numpy representation of DataFrame. See Also -------- values : Numpy representation of DataFrame. SparseArray : Container for sparse data. Examples -------- >>> df = pd.DataFrame({'a': [1, 2], 'b': [True, False], ... 'c': [1.0, 2.0]}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 >>> df.get_values() array([[1, True, 1.0], [2, False, 2.0]], dtype=object) >>> df = pd.DataFrame({"a": pd.SparseArray([1, None, None]), ... "c": [1.0, 2.0, 3.0]}) >>> df a c 0 1.0 1.0 1 NaN 2.0 2 NaN 3.0 >>> df.get_values() array([[ 1., 1.], [nan, 2.], [nan, 3.]])
pandas/core/generic.py
get_values
kapilepatel/pandas
python
def get_values(self): '\n Return an ndarray after converting sparse values to dense.\n\n This is the same as ``.values`` for non-sparse data. For sparse\n data contained in a `SparseArray`, the data are first\n converted to a dense representation.\n\n Returns\n -------\n numpy.ndarray\n Numpy representation of DataFrame.\n\n See Also\n --------\n values : Numpy representation of DataFrame.\n SparseArray : Container for sparse data.\n\n Examples\n --------\n >>> df = pd.DataFrame({\'a\': [1, 2], \'b\': [True, False],\n ... \'c\': [1.0, 2.0]})\n >>> df\n a b c\n 0 1 True 1.0\n 1 2 False 2.0\n\n >>> df.get_values()\n array([[1, True, 1.0], [2, False, 2.0]], dtype=object)\n\n >>> df = pd.DataFrame({"a": pd.SparseArray([1, None, None]),\n ... "c": [1.0, 2.0, 3.0]})\n >>> df\n a c\n 0 1.0 1.0\n 1 NaN 2.0\n 2 NaN 3.0\n\n >>> df.get_values()\n array([[ 1., 1.],\n [nan, 2.],\n [nan, 3.]])\n ' return self.values
def get_dtype_counts(self): "\n Return counts of unique dtypes in this object.\n\n Returns\n -------\n dtype : Series\n Series with the count of columns with each dtype.\n\n See Also\n --------\n dtypes : Return the dtypes in this object.\n\n Examples\n --------\n >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]\n >>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])\n >>> df\n str int float\n 0 a 1 1.0\n 1 b 2 2.0\n 2 c 3 3.0\n\n >>> df.get_dtype_counts()\n float64 1\n int64 1\n object 1\n dtype: int64\n " from pandas import Series return Series(self._data.get_dtype_counts())
340,209,381,645,793,860
Return counts of unique dtypes in this object. Returns ------- dtype : Series Series with the count of columns with each dtype. See Also -------- dtypes : Return the dtypes in this object. Examples -------- >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]] >>> df = pd.DataFrame(a, columns=['str', 'int', 'float']) >>> df str int float 0 a 1 1.0 1 b 2 2.0 2 c 3 3.0 >>> df.get_dtype_counts() float64 1 int64 1 object 1 dtype: int64
pandas/core/generic.py
get_dtype_counts
kapilepatel/pandas
python
def get_dtype_counts(self): "\n Return counts of unique dtypes in this object.\n\n Returns\n -------\n dtype : Series\n Series with the count of columns with each dtype.\n\n See Also\n --------\n dtypes : Return the dtypes in this object.\n\n Examples\n --------\n >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]\n >>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])\n >>> df\n str int float\n 0 a 1 1.0\n 1 b 2 2.0\n 2 c 3 3.0\n\n >>> df.get_dtype_counts()\n float64 1\n int64 1\n object 1\n dtype: int64\n " from pandas import Series return Series(self._data.get_dtype_counts())
def get_ftype_counts(self): "\n Return counts of unique ftypes in this object.\n\n .. deprecated:: 0.23.0\n\n This is useful for SparseDataFrame or for DataFrames containing\n sparse arrays.\n\n Returns\n -------\n dtype : Series\n Series with the count of columns with each type and\n sparsity (dense/sparse).\n\n See Also\n --------\n ftypes : Return ftypes (indication of sparse/dense and dtype) in\n this object.\n\n Examples\n --------\n >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]\n >>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])\n >>> df\n str int float\n 0 a 1 1.0\n 1 b 2 2.0\n 2 c 3 3.0\n\n >>> df.get_ftype_counts() # doctest: +SKIP\n float64:dense 1\n int64:dense 1\n object:dense 1\n dtype: int64\n " warnings.warn('get_ftype_counts is deprecated and will be removed in a future version', FutureWarning, stacklevel=2) from pandas import Series return Series(self._data.get_ftype_counts())
1,105,224,838,373,287,300
Return counts of unique ftypes in this object. .. deprecated:: 0.23.0 This is useful for SparseDataFrame or for DataFrames containing sparse arrays. Returns ------- dtype : Series Series with the count of columns with each type and sparsity (dense/sparse). See Also -------- ftypes : Return ftypes (indication of sparse/dense and dtype) in this object. Examples -------- >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]] >>> df = pd.DataFrame(a, columns=['str', 'int', 'float']) >>> df str int float 0 a 1 1.0 1 b 2 2.0 2 c 3 3.0 >>> df.get_ftype_counts() # doctest: +SKIP float64:dense 1 int64:dense 1 object:dense 1 dtype: int64
pandas/core/generic.py
get_ftype_counts
kapilepatel/pandas
python
def get_ftype_counts(self): "\n Return counts of unique ftypes in this object.\n\n .. deprecated:: 0.23.0\n\n This is useful for SparseDataFrame or for DataFrames containing\n sparse arrays.\n\n Returns\n -------\n dtype : Series\n Series with the count of columns with each type and\n sparsity (dense/sparse).\n\n See Also\n --------\n ftypes : Return ftypes (indication of sparse/dense and dtype) in\n this object.\n\n Examples\n --------\n >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]\n >>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])\n >>> df\n str int float\n 0 a 1 1.0\n 1 b 2 2.0\n 2 c 3 3.0\n\n >>> df.get_ftype_counts() # doctest: +SKIP\n float64:dense 1\n int64:dense 1\n object:dense 1\n dtype: int64\n " warnings.warn('get_ftype_counts is deprecated and will be removed in a future version', FutureWarning, stacklevel=2) from pandas import Series return Series(self._data.get_ftype_counts())
@property def dtypes(self): "\n Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. Columns\n with mixed types are stored with the ``object`` dtype. See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type of each column.\n\n See Also\n --------\n DataFrame.ftypes : Dtype and sparsity information.\n\n Examples\n --------\n >>> df = pd.DataFrame({'float': [1.0],\n ... 'int': [1],\n ... 'datetime': [pd.Timestamp('20180310')],\n ... 'string': ['foo']})\n >>> df.dtypes\n float float64\n int int64\n datetime datetime64[ns]\n string object\n dtype: object\n " from pandas import Series return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
-9,017,179,737,222,990,000
Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. See Also -------- DataFrame.ftypes : Dtype and sparsity information. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object
pandas/core/generic.py
dtypes
kapilepatel/pandas
python
@property def dtypes(self): "\n Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. Columns\n with mixed types are stored with the ``object`` dtype. See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type of each column.\n\n See Also\n --------\n DataFrame.ftypes : Dtype and sparsity information.\n\n Examples\n --------\n >>> df = pd.DataFrame({'float': [1.0],\n ... 'int': [1],\n ... 'datetime': [pd.Timestamp('20180310')],\n ... 'string': ['foo']})\n >>> df.dtypes\n float float64\n int int64\n datetime datetime64[ns]\n string object\n dtype: object\n " from pandas import Series return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
@property def ftypes(self): "\n Return the ftypes (indication of sparse/dense and dtype) in DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. Columns\n with mixed types are stored with the ``object`` dtype. See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type and indication of sparse/dense of each column.\n\n See Also\n --------\n DataFrame.dtypes: Series with just dtype information.\n SparseDataFrame : Container for sparse tabular data.\n\n Notes\n -----\n Sparse data should have the same dtypes as its dense representation.\n\n Examples\n --------\n >>> arr = np.random.RandomState(0).randn(100, 4)\n >>> arr[arr < .8] = np.nan\n >>> pd.DataFrame(arr).ftypes\n 0 float64:dense\n 1 float64:dense\n 2 float64:dense\n 3 float64:dense\n dtype: object\n\n >>> pd.SparseDataFrame(arr).ftypes\n 0 float64:sparse\n 1 float64:sparse\n 2 float64:sparse\n 3 float64:sparse\n dtype: object\n " from pandas import Series return Series(self._data.get_ftypes(), index=self._info_axis, dtype=np.object_)
-5,507,165,227,604,726,000
Return the ftypes (indication of sparse/dense and dtype) in DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type and indication of sparse/dense of each column. See Also -------- DataFrame.dtypes: Series with just dtype information. SparseDataFrame : Container for sparse tabular data. Notes ----- Sparse data should have the same dtypes as its dense representation. Examples -------- >>> arr = np.random.RandomState(0).randn(100, 4) >>> arr[arr < .8] = np.nan >>> pd.DataFrame(arr).ftypes 0 float64:dense 1 float64:dense 2 float64:dense 3 float64:dense dtype: object >>> pd.SparseDataFrame(arr).ftypes 0 float64:sparse 1 float64:sparse 2 float64:sparse 3 float64:sparse dtype: object
pandas/core/generic.py
ftypes
kapilepatel/pandas
python
@property def ftypes(self): "\n Return the ftypes (indication of sparse/dense and dtype) in DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. Columns\n with mixed types are stored with the ``object`` dtype. See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type and indication of sparse/dense of each column.\n\n See Also\n --------\n DataFrame.dtypes: Series with just dtype information.\n SparseDataFrame : Container for sparse tabular data.\n\n Notes\n -----\n Sparse data should have the same dtypes as its dense representation.\n\n Examples\n --------\n >>> arr = np.random.RandomState(0).randn(100, 4)\n >>> arr[arr < .8] = np.nan\n >>> pd.DataFrame(arr).ftypes\n 0 float64:dense\n 1 float64:dense\n 2 float64:dense\n 3 float64:dense\n dtype: object\n\n >>> pd.SparseDataFrame(arr).ftypes\n 0 float64:sparse\n 1 float64:sparse\n 2 float64:sparse\n 3 float64:sparse\n dtype: object\n " from pandas import Series return Series(self._data.get_ftypes(), index=self._info_axis, dtype=np.object_)
def as_blocks(self, copy=True): '\n Convert the frame to a dict of dtype -> Constructor Types that each has\n a homogeneous dtype.\n\n .. deprecated:: 0.21.0\n\n NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in\n as_matrix)\n\n Parameters\n ----------\n copy : boolean, default True\n\n Returns\n -------\n values : a dict of dtype -> Constructor Types\n ' warnings.warn('as_blocks is deprecated and will be removed in a future version', FutureWarning, stacklevel=2) return self._to_dict_of_blocks(copy=copy)
-1,815,616,619,229,013,200
Convert the frame to a dict of dtype -> Constructor Types that each has a homogeneous dtype. .. deprecated:: 0.21.0 NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in as_matrix) Parameters ---------- copy : boolean, default True Returns ------- values : a dict of dtype -> Constructor Types
pandas/core/generic.py
as_blocks
kapilepatel/pandas
python
def as_blocks(self, copy=True): '\n Convert the frame to a dict of dtype -> Constructor Types, each of which\n has a homogeneous dtype.\n\n .. deprecated:: 0.21.0\n\n NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in\n as_matrix)\n\n Parameters\n ----------\n copy : boolean, default True\n\n Returns\n -------\n values : a dict of dtype -> Constructor Types\n ' warnings.warn('as_blocks is deprecated and will be removed in a future version', FutureWarning, stacklevel=2) return self._to_dict_of_blocks(copy=copy)
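A minimal usage sketch for this deprecated method, assuming a pandas version from the 0.2x line where it still exists (the call also emits a FutureWarning, and ``pd`` is the usual ``import pandas as pd``); ``DataFrame.select_dtypes`` is the supported public way to slice columns by dtype:

>>> df = pd.DataFrame({'a': [1, 2], 'b': [1.5, 2.5]})
>>> blocks = df.as_blocks()  # dict of dtype name -> homogeneous DataFrame
>>> sorted(blocks.keys())
['float64', 'int64']
>>> blocks['float64']
     b
0  1.5
1  2.5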
@property def blocks(self): '\n Internal property, property synonym for as_blocks().\n\n .. deprecated:: 0.21.0\n ' return self.as_blocks()
231,908,323,301,257,660
Internal property, property synonym for as_blocks(). .. deprecated:: 0.21.0
pandas/core/generic.py
blocks
kapilepatel/pandas
python
@property def blocks(self): '\n Internal property, property synonym for as_blocks().\n\n .. deprecated:: 0.21.0\n ' return self.as_blocks()
def _to_dict_of_blocks(self, copy=True): '\n Return a dict of dtype -> Constructor Types,\n each of which has a homogeneous dtype.\n\n Internal ONLY\n ' return {k: self._constructor(v).__finalize__(self) for (k, v) in self._data.to_dict(copy=copy).items()}
2,710,795,027,365,615,000
Return a dict of dtype -> Constructor Types, each of which has a homogeneous dtype. Internal ONLY
pandas/core/generic.py
_to_dict_of_blocks
kapilepatel/pandas
python
def _to_dict_of_blocks(self, copy=True): '\n Return a dict of dtype -> Constructor Types,\n each of which has a homogeneous dtype.\n\n Internal ONLY\n ' return {k: self._constructor(v).__finalize__(self) for (k, v) in self._data.to_dict(copy=copy).items()}
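This helper is internal only; a rough public analogue, sketched with ``select_dtypes`` (column names illustrative):

>>> df = pd.DataFrame({'a': [1, 2], 'b': [1.5, 2.5]})
>>> df.select_dtypes(include=['float64'])
     b
0  1.5
1  2.5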
def astype(self, dtype, copy=True, errors='raise', **kwargs): "\n Cast a pandas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type. Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n copy : bool, default True\n Return a copy when ``copy=True`` (be very careful setting\n ``copy=False`` as changes to values then may propagate to other\n pandas objects).\n errors : {'raise', 'ignore'}, default 'raise'\n Control raising of exceptions on invalid data for provided dtype.\n\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object\n\n .. versionadded:: 0.20.0\n\n kwargs : keyword arguments to pass on to the constructor\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n numpy.ndarray.astype : Cast a numpy array to a specified type.\n\n Examples\n --------\n >>> ser = pd.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n Convert to categorical type:\n\n >>> ser.astype('category')\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [1, 2]\n\n Convert to ordered categorical type with custom ordering:\n\n >>> cat_dtype = pd.api.types.CategoricalDtype(\n ... categories=[2, 1], ordered=True)\n >>> ser.astype(cat_dtype)\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [2 < 1]\n\n Note that using ``copy=False`` and changing data on a new\n pandas object may propagate changes:\n\n >>> s1 = pd.Series([1,2])\n >>> s2 = s1.astype('int64', copy=False)\n >>> s2[0] = 10\n >>> s1 # note that s1[0] has changed too\n 0 10\n 1 2\n dtype: int64\n " if is_dict_like(dtype): if (self.ndim == 1): if ((len(dtype) > 1) or (self.name not in dtype)): raise KeyError('Only the Series name can be used for the key in Series dtype mappings.') new_type = dtype[self.name] return self.astype(new_type, copy, errors, **kwargs) elif (self.ndim > 2): raise NotImplementedError('astype() only accepts a dtype arg of type dict when invoked on Series and DataFrames. A single dtype must be specified when invoked on a Panel.') for col_name in dtype.keys(): if (col_name not in self): raise KeyError('Only a column name can be used for the key in a dtype mappings argument.') results = [] for (col_name, col) in self.iteritems(): if (col_name in dtype): results.append(col.astype(dtype[col_name], copy=copy)) else: results.append((col.copy() if copy else col)) elif (is_extension_array_dtype(dtype) and (self.ndim > 1)): results = (self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns))) else: new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors, **kwargs) return self._constructor(new_data).__finalize__(self) result = pd.concat(results, axis=1, copy=False) result.columns = self.columns return result
8,743,313,055,423,462,000
Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object .. versionadded:: 0.20.0 kwargs : keyword arguments to pass on to the constructor Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Examples -------- >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int64): [1, 2] Convert to ordered categorical type with custom ordering: >>> cat_dtype = pd.api.types.CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Note that using ``copy=False`` and changing data on a new pandas object may propagate changes: >>> s1 = pd.Series([1,2]) >>> s2 = s1.astype('int64', copy=False) >>> s2[0] = 10 >>> s1 # note that s1[0] has changed too 0 10 1 2 dtype: int64
pandas/core/generic.py
astype
kapilepatel/pandas
python
def astype(self, dtype, copy=True, errors='raise', **kwargs): "\n Cast a pandas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type. Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n copy : bool, default True\n Return a copy when ``copy=True`` (be very careful setting\n ``copy=False`` as changes to values then may propagate to other\n pandas objects).\n errors : {'raise', 'ignore'}, default 'raise'\n Control raising of exceptions on invalid data for provided dtype.\n\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object\n\n .. versionadded:: 0.20.0\n\n kwargs : keyword arguments to pass on to the constructor\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n numpy.ndarray.astype : Cast a numpy array to a specified type.\n\n Examples\n --------\n >>> ser = pd.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n Convert to categorical type:\n\n >>> ser.astype('category')\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [1, 2]\n\n Convert to ordered categorical type with custom ordering:\n\n >>> cat_dtype = pd.api.types.CategoricalDtype(\n ... categories=[2, 1], ordered=True)\n >>> ser.astype(cat_dtype)\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [2 < 1]\n\n Note that using ``copy=False`` and changing data on a new\n pandas object may propagate changes:\n\n >>> s1 = pd.Series([1,2])\n >>> s2 = s1.astype('int64', copy=False)\n >>> s2[0] = 10\n >>> s1 # note that s1[0] has changed too\n 0 10\n 1 2\n dtype: int64\n " if is_dict_like(dtype): if (self.ndim == 1): if ((len(dtype) > 1) or (self.name not in dtype)): raise KeyError('Only the Series name can be used for the key in Series dtype mappings.') new_type = dtype[self.name] return self.astype(new_type, copy, errors, **kwargs) elif (self.ndim > 2): raise NotImplementedError('astype() only accepts a dtype arg of type dict when invoked on Series and DataFrames. A single dtype must be specified when invoked on a Panel.') for col_name in dtype.keys(): if (col_name not in self): raise KeyError('Only a column name can be used for the key in a dtype mappings argument.') results = [] for (col_name, col) in self.iteritems(): if (col_name in dtype): results.append(col.astype(dtype[col_name], copy=copy)) else: results.append((col.copy() if copy else col)) elif (is_extension_array_dtype(dtype) and (self.ndim > 1)): results = (self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns))) else: new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors, **kwargs) return self._constructor(new_data).__finalize__(self) result = pd.concat(results, axis=1, copy=False) result.columns = self.columns return result
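The docstring examples above only cover Series; a short sketch of the dict form on a DataFrame, which exercises the column-by-column branch in the code (column names illustrative):

>>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df.astype({'a': 'float64'}).dtypes
a    float64
b      int64
dtype: object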
def copy(self, deep=True): '\n Make a copy of this object\'s indices and data.\n\n When ``deep=True`` (default), a new object will be created with a\n copy of the calling object\'s data and indices. Modifications to\n the data or indices of the copy will not be reflected in the\n original object (see notes below).\n\n When ``deep=False``, a new object will be created without copying\n the calling object\'s data or index (only references to the data\n and index are copied). Any changes to the data of the original\n will be reflected in the shallow copy (and vice versa).\n\n Parameters\n ----------\n deep : bool, default True\n Make a deep copy, including a copy of the data and the indices.\n With ``deep=False`` neither the indices nor the data are copied.\n\n Returns\n -------\n copy : Series, DataFrame or Panel\n Object type matches caller.\n\n Notes\n -----\n When ``deep=True``, data is copied but actual Python objects\n will not be copied recursively, only the reference to the object.\n This is in contrast to `copy.deepcopy` in the Standard Library,\n which recursively copies object data (see examples below).\n\n While ``Index`` objects are copied when ``deep=True``, the underlying\n numpy array is not copied for performance reasons. Since ``Index`` is\n immutable, the underlying data can be safely shared and a copy\n is not needed.\n\n Examples\n --------\n >>> s = pd.Series([1, 2], index=["a", "b"])\n >>> s\n a 1\n b 2\n dtype: int64\n\n >>> s_copy = s.copy()\n >>> s_copy\n a 1\n b 2\n dtype: int64\n\n **Shallow copy versus default (deep) copy:**\n\n >>> s = pd.Series([1, 2], index=["a", "b"])\n >>> deep = s.copy()\n >>> shallow = s.copy(deep=False)\n\n Shallow copy shares data and index with original.\n\n >>> s is shallow\n False\n >>> s.values is shallow.values and s.index is shallow.index\n True\n\n Deep copy has own copy of data and index.\n\n >>> s is deep\n False\n >>> s.values is deep.values or s.index is deep.index\n False\n\n Updates to the data shared by shallow copy and original is reflected\n in both; deep copy remains unchanged.\n\n >>> s[0] = 3\n >>> shallow[1] = 4\n >>> s\n a 3\n b 4\n dtype: int64\n >>> shallow\n a 3\n b 4\n dtype: int64\n >>> deep\n a 1\n b 2\n dtype: int64\n\n Note that when copying an object containing Python objects, a deep copy\n will copy the data, but will not do so recursively. Updating a nested\n data object will be reflected in the deep copy.\n\n >>> s = pd.Series([[1, 2], [3, 4]])\n >>> deep = s.copy()\n >>> s[0][0] = 10\n >>> s\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n >>> deep\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n ' data = self._data.copy(deep=deep) return self._constructor(data).__finalize__(self)
-760,128,240,262,479,000
Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- copy : Series, DataFrame or Panel Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original are reflected in both; deep copy remains unchanged. >>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object
pandas/core/generic.py
copy
kapilepatel/pandas
python
def copy(self, deep=True): '\n Make a copy of this object\'s indices and data.\n\n When ``deep=True`` (default), a new object will be created with a\n copy of the calling object\'s data and indices. Modifications to\n the data or indices of the copy will not be reflected in the\n original object (see notes below).\n\n When ``deep=False``, a new object will be created without copying\n the calling object\'s data or index (only references to the data\n and index are copied). Any changes to the data of the original\n will be reflected in the shallow copy (and vice versa).\n\n Parameters\n ----------\n deep : bool, default True\n Make a deep copy, including a copy of the data and the indices.\n With ``deep=False`` neither the indices nor the data are copied.\n\n Returns\n -------\n copy : Series, DataFrame or Panel\n Object type matches caller.\n\n Notes\n -----\n When ``deep=True``, data is copied but actual Python objects\n will not be copied recursively, only the reference to the object.\n This is in contrast to `copy.deepcopy` in the Standard Library,\n which recursively copies object data (see examples below).\n\n While ``Index`` objects are copied when ``deep=True``, the underlying\n numpy array is not copied for performance reasons. Since ``Index`` is\n immutable, the underlying data can be safely shared and a copy\n is not needed.\n\n Examples\n --------\n >>> s = pd.Series([1, 2], index=["a", "b"])\n >>> s\n a 1\n b 2\n dtype: int64\n\n >>> s_copy = s.copy()\n >>> s_copy\n a 1\n b 2\n dtype: int64\n\n **Shallow copy versus default (deep) copy:**\n\n >>> s = pd.Series([1, 2], index=["a", "b"])\n >>> deep = s.copy()\n >>> shallow = s.copy(deep=False)\n\n Shallow copy shares data and index with original.\n\n >>> s is shallow\n False\n >>> s.values is shallow.values and s.index is shallow.index\n True\n\n Deep copy has own copy of data and index.\n\n >>> s is deep\n False\n >>> s.values is deep.values or s.index is deep.index\n False\n\n Updates to the data shared by shallow copy and original is reflected\n in both; deep copy remains unchanged.\n\n >>> s[0] = 3\n >>> shallow[1] = 4\n >>> s\n a 3\n b 4\n dtype: int64\n >>> shallow\n a 3\n b 4\n dtype: int64\n >>> deep\n a 1\n b 2\n dtype: int64\n\n Note that when copying an object containing Python objects, a deep copy\n will copy the data, but will not do so recursively. Updating a nested\n data object will be reflected in the deep copy.\n\n >>> s = pd.Series([[1, 2], [3, 4]])\n >>> deep = s.copy()\n >>> s[0][0] = 10\n >>> s\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n >>> deep\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n ' data = self._data.copy(deep=deep) return self._constructor(data).__finalize__(self)
def __deepcopy__(self, memo=None): '\n Parameters\n ----------\n memo, default None\n Standard signature. Unused\n ' if (memo is None): memo = {} return self.copy(deep=True)
7,599,436,350,404,427,000
Parameters ---------- memo, default None Standard signature. Unused
pandas/core/generic.py
__deepcopy__
kapilepatel/pandas
python
def __deepcopy__(self, memo=None): '\n Parameters\n ----------\n memo, default None\n Standard signature. Unused\n ' if (memo is None): memo = {} return self.copy(deep=True)
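A small sketch showing that the standard-library hook routes to ``copy(deep=True)``:

>>> import copy
>>> s = pd.Series([1, 2])
>>> s2 = copy.deepcopy(s)  # dispatches to Series.__deepcopy__
>>> s2[0] = 10
>>> s[0]
1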
def _convert(self, datetime=False, numeric=False, timedelta=False, coerce=False, copy=True): '\n Attempt to infer better dtype for object columns\n\n Parameters\n ----------\n datetime : boolean, default False\n If True, convert to date where possible.\n numeric : boolean, default False\n If True, attempt to convert to numbers (including strings), with\n unconvertible values becoming NaN.\n timedelta : boolean, default False\n If True, convert to timedelta where possible.\n coerce : boolean, default False\n If True, force conversion with unconvertible values converted to\n nulls (NaN or NaT)\n copy : boolean, default True\n If True, return a copy even if no copy is necessary (e.g. no\n conversion was done). Note: This is meant for internal use, and\n should not be confused with inplace.\n\n Returns\n -------\n converted : same as input object\n ' return self._constructor(self._data.convert(datetime=datetime, numeric=numeric, timedelta=timedelta, coerce=coerce, copy=copy)).__finalize__(self)
-4,038,469,186,852,983,300
Attempt to infer better dtype for object columns Parameters ---------- datetime : boolean, default False If True, convert to date where possible. numeric : boolean, default False If True, attempt to convert to numbers (including strings), with unconvertible values becoming NaN. timedelta : boolean, default False If True, convert to timedelta where possible. coerce : boolean, default False If True, force conversion with unconvertible values converted to nulls (NaN or NaT) copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object
pandas/core/generic.py
_convert
kapilepatel/pandas
python
def _convert(self, datetime=False, numeric=False, timedelta=False, coerce=False, copy=True): '\n Attempt to infer better dtype for object columns\n\n Parameters\n ----------\n datetime : boolean, default False\n If True, convert to date where possible.\n numeric : boolean, default False\n If True, attempt to convert to numbers (including strings), with\n unconvertible values becoming NaN.\n timedelta : boolean, default False\n If True, convert to timedelta where possible.\n coerce : boolean, default False\n If True, force conversion with unconvertible values converted to\n nulls (NaN or NaT)\n copy : boolean, default True\n If True, return a copy even if no copy is necessary (e.g. no\n conversion was done). Note: This is meant for internal use, and\n should not be confused with inplace.\n\n Returns\n -------\n converted : same as input object\n ' return self._constructor(self._data.convert(datetime=datetime, numeric=numeric, timedelta=timedelta, coerce=coerce, copy=copy)).__finalize__(self)
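``_convert`` is internal; the closest public entry point for the soft (non-coercing) path is ``infer_objects``. A hedged sketch of the difference between convertible and non-convertible object columns:

>>> pd.Series([1, 2], dtype=object).infer_objects().dtype
dtype('int64')
>>> pd.Series(['1', '2'], dtype=object).infer_objects().dtype  # strings are left alone
dtype('O')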
def convert_objects(self, convert_dates=True, convert_numeric=False, convert_timedeltas=True, copy=True): "\n Attempt to infer better dtype for object columns.\n\n .. deprecated:: 0.21.0\n\n Parameters\n ----------\n convert_dates : boolean, default True\n If True, convert to date where possible. If 'coerce', force\n conversion, with unconvertible values becoming NaT.\n convert_numeric : boolean, default False\n If True, attempt to coerce to numbers (including strings), with\n unconvertible values becoming NaN.\n convert_timedeltas : boolean, default True\n If True, convert to timedelta where possible. If 'coerce', force\n conversion, with unconvertible values becoming NaT.\n copy : boolean, default True\n If True, return a copy even if no copy is necessary (e.g. no\n conversion was done). Note: This is meant for internal use, and\n should not be confused with inplace.\n\n Returns\n -------\n converted : same as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n " msg = 'convert_objects is deprecated. To re-infer data dtypes for object columns, use {klass}.infer_objects()\nFor all other conversions use the data-type specific converters pd.to_datetime, pd.to_timedelta and pd.to_numeric.'.format(klass=self.__class__.__name__) warnings.warn(msg, FutureWarning, stacklevel=2) return self._constructor(self._data.convert(convert_dates=convert_dates, convert_numeric=convert_numeric, convert_timedeltas=convert_timedeltas, copy=copy)).__finalize__(self)
9,178,976,238,945,957,000
Attempt to infer better dtype for object columns. .. deprecated:: 0.21.0 Parameters ---------- convert_dates : boolean, default True If True, convert to date where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. convert_numeric : boolean, default False If True, attempt to coerce to numbers (including strings), with unconvertible values becoming NaN. convert_timedeltas : boolean, default True If True, convert to timedelta where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type.
pandas/core/generic.py
convert_objects
kapilepatel/pandas
python
def convert_objects(self, convert_dates=True, convert_numeric=False, convert_timedeltas=True, copy=True): "\n Attempt to infer better dtype for object columns.\n\n .. deprecated:: 0.21.0\n\n Parameters\n ----------\n convert_dates : boolean, default True\n If True, convert to date where possible. If 'coerce', force\n conversion, with unconvertible values becoming NaT.\n convert_numeric : boolean, default False\n If True, attempt to coerce to numbers (including strings), with\n unconvertible values becoming NaN.\n convert_timedeltas : boolean, default True\n If True, convert to timedelta where possible. If 'coerce', force\n conversion, with unconvertible values becoming NaT.\n copy : boolean, default True\n If True, return a copy even if no copy is necessary (e.g. no\n conversion was done). Note: This is meant for internal use, and\n should not be confused with inplace.\n\n Returns\n -------\n converted : same as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n " msg = 'convert_objects is deprecated. To re-infer data dtypes for object columns, use {klass}.infer_objects()\nFor all other conversions use the data-type specific converters pd.to_datetime, pd.to_timedelta and pd.to_numeric.'.format(klass=self.__class__.__name__) warnings.warn(msg, FutureWarning, stacklevel=2) return self._constructor(self._data.convert(convert_dates=convert_dates, convert_numeric=convert_numeric, convert_timedeltas=convert_timedeltas, copy=copy)).__finalize__(self)
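A migration sketch matching the deprecation message: re-infer dtypes with ``infer_objects`` and reach for the type-specific converters when coercion is needed (values illustrative):

>>> s = pd.Series(['1', 'x'], dtype=object)
>>> pd.to_numeric(s, errors='coerce')  # replaces convert_numeric=True
0    1.0
1    NaN
dtype: float64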
def infer_objects(self): '\n Attempt to infer better dtypes for object columns.\n\n Attempts soft conversion of object-dtyped\n columns, leaving non-object and unconvertible\n columns unchanged. The inference rules are the\n same as during normal Series/DataFrame construction.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n converted : same type as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n\n Examples\n --------\n >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})\n >>> df = df.iloc[1:]\n >>> df\n A\n 1 1\n 2 2\n 3 3\n\n >>> df.dtypes\n A object\n dtype: object\n\n >>> df.infer_objects().dtypes\n A int64\n dtype: object\n ' return self._constructor(self._data.convert(datetime=True, numeric=False, timedelta=True, coerce=False, copy=True)).__finalize__(self)
2,290,033,283,622,723,600
Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. .. versionadded:: 0.21.0 Returns ------- converted : same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object
pandas/core/generic.py
infer_objects
kapilepatel/pandas
python
def infer_objects(self): '\n Attempt to infer better dtypes for object columns.\n\n Attempts soft conversion of object-dtyped\n columns, leaving non-object and unconvertible\n columns unchanged. The inference rules are the\n same as during normal Series/DataFrame construction.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n converted : same type as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n\n Examples\n --------\n >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})\n >>> df = df.iloc[1:]\n >>> df\n A\n 1 1\n 2 2\n 3 3\n\n >>> df.dtypes\n A object\n dtype: object\n\n >>> df.infer_objects().dtypes\n A int64\n dtype: object\n ' return self._constructor(self._data.convert(datetime=True, numeric=False, timedelta=True, coerce=False, copy=True)).__finalize__(self)
def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None): "\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series, or DataFrame\n Value to use to fill holes (e.g. 0), alternately a\n dict/Series/DataFrame of values specifying which value to use for\n each index (for a Series) or column (for a DataFrame). Values not\n in the dict/Series/DataFrame will not be filled. This value cannot\n be a list.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use next valid observation to fill gap.\n axis : %(axes_single_arg)s\n Axis along which to fill missing values.\n inplace : bool, default False\n If True, fill in-place. Note: this will modify any\n other views on this object (e.g., a no-copy slice for a column in a\n DataFrame).\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n %(klass)s\n Object with missing values filled.\n\n See Also\n --------\n interpolate : Fill NaN values using interpolation.\n reindex : Conform object to new index.\n asfreq : Convert TimeSeries to specified frequency.\n\n Examples\n --------\n >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],\n ... [3, 4, np.nan, 1],\n ... [np.nan, np.nan, np.nan, 5],\n ... [np.nan, 3, np.nan, 4]],\n ... 
columns=list('ABCD'))\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 NaN 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 0.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 NaN 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 2.0 4\n\n Only replace the first NaN element.\n\n >>> df.fillna(value=values, limit=1)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 NaN 1\n 2 NaN 1.0 NaN 5\n 3 NaN 3.0 NaN 4\n " inplace = validate_bool_kwarg(inplace, 'inplace') (value, method) = validate_fillna_kwargs(value, method) self._consolidate_inplace() if (axis is None): axis = 0 axis = self._get_axis_number(axis) from pandas import DataFrame if (value is None): if (self._is_mixed_type and (axis == 1)): if inplace: raise NotImplementedError() result = self.T.fillna(method=method, limit=limit).T result._data = result._data.downcast() return result if (self.ndim > 3): raise NotImplementedError('Cannot fillna with a method for > 3dims') elif (self.ndim == 3): result = {col: s.fillna(method=method, value=value) for (col, s) in self.iteritems()} prelim_obj = self._constructor.from_dict(result) new_obj = prelim_obj.__finalize__(self) new_data = new_obj._data else: new_data = self._data.interpolate(method=method, axis=axis, limit=limit, inplace=inplace, coerce=True, downcast=downcast) else: if (len(self._get_axis(axis)) == 0): return self if (self.ndim == 1): if isinstance(value, (dict, ABCSeries)): from pandas import Series value = Series(value) elif (not is_list_like(value)): pass else: raise TypeError('"value" parameter must be a scalar, dict or Series, but you passed a "{0}"'.format(type(value).__name__)) new_data = self._data.fillna(value=value, limit=limit, inplace=inplace, downcast=downcast) elif isinstance(value, (dict, ABCSeries)): if (axis == 1): raise NotImplementedError('Currently only can fill with dict/Series column by column') result = (self if inplace else self.copy()) for (k, v) in compat.iteritems(value): if (k not in result): continue obj = result[k] obj.fillna(v, limit=limit, inplace=True, downcast=downcast) return (result if (not inplace) else None) elif (not is_list_like(value)): new_data = self._data.fillna(value=value, limit=limit, inplace=inplace, downcast=downcast) elif (isinstance(value, DataFrame) and (self.ndim == 2)): new_data = self.where(self.notna(), value) else: raise ValueError(('invalid fill value with a %s' % type(value))) if inplace: self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self)
-4,337,317,876,121,165,300
Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use next valid observation to fill gap. axis : %(axes_single_arg)s Axis along which to fill missing values. inplace : bool, default False If True, fill in-place. Note: this will modify any other views on this object (e.g., a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- %(klass)s Object with missing values filled. See Also -------- interpolate : Fill NaN values using interpolation. reindex : Conform object to new index. asfreq : Convert TimeSeries to specified frequency. Examples -------- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], ... [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, 5], ... [np.nan, 3, np.nan, 4]], ... columns=list('ABCD')) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 NaN 4 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0 1 3.0 4.0 0.0 1 2 0.0 0.0 0.0 5 3 0.0 3.0 0.0 4 We can also propagate non-null values forward or backward. >>> df.fillna(method='ffill') A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 3.0 4.0 NaN 5 3 3.0 3.0 NaN 4 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 2.0 1 2 0.0 1.0 2.0 5 3 0.0 3.0 2.0 4 Only replace the first NaN element. >>> df.fillna(value=values, limit=1) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 NaN 1 2 NaN 1.0 NaN 5 3 NaN 3.0 NaN 4
pandas/core/generic.py
fillna
kapilepatel/pandas
python
def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None): "\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series, or DataFrame\n Value to use to fill holes (e.g. 0), alternately a\n dict/Series/DataFrame of values specifying which value to use for\n each index (for a Series) or column (for a DataFrame). Values not\n in the dict/Series/DataFrame will not be filled. This value cannot\n be a list.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use next valid observation to fill gap.\n axis : %(axes_single_arg)s\n Axis along which to fill missing values.\n inplace : bool, default False\n If True, fill in-place. Note: this will modify any\n other views on this object (e.g., a no-copy slice for a column in a\n DataFrame).\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n %(klass)s\n Object with missing values filled.\n\n See Also\n --------\n interpolate : Fill NaN values using interpolation.\n reindex : Conform object to new index.\n asfreq : Convert TimeSeries to specified frequency.\n\n Examples\n --------\n >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],\n ... [3, 4, np.nan, 1],\n ... [np.nan, np.nan, np.nan, 5],\n ... [np.nan, 3, np.nan, 4]],\n ... 
columns=list('ABCD'))\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 NaN 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 0.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 NaN 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 2.0 4\n\n Only replace the first NaN element.\n\n >>> df.fillna(value=values, limit=1)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 NaN 1\n 2 NaN 1.0 NaN 5\n 3 NaN 3.0 NaN 4\n " inplace = validate_bool_kwarg(inplace, 'inplace') (value, method) = validate_fillna_kwargs(value, method) self._consolidate_inplace() if (axis is None): axis = 0 axis = self._get_axis_number(axis) from pandas import DataFrame if (value is None): if (self._is_mixed_type and (axis == 1)): if inplace: raise NotImplementedError() result = self.T.fillna(method=method, limit=limit).T result._data = result._data.downcast() return result if (self.ndim > 3): raise NotImplementedError('Cannot fillna with a method for > 3dims') elif (self.ndim == 3): result = {col: s.fillna(method=method, value=value) for (col, s) in self.iteritems()} prelim_obj = self._constructor.from_dict(result) new_obj = prelim_obj.__finalize__(self) new_data = new_obj._data else: new_data = self._data.interpolate(method=method, axis=axis, limit=limit, inplace=inplace, coerce=True, downcast=downcast) else: if (len(self._get_axis(axis)) == 0): return self if (self.ndim == 1): if isinstance(value, (dict, ABCSeries)): from pandas import Series value = Series(value) elif (not is_list_like(value)): pass else: raise TypeError('"value" parameter must be a scalar, dict or Series, but you passed a "{0}"'.format(type(value).__name__)) new_data = self._data.fillna(value=value, limit=limit, inplace=inplace, downcast=downcast) elif isinstance(value, (dict, ABCSeries)): if (axis == 1): raise NotImplementedError('Currently only can fill with dict/Series column by column') result = (self if inplace else self.copy()) for (k, v) in compat.iteritems(value): if (k not in result): continue obj = result[k] obj.fillna(v, limit=limit, inplace=True, downcast=downcast) return (result if (not inplace) else None) elif (not is_list_like(value)): new_data = self._data.fillna(value=value, limit=limit, inplace=inplace, downcast=downcast) elif (isinstance(value, DataFrame) and (self.ndim == 2)): new_data = self.where(self.notna(), value) else: raise ValueError(('invalid fill value with a %s' % type(value))) if inplace: self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self)
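The docstring examples stop at value-based limits; a short sketch of ``limit`` combined with a fill method, where the limit caps consecutive fills within a gap (``np`` assumed to be numpy):

>>> df = pd.DataFrame({'A': [np.nan, 3.0, np.nan, np.nan]})
>>> df.fillna(method='ffill', limit=1)
     A
0  NaN
1  3.0
2  3.0
3  NaN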
def ffill(self, axis=None, inplace=False, limit=None, downcast=None): "\n Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.\n " return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit, downcast=downcast)
5,858,174,158,121,773,000
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
pandas/core/generic.py
ffill
kapilepatel/pandas
python
def ffill(self, axis=None, inplace=False, limit=None, downcast=None): "\n \n " return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit, downcast=downcast)
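A minimal sketch; this forwards straight to ``fillna(method='ffill')``, so ``limit`` behaves the same way:

>>> s = pd.Series([1.0, np.nan, np.nan, 3.0])
>>> s.ffill(limit=1)
0    1.0
1    1.0
2    NaN
3    3.0
dtype: float64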
def bfill(self, axis=None, inplace=False, limit=None, downcast=None): "\n Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.\n " return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit, downcast=downcast)
-6,281,506,511,002,993,000
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
pandas/core/generic.py
bfill
kapilepatel/pandas
python
def bfill(self, axis=None, inplace=False, limit=None, downcast=None): "\n \n " return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit, downcast=downcast)
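The mirror-image sketch for backward filling:

>>> s = pd.Series([1.0, np.nan, np.nan, 3.0])
>>> s.bfill()
0    1.0
1    3.0
2    3.0
3    3.0
dtype: float64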
@Appender((_shared_docs['interpolate'] % _shared_doc_kwargs)) def interpolate(self, method='linear', axis=0, limit=None, inplace=False, limit_direction='forward', limit_area=None, downcast=None, **kwargs): '\n Interpolate values according to different methods.\n ' inplace = validate_bool_kwarg(inplace, 'inplace') if (self.ndim > 2): raise NotImplementedError('Interpolate has not been implemented on Panel and Panel 4D objects.') if (axis == 0): ax = self._info_axis_name _maybe_transposed_self = self elif (axis == 1): _maybe_transposed_self = self.T ax = 1 else: _maybe_transposed_self = self ax = _maybe_transposed_self._get_axis_number(ax) if (_maybe_transposed_self.ndim == 2): alt_ax = (1 - ax) else: alt_ax = ax if (isinstance(_maybe_transposed_self.index, MultiIndex) and (method != 'linear')): raise ValueError('Only `method=linear` interpolation is supported on MultiIndexes.') if (_maybe_transposed_self._data.get_dtype_counts().get('object') == len(_maybe_transposed_self.T)): raise TypeError('Cannot interpolate with all object-dtype columns in the DataFrame. Try setting at least one column to a numeric dtype.') if (method == 'linear'): index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax))) else: index = _maybe_transposed_self._get_axis(alt_ax) if isna(index).any(): raise NotImplementedError('Interpolation with NaNs in the index has not been implemented. Try filling those NaNs before interpolating.') data = _maybe_transposed_self._data new_data = data.interpolate(method=method, axis=ax, index=index, values=_maybe_transposed_self, limit=limit, limit_direction=limit_direction, limit_area=limit_area, inplace=inplace, downcast=downcast, **kwargs) if inplace: if (axis == 1): new_data = self._constructor(new_data).T._data self._update_inplace(new_data) else: res = self._constructor(new_data).__finalize__(self) if (axis == 1): res = res.T return res
967,257,470,192,600,600
Interpolate values according to different methods.
pandas/core/generic.py
interpolate
kapilepatel/pandas
python
@Appender((_shared_docs['interpolate'] % _shared_doc_kwargs)) def interpolate(self, method='linear', axis=0, limit=None, inplace=False, limit_direction='forward', limit_area=None, downcast=None, **kwargs): '\n \n ' inplace = validate_bool_kwarg(inplace, 'inplace') if (self.ndim > 2): raise NotImplementedError('Interpolate has not been implemented on Panel and Panel 4D objects.') if (axis == 0): ax = self._info_axis_name _maybe_transposed_self = self elif (axis == 1): _maybe_transposed_self = self.T ax = 1 else: _maybe_transposed_self = self ax = _maybe_transposed_self._get_axis_number(ax) if (_maybe_transposed_self.ndim == 2): alt_ax = (1 - ax) else: alt_ax = ax if (isinstance(_maybe_transposed_self.index, MultiIndex) and (method != 'linear')): raise ValueError('Only `method=linear` interpolation is supported on MultiIndexes.') if (_maybe_transposed_self._data.get_dtype_counts().get('object') == len(_maybe_transposed_self.T)): raise TypeError('Cannot interpolate with all object-dtype columns in the DataFrame. Try setting at least one column to a numeric dtype.') if (method == 'linear'): index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax))) else: index = _maybe_transposed_self._get_axis(alt_ax) if isna(index).any(): raise NotImplementedError('Interpolation with NaNs in the index has not been implemented. Try filling those NaNs before interpolating.') data = _maybe_transposed_self._data new_data = data.interpolate(method=method, axis=ax, index=index, values=_maybe_transposed_self, limit=limit, limit_direction=limit_direction, limit_area=limit_area, inplace=inplace, downcast=downcast, **kwargs) if inplace: if (axis == 1): new_data = self._constructor(new_data).T._data self._update_inplace(new_data) else: res = self._constructor(new_data).__finalize__(self) if (axis == 1): res = res.T return res
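A short sketch of the default ``method='linear'`` path, which (per the code above) interpolates against a plain positional index:

>>> s = pd.Series([0.0, np.nan, np.nan, 3.0])
>>> s.interpolate()
0    0.0
1    1.0
2    2.0
3    3.0
dtype: float64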
def asof(self, where, subset=None): "\n Return the last row(s) without any NaNs before `where`.\n\n The last row (for each element in `where`, if list) without any\n NaN is taken.\n In case of a :class:`~pandas.DataFrame`, the last row without NaN\n considering only the subset of columns (if not `None`)\n\n .. versionadded:: 0.19.0 For DataFrame\n\n If there is no good value, NaN is returned for a Series or\n a Series of NaN values for a DataFrame\n\n Parameters\n ----------\n where : date or array-like of dates\n Date(s) before which the last row(s) are returned.\n subset : str or array-like of str, default `None`\n For DataFrame, if not `None`, only use these columns to\n check for NaNs.\n\n Returns\n -------\n scalar, Series, or DataFrame\n\n The return can be:\n\n * scalar : when `self` is a Series and `where` is a scalar\n * Series: when `self` is a Series and `where` is an array-like,\n or when `self` is a DataFrame and `where` is a scalar\n * DataFrame : when `self` is a DataFrame and `where` is an\n array-like\n\n Return scalar, Series, or DataFrame.\n\n See Also\n --------\n merge_asof : Perform an asof merge. Similar to left join.\n\n Notes\n -----\n Dates are assumed to be sorted. Raises if this is not the case.\n\n Examples\n --------\n A Series and a scalar `where`.\n\n >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])\n >>> s\n 10 1.0\n 20 2.0\n 30 NaN\n 40 4.0\n dtype: float64\n\n >>> s.asof(20)\n 2.0\n\n For a sequence `where`, a Series is returned. The first value is\n NaN, because the first element of `where` is before the first\n index value.\n\n >>> s.asof([5, 20])\n 5 NaN\n 20 2.0\n dtype: float64\n\n Missing values are not considered. The following is ``2.0``, not\n NaN, even though NaN is at the index location for ``30``.\n\n >>> s.asof(30)\n 2.0\n\n Take all columns into consideration\n\n >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],\n ... 'b': [None, None, None, None, 500]},\n ... index=pd.DatetimeIndex(['2018-02-27 09:01:00',\n ... '2018-02-27 09:02:00',\n ... '2018-02-27 09:03:00',\n ... '2018-02-27 09:04:00',\n ... '2018-02-27 09:05:00']))\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']))\n a b\n 2018-02-27 09:03:30 NaN NaN\n 2018-02-27 09:04:30 NaN NaN\n\n Take a single column into consideration\n\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']),\n ... 
subset=['a'])\n a b\n 2018-02-27 09:03:30 30.0 NaN\n 2018-02-27 09:04:30 40.0 NaN\n " if isinstance(where, compat.string_types): from pandas import to_datetime where = to_datetime(where) if (not self.index.is_monotonic): raise ValueError('asof requires a sorted index') is_series = isinstance(self, ABCSeries) if is_series: if (subset is not None): raise ValueError('subset is not valid for Series') elif (self.ndim > 2): raise NotImplementedError('asof is not implemented for {type}'.format(type=type(self))) else: if (subset is None): subset = self.columns if (not is_list_like(subset)): subset = [subset] is_list = is_list_like(where) if (not is_list): start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq).ordinal start = start.ordinal if (where < start): if (not is_series): from pandas import Series return Series(index=self.columns, name=where) return np.nan if is_series: loc = self.index.searchsorted(where, side='right') if (loc > 0): loc -= 1 values = self._values while ((loc > 0) and isna(values[loc])): loc -= 1 return values[loc] if (not isinstance(where, Index)): where = (Index(where) if is_list else Index([where])) nulls = (self.isna() if is_series else self[subset].isna().any(1)) if nulls.all(): if is_series: return self._constructor(np.nan, index=where, name=self.name) elif is_list: from pandas import DataFrame return DataFrame(np.nan, index=where, columns=self.columns) else: from pandas import Series return Series(np.nan, index=self.columns, name=where[0]) locs = self.index.asof_locs(where, (~ nulls.values)) missing = (locs == (- 1)) data = self.take(locs, is_copy=False) data.index = where data.loc[missing] = np.nan return (data if is_list else data.iloc[(- 1)])
4,421,495,940,943,718,000
Return the last row(s) without any NaNs before `where`. The last row (for each element in `where`, if list) without any NaN is taken. In case of a :class:`~pandas.DataFrame`, the last row without NaN considering only the subset of columns (if not `None`) .. versionadded:: 0.19.0 For DataFrame If there is no good value, NaN is returned for a Series or a Series of NaN values for a DataFrame Parameters ---------- where : date or array-like of dates Date(s) before which the last row(s) are returned. subset : str or array-like of str, default `None` For DataFrame, if not `None`, only use these columns to check for NaNs. Returns ------- scalar, Series, or DataFrame The return can be: * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like, or when `self` is a DataFrame and `where` is a scalar * DataFrame : when `self` is a DataFrame and `where` is an array-like Return scalar, Series, or DataFrame. See Also -------- merge_asof : Perform an asof merge. Similar to left join. Notes ----- Dates are assumed to be sorted. Raises if this is not the case. Examples -------- A Series and a scalar `where`. >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 dtype: float64 >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]) 5 NaN 20 2.0 dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 Take all columns into consideration >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], ... 'b': [None, None, None, None, 500]}, ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', ... '2018-02-27 09:02:00', ... '2018-02-27 09:03:00', ... '2018-02-27 09:04:00', ... '2018-02-27 09:05:00'])) >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30'])) a b 2018-02-27 09:03:30 NaN NaN 2018-02-27 09:04:30 NaN NaN Take a single column into consideration >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30']), ... subset=['a']) a b 2018-02-27 09:03:30 30.0 NaN 2018-02-27 09:04:30 40.0 NaN
pandas/core/generic.py
asof
kapilepatel/pandas
python
def asof(self, where, subset=None): "\n Return the last row(s) without any NaNs before `where`.\n\n The last row (for each element in `where`, if list) without any\n NaN is taken.\n In case of a :class:`~pandas.DataFrame`, the last row without NaN\n considering only the subset of columns (if not `None`)\n\n .. versionadded:: 0.19.0 For DataFrame\n\n If there is no good value, NaN is returned for a Series or\n a Series of NaN values for a DataFrame\n\n Parameters\n ----------\n where : date or array-like of dates\n Date(s) before which the last row(s) are returned.\n subset : str or array-like of str, default `None`\n For DataFrame, if not `None`, only use these columns to\n check for NaNs.\n\n Returns\n -------\n scalar, Series, or DataFrame\n\n The return can be:\n\n * scalar : when `self` is a Series and `where` is a scalar\n * Series: when `self` is a Series and `where` is an array-like,\n or when `self` is a DataFrame and `where` is a scalar\n * DataFrame : when `self` is a DataFrame and `where` is an\n array-like\n\n Return scalar, Series, or DataFrame.\n\n See Also\n --------\n merge_asof : Perform an asof merge. Similar to left join.\n\n Notes\n -----\n Dates are assumed to be sorted. Raises if this is not the case.\n\n Examples\n --------\n A Series and a scalar `where`.\n\n >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])\n >>> s\n 10 1.0\n 20 2.0\n 30 NaN\n 40 4.0\n dtype: float64\n\n >>> s.asof(20)\n 2.0\n\n For a sequence `where`, a Series is returned. The first value is\n NaN, because the first element of `where` is before the first\n index value.\n\n >>> s.asof([5, 20])\n 5 NaN\n 20 2.0\n dtype: float64\n\n Missing values are not considered. The following is ``2.0``, not\n NaN, even though NaN is at the index location for ``30``.\n\n >>> s.asof(30)\n 2.0\n\n Take all columns into consideration\n\n >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],\n ... 'b': [None, None, None, None, 500]},\n ... index=pd.DatetimeIndex(['2018-02-27 09:01:00',\n ... '2018-02-27 09:02:00',\n ... '2018-02-27 09:03:00',\n ... '2018-02-27 09:04:00',\n ... '2018-02-27 09:05:00']))\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']))\n a b\n 2018-02-27 09:03:30 NaN NaN\n 2018-02-27 09:04:30 NaN NaN\n\n Take a single column into consideration\n\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']),\n ... 
subset=['a'])\n a b\n 2018-02-27 09:03:30 30.0 NaN\n 2018-02-27 09:04:30 40.0 NaN\n " if isinstance(where, compat.string_types): from pandas import to_datetime where = to_datetime(where) if (not self.index.is_monotonic): raise ValueError('asof requires a sorted index') is_series = isinstance(self, ABCSeries) if is_series: if (subset is not None): raise ValueError('subset is not valid for Series') elif (self.ndim > 2): raise NotImplementedError('asof is not implemented for {type}'.format(type=type(self))) else: if (subset is None): subset = self.columns if (not is_list_like(subset)): subset = [subset] is_list = is_list_like(where) if (not is_list): start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq).ordinal start = start.ordinal if (where < start): if (not is_series): from pandas import Series return Series(index=self.columns, name=where) return np.nan if is_series: loc = self.index.searchsorted(where, side='right') if (loc > 0): loc -= 1 values = self._values while ((loc > 0) and isna(values[loc])): loc -= 1 return values[loc] if (not isinstance(where, Index)): where = (Index(where) if is_list else Index([where])) nulls = (self.isna() if is_series else self[subset].isna().any(1)) if nulls.all(): if is_series: return self._constructor(np.nan, index=where, name=self.name) elif is_list: from pandas import DataFrame return DataFrame(np.nan, index=where, columns=self.columns) else: from pandas import Series return Series(np.nan, index=self.columns, name=where[0]) locs = self.index.asof_locs(where, (~ nulls.values)) missing = (locs == (- 1)) data = self.take(locs, is_copy=False) data.index = where data.loc[missing] = np.nan return (data if is_list else data.iloc[(- 1)])
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs): "\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values. Thresholds\n can be singular values or array like, and in the latter case\n the clipping is performed element-wise in the specified axis.\n\n Parameters\n ----------\n lower : float or array_like, default None\n Minimum threshold value. All values below this\n threshold will be set to it.\n upper : float or array_like, default None\n Maximum threshold value. All values above this\n threshold will be set to it.\n axis : int or str axis name, optional\n Align object with lower and upper along the given axis.\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n *args, **kwargs\n Additional keywords have no effect but might be accepted\n for compatibility with numpy.\n\n Returns\n -------\n Series or DataFrame\n Same type as calling object with the values outside the\n clip boundaries replaced.\n\n Examples\n --------\n >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}\n >>> df = pd.DataFrame(data)\n >>> df\n col_0 col_1\n 0 9 -2\n 1 -3 -7\n 2 0 6\n 3 -1 8\n 4 5 -5\n\n Clips per column using lower and upper thresholds:\n\n >>> df.clip(-4, 6)\n col_0 col_1\n 0 6 -2\n 1 -3 -4\n 2 0 6\n 3 -1 6\n 4 5 -4\n\n Clips using specific lower and upper thresholds per column element:\n\n >>> t = pd.Series([2, -4, -1, 6, 3])\n >>> t\n 0 2\n 1 -4\n 2 -1\n 3 6\n 4 3\n dtype: int64\n\n >>> df.clip(t, t + 4, axis=0)\n col_0 col_1\n 0 6 2\n 1 -3 -4\n 2 0 3\n 3 6 8\n 4 5 3\n " if isinstance(self, ABCPanel): raise NotImplementedError('clip is not supported yet for panels') inplace = validate_bool_kwarg(inplace, 'inplace') axis = nv.validate_clip_with_axis(axis, args, kwargs) if (axis is not None): axis = self._get_axis_number(axis) if ((not is_list_like(lower)) and np.any(pd.isnull(lower))): lower = None if ((not is_list_like(upper)) and np.any(pd.isnull(upper))): upper = None if ((lower is not None) and (upper is not None)): if (is_scalar(lower) and is_scalar(upper)): (lower, upper) = (min(lower, upper), max(lower, upper)) if (((lower is None) or (is_scalar(lower) and is_number(lower))) and ((upper is None) or (is_scalar(upper) and is_number(upper)))): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if (lower is not None): result = result._clip_with_one_bound(lower, method=self.ge, axis=axis, inplace=inplace) if (upper is not None): if inplace: result = self result = result._clip_with_one_bound(upper, method=self.le, axis=axis, inplace=inplace) return result
5,896,010,558,430,913,000
Trim values at input threshold(s). Values outside the boundaries are assigned the boundary values. Thresholds can be single values or array-like; in the latter case clipping is performed element-wise along the specified axis. Parameters ---------- lower : float or array_like, default None Minimum threshold value. All values below this threshold will be set to it. upper : float or array_like, default None Maximum threshold value. All values above this threshold will be set to it. axis : int or str axis name, optional Align object with lower and upper along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- Series or DataFrame Same type as calling object with the values outside the clip boundaries replaced. Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} >>> df = pd.DataFrame(data) >>> df col_0 col_1 0 9 -2 1 -3 -7 2 0 6 3 -1 8 4 5 -5 Clips per column using lower and upper thresholds: >>> df.clip(-4, 6) col_0 col_1 0 6 -2 1 -3 -4 2 0 6 3 -1 6 4 5 -4 Clips element-wise using per-row lower and upper thresholds (aligned on the index with ``axis=0``): >>> t = pd.Series([2, -4, -1, 6, 3]) >>> t 0 2 1 -4 2 -1 3 6 4 3 dtype: int64 >>> df.clip(t, t + 4, axis=0) col_0 col_1 0 6 2 1 -3 -4 2 0 3 3 6 8 4 5 3
pandas/core/generic.py
clip
kapilepatel/pandas
python
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs): "\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values. Thresholds\n can be singular values or array like, and in the latter case\n the clipping is performed element-wise in the specified axis.\n\n Parameters\n ----------\n lower : float or array_like, default None\n Minimum threshold value. All values below this\n threshold will be set to it.\n upper : float or array_like, default None\n Maximum threshold value. All values above this\n threshold will be set to it.\n axis : int or str axis name, optional\n Align object with lower and upper along the given axis.\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n *args, **kwargs\n Additional keywords have no effect but might be accepted\n for compatibility with numpy.\n\n Returns\n -------\n Series or DataFrame\n Same type as calling object with the values outside the\n clip boundaries replaced.\n\n Examples\n --------\n >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}\n >>> df = pd.DataFrame(data)\n >>> df\n col_0 col_1\n 0 9 -2\n 1 -3 -7\n 2 0 6\n 3 -1 8\n 4 5 -5\n\n Clips per column using lower and upper thresholds:\n\n >>> df.clip(-4, 6)\n col_0 col_1\n 0 6 -2\n 1 -3 -4\n 2 0 6\n 3 -1 6\n 4 5 -4\n\n Clips using specific lower and upper thresholds per column element:\n\n >>> t = pd.Series([2, -4, -1, 6, 3])\n >>> t\n 0 2\n 1 -4\n 2 -1\n 3 6\n 4 3\n dtype: int64\n\n >>> df.clip(t, t + 4, axis=0)\n col_0 col_1\n 0 6 2\n 1 -3 -4\n 2 0 3\n 3 6 8\n 4 5 3\n " if isinstance(self, ABCPanel): raise NotImplementedError('clip is not supported yet for panels') inplace = validate_bool_kwarg(inplace, 'inplace') axis = nv.validate_clip_with_axis(axis, args, kwargs) if (axis is not None): axis = self._get_axis_number(axis) if ((not is_list_like(lower)) and np.any(pd.isnull(lower))): lower = None if ((not is_list_like(upper)) and np.any(pd.isnull(upper))): upper = None if ((lower is not None) and (upper is not None)): if (is_scalar(lower) and is_scalar(upper)): (lower, upper) = (min(lower, upper), max(lower, upper)) if (((lower is None) or (is_scalar(lower) and is_number(lower))) and ((upper is None) or (is_scalar(upper) and is_number(upper)))): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if (lower is not None): result = result._clip_with_one_bound(lower, method=self.ge, axis=axis, inplace=inplace) if (upper is not None): if inplace: result = self result = result._clip_with_one_bound(upper, method=self.le, axis=axis, inplace=inplace) return result
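A quick, self-contained sketch of the two threshold paths documented above (the data mirrors the docstring example); the variable names are illustrative:

import pandas as pd

df = pd.DataFrame({'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]})

# Scalar bounds clip every element into the interval [-4, 6].
clipped = df.clip(-4, 6)

# Array-like bounds with axis=0 align the thresholds with the index,
# so each row is clipped against its own [t, t + 4] interval.
t = pd.Series([2, -4, -1, 6, 3])
row_clipped = df.clip(t, t + 4, axis=0)
print(clipped, row_clipped, sep='\n')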
def clip_upper(self, threshold, axis=None, inplace=False): "\n Trim values above a given threshold.\n\n .. deprecated:: 0.24.0\n Use clip(upper=threshold) instead.\n\n Elements above the `threshold` will be changed to match the\n `threshold` value(s). Threshold can be a single value or an array,\n in the latter case it performs the truncation element-wise.\n\n Parameters\n ----------\n threshold : numeric or array-like\n Maximum value allowed. All values above threshold will be set to\n this value.\n\n * float : every value is compared to `threshold`.\n * array-like : The shape of `threshold` should match the object\n it's compared to. When `self` is a Series, `threshold` should be\n the length. When `self` is a DataFrame, `threshold` should 2-D\n and the same shape as `self` for ``axis=None``, or 1-D and the\n same length as the axis being compared.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Align object with `threshold` along the given axis.\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series or DataFrame\n Original data with values trimmed.\n\n See Also\n --------\n Series.clip : General purpose method to trim Series values to given\n threshold(s).\n DataFrame.clip : General purpose method to trim DataFrame values to\n given threshold(s).\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4, 5])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n dtype: int64\n\n >>> s.clip(upper=3)\n 0 1\n 1 2\n 2 3\n 3 3\n 4 3\n dtype: int64\n\n >>> elemwise_thresholds = [5, 4, 3, 2, 1]\n >>> elemwise_thresholds\n [5, 4, 3, 2, 1]\n\n >>> s.clip(upper=elemwise_thresholds)\n 0 1\n 1 2\n 2 3\n 3 2\n 4 1\n dtype: int64\n " warnings.warn('clip_upper(threshold) is deprecated, use clip(upper=threshold) instead', FutureWarning, stacklevel=2) return self._clip_with_one_bound(threshold, method=self.le, axis=axis, inplace=inplace)
3,718,133,413,871,810,000
Trim values above a given threshold. .. deprecated:: 0.24.0 Use clip(upper=threshold) instead. Elements above the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array; in the latter case the truncation is performed element-wise. Parameters ---------- threshold : numeric or array-like Maximum value allowed. All values above threshold will be set to this value. * float : every value is compared to `threshold`. * array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the same length. When `self` is a DataFrame, `threshold` should be 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align object with `threshold` along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- >>> s = pd.Series([1, 2, 3, 4, 5]) >>> s 0 1 1 2 2 3 3 4 4 5 dtype: int64 >>> s.clip(upper=3) 0 1 1 2 2 3 3 3 4 3 dtype: int64 >>> elemwise_thresholds = [5, 4, 3, 2, 1] >>> elemwise_thresholds [5, 4, 3, 2, 1] >>> s.clip(upper=elemwise_thresholds) 0 1 1 2 2 3 3 2 4 1 dtype: int64
pandas/core/generic.py
clip_upper
kapilepatel/pandas
python
def clip_upper(self, threshold, axis=None, inplace=False): "\n Trim values above a given threshold.\n\n .. deprecated:: 0.24.0\n Use clip(upper=threshold) instead.\n\n Elements above the `threshold` will be changed to match the\n `threshold` value(s). Threshold can be a single value or an array,\n in the latter case it performs the truncation element-wise.\n\n Parameters\n ----------\n threshold : numeric or array-like\n Maximum value allowed. All values above threshold will be set to\n this value.\n\n * float : every value is compared to `threshold`.\n * array-like : The shape of `threshold` should match the object\n it's compared to. When `self` is a Series, `threshold` should be\n the length. When `self` is a DataFrame, `threshold` should 2-D\n and the same shape as `self` for ``axis=None``, or 1-D and the\n same length as the axis being compared.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Align object with `threshold` along the given axis.\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series or DataFrame\n Original data with values trimmed.\n\n See Also\n --------\n Series.clip : General purpose method to trim Series values to given\n threshold(s).\n DataFrame.clip : General purpose method to trim DataFrame values to\n given threshold(s).\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4, 5])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n dtype: int64\n\n >>> s.clip(upper=3)\n 0 1\n 1 2\n 2 3\n 3 3\n 4 3\n dtype: int64\n\n >>> elemwise_thresholds = [5, 4, 3, 2, 1]\n >>> elemwise_thresholds\n [5, 4, 3, 2, 1]\n\n >>> s.clip(upper=elemwise_thresholds)\n 0 1\n 1 2\n 2 3\n 3 2\n 4 1\n dtype: int64\n " warnings.warn('clip_upper(threshold) is deprecated, use clip(upper=threshold) instead', FutureWarning, stacklevel=2) return self._clip_with_one_bound(threshold, method=self.le, axis=axis, inplace=inplace)
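Since clip_upper is deprecated as of 0.24.0, a short sketch of the recommended replacement using the same data as the docstring example:

import pandas as pd

s = pd.Series([1, 2, 3, 4, 5])

# clip(upper=...) is the non-deprecated spelling of clip_upper(...).
capped = s.clip(upper=3)                   # scalar cap -> 1, 2, 3, 3, 3
elemwise = s.clip(upper=[5, 4, 3, 2, 1])   # per-element caps -> 1, 2, 3, 2, 1
print(capped, elemwise, sep='\n')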
def clip_lower(self, threshold, axis=None, inplace=False): '\n Trim values below a given threshold.\n\n .. deprecated:: 0.24.0\n Use clip(lower=threshold) instead.\n\n Elements below the `threshold` will be changed to match the\n `threshold` value(s). Threshold can be a single value or an array,\n in the latter case it performs the truncation element-wise.\n\n Parameters\n ----------\n threshold : numeric or array-like\n Minimum value allowed. All values below threshold will be set to\n this value.\n\n * float : every value is compared to `threshold`.\n * array-like : The shape of `threshold` should match the object\n it\'s compared to. When `self` is a Series, `threshold` should be\n the length. When `self` is a DataFrame, `threshold` should 2-D\n and the same shape as `self` for ``axis=None``, or 1-D and the\n same length as the axis being compared.\n\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n Align `self` with `threshold` along the given axis.\n\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series or DataFrame\n Original data with values trimmed.\n\n See Also\n --------\n Series.clip : General purpose method to trim Series values to given\n threshold(s).\n DataFrame.clip : General purpose method to trim DataFrame values to\n given threshold(s).\n\n Examples\n --------\n\n Series single threshold clipping:\n\n >>> s = pd.Series([5, 6, 7, 8, 9])\n >>> s.clip(lower=8)\n 0 8\n 1 8\n 2 8\n 3 8\n 4 9\n dtype: int64\n\n Series clipping element-wise using an array of thresholds. `threshold`\n should be the same length as the Series.\n\n >>> elemwise_thresholds = [4, 8, 7, 2, 5]\n >>> s.clip(lower=elemwise_thresholds)\n 0 5\n 1 8\n 2 7\n 3 8\n 4 9\n dtype: int64\n\n DataFrames can be compared to a scalar.\n\n >>> df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]})\n >>> df\n A B\n 0 1 2\n 1 3 4\n 2 5 6\n\n >>> df.clip(lower=3)\n A B\n 0 3 3\n 1 3 4\n 2 5 6\n\n Or to an array of values. By default, `threshold` should be the same\n shape as the DataFrame.\n\n >>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]]))\n A B\n 0 3 4\n 1 3 4\n 2 6 6\n\n Control how `threshold` is broadcast with `axis`. In this case\n `threshold` should be the same length as the axis specified by\n `axis`.\n\n >>> df.clip(lower=[3, 3, 5], axis=\'index\')\n A B\n 0 3 3\n 1 3 4\n 2 5 6\n\n >>> df.clip(lower=[4, 5], axis=\'columns\')\n A B\n 0 4 5\n 1 4 5\n 2 5 6\n ' warnings.warn('clip_lower(threshold) is deprecated, use clip(lower=threshold) instead', FutureWarning, stacklevel=2) return self._clip_with_one_bound(threshold, method=self.ge, axis=axis, inplace=inplace)
888,493,861,598,057,000
Trim values below a given threshold. .. deprecated:: 0.24.0 Use clip(lower=threshold) instead. Elements below the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array; in the latter case the truncation is performed element-wise. Parameters ---------- threshold : numeric or array-like Minimum value allowed. All values below threshold will be set to this value. * float : every value is compared to `threshold`. * array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the same length. When `self` is a DataFrame, `threshold` should be 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align `self` with `threshold` along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- Series single threshold clipping: >>> s = pd.Series([5, 6, 7, 8, 9]) >>> s.clip(lower=8) 0 8 1 8 2 8 3 8 4 9 dtype: int64 Series clipping element-wise using an array of thresholds. `threshold` should be the same length as the Series. >>> elemwise_thresholds = [4, 8, 7, 2, 5] >>> s.clip(lower=elemwise_thresholds) 0 5 1 8 2 7 3 8 4 9 dtype: int64 DataFrames can be compared to a scalar. >>> df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]}) >>> df A B 0 1 2 1 3 4 2 5 6 >>> df.clip(lower=3) A B 0 3 3 1 3 4 2 5 6 Or to an array of values. By default, `threshold` should be the same shape as the DataFrame. >>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]])) A B 0 3 4 1 3 4 2 6 6 Control how `threshold` is broadcast with `axis`. In this case `threshold` should be the same length as the axis specified by `axis`. >>> df.clip(lower=[3, 3, 5], axis='index') A B 0 3 3 1 3 4 2 5 6 >>> df.clip(lower=[4, 5], axis='columns') A B 0 4 5 1 4 5 2 5 6
pandas/core/generic.py
clip_lower
kapilepatel/pandas
python
def clip_lower(self, threshold, axis=None, inplace=False): '\n Trim values below a given threshold.\n\n .. deprecated:: 0.24.0\n Use clip(lower=threshold) instead.\n\n Elements below the `threshold` will be changed to match the\n `threshold` value(s). Threshold can be a single value or an array,\n in the latter case it performs the truncation element-wise.\n\n Parameters\n ----------\n threshold : numeric or array-like\n Minimum value allowed. All values below threshold will be set to\n this value.\n\n * float : every value is compared to `threshold`.\n * array-like : The shape of `threshold` should match the object\n it\'s compared to. When `self` is a Series, `threshold` should be\n the length. When `self` is a DataFrame, `threshold` should 2-D\n and the same shape as `self` for ``axis=None``, or 1-D and the\n same length as the axis being compared.\n\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n Align `self` with `threshold` along the given axis.\n\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series or DataFrame\n Original data with values trimmed.\n\n See Also\n --------\n Series.clip : General purpose method to trim Series values to given\n threshold(s).\n DataFrame.clip : General purpose method to trim DataFrame values to\n given threshold(s).\n\n Examples\n --------\n\n Series single threshold clipping:\n\n >>> s = pd.Series([5, 6, 7, 8, 9])\n >>> s.clip(lower=8)\n 0 8\n 1 8\n 2 8\n 3 8\n 4 9\n dtype: int64\n\n Series clipping element-wise using an array of thresholds. `threshold`\n should be the same length as the Series.\n\n >>> elemwise_thresholds = [4, 8, 7, 2, 5]\n >>> s.clip(lower=elemwise_thresholds)\n 0 5\n 1 8\n 2 7\n 3 8\n 4 9\n dtype: int64\n\n DataFrames can be compared to a scalar.\n\n >>> df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]})\n >>> df\n A B\n 0 1 2\n 1 3 4\n 2 5 6\n\n >>> df.clip(lower=3)\n A B\n 0 3 3\n 1 3 4\n 2 5 6\n\n Or to an array of values. By default, `threshold` should be the same\n shape as the DataFrame.\n\n >>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]]))\n A B\n 0 3 4\n 1 3 4\n 2 6 6\n\n Control how `threshold` is broadcast with `axis`. In this case\n `threshold` should be the same length as the axis specified by\n `axis`.\n\n >>> df.clip(lower=[3, 3, 5], axis=\'index\')\n A B\n 0 3 3\n 1 3 4\n 2 5 6\n\n >>> df.clip(lower=[4, 5], axis=\'columns\')\n A B\n 0 4 5\n 1 4 5\n 2 5 6\n ' warnings.warn('clip_lower(threshold) is deprecated, use clip(lower=threshold) instead', FutureWarning, stacklevel=2) return self._clip_with_one_bound(threshold, method=self.ge, axis=axis, inplace=inplace)
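Likewise, a sketch of the non-deprecated replacement for clip_lower, covering the scalar, per-row, and per-column broadcast cases from the docstring:

import pandas as pd

df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]})

# clip(lower=...) is the replacement for the deprecated clip_lower(...).
floored = df.clip(lower=3)                        # scalar floor
per_row = df.clip(lower=[3, 3, 5], axis='index')  # one floor per row
per_col = df.clip(lower=[4, 5], axis='columns')   # one floor per column
print(floored, per_row, per_col, sep='\n')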
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, squeeze=False, observed=False, **kwargs): '\n Group DataFrame or Series using a mapper or by a Series of columns.\n\n A groupby operation involves some combination of splitting the\n object, applying a function, and combining the results. This can be\n used to group large amounts of data and compute operations on these\n groups.\n\n Parameters\n ----------\n by : mapping, function, label, or list of labels\n Used to determine the groups for the groupby.\n If ``by`` is a function, it\'s called on each value of the object\'s\n index. If a dict or Series is passed, the Series or dict VALUES\n will be used to determine the groups (the Series\' values are first\n aligned; see ``.align()`` method). If an ndarray is passed, the\n values are used as-is determine the groups. A label or list of\n labels may be passed to group by the columns in ``self``. Notice\n that a tuple is interpreted a (single) key.\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n Split along rows (0) or columns (1).\n level : int, level name, or sequence of such, default None\n If the axis is a MultiIndex (hierarchical), group by a particular\n level or levels.\n as_index : bool, default True\n For aggregated output, return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively "SQL-style" grouped output.\n sort : bool, default True\n Sort group keys. Get better performance by turning this off.\n Note this does not influence the order of observations within each\n group. Groupby preserves the order of rows within each group.\n group_keys : bool, default True\n When calling apply, add group keys to index to identify pieces.\n squeeze : bool, default False\n Reduce the dimensionality of the return type if possible,\n otherwise return a consistent type.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionadded:: 0.23.0\n\n **kwargs\n Optional, only accepts keyword argument \'mutated\' and is passed\n to groupby.\n\n Returns\n -------\n DataFrameGroupBy or SeriesGroupBy\n Depends on the calling object and returns groupby object that\n contains information about the groups.\n\n See Also\n --------\n resample : Convenience method for frequency conversion and resampling\n of time series.\n\n Notes\n -----\n See the `user guide\n <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.\n\n Examples\n --------\n >>> df = pd.DataFrame({\'Animal\': [\'Falcon\', \'Falcon\',\n ... \'Parrot\', \'Parrot\'],\n ... \'Max Speed\': [380., 370., 24., 26.]})\n >>> df\n Animal Max Speed\n 0 Falcon 380.0\n 1 Falcon 370.0\n 2 Parrot 24.0\n 3 Parrot 26.0\n >>> df.groupby([\'Animal\']).mean()\n Max Speed\n Animal\n Falcon 375.0\n Parrot 25.0\n\n **Hierarchical Indexes**\n\n We can groupby different levels of a hierarchical index\n using the `level` parameter:\n\n >>> arrays = [[\'Falcon\', \'Falcon\', \'Parrot\', \'Parrot\'],\n ... [\'Captive\', \'Wild\', \'Captive\', \'Wild\']]\n >>> index = pd.MultiIndex.from_arrays(arrays, names=(\'Animal\', \'Type\'))\n >>> df = pd.DataFrame({\'Max Speed\': [390., 350., 30., 20.]},\n ... 
index=index)\n >>> df\n Max Speed\n Animal Type\n Falcon Captive 390.0\n Wild 350.0\n Parrot Captive 30.0\n Wild 20.0\n >>> df.groupby(level=0).mean()\n Max Speed\n Animal\n Falcon 370.0\n Parrot 25.0\n >>> df.groupby(level=1).mean()\n Max Speed\n Type\n Captive 210.0\n Wild 185.0\n ' from pandas.core.groupby.groupby import groupby if ((level is None) and (by is None)): raise TypeError("You have to supply one of 'by' and 'level'") axis = self._get_axis_number(axis) return groupby(self, by=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, squeeze=squeeze, observed=observed, **kwargs)
6,851,353,740,028,579,000
Group DataFrame or Series using a mapper or by a Series of columns. A groupby operation involves some combination of splitting the object, applying a function, and combining the results. This can be used to group large amounts of data and compute operations on these groups. Parameters ---------- by : mapping, function, label, or list of labels Used to determine the groups for the groupby. If ``by`` is a function, it's called on each value of the object's index. If a dict or Series is passed, the Series or dict VALUES will be used to determine the groups (the Series' values are first aligned; see ``.align()`` method). If an ndarray is passed, the values are used as-is to determine the groups. A label or list of labels may be passed to group by the columns in ``self``. Notice that a tuple is interpreted as a (single) key. axis : {0 or 'index', 1 or 'columns'}, default 0 Split along rows (0) or columns (1). level : int, level name, or sequence of such, default None If the axis is a MultiIndex (hierarchical), group by a particular level or levels. as_index : bool, default True For aggregated output, return object with group labels as the index. Only relevant for DataFrame input. as_index=False is effectively "SQL-style" grouped output. sort : bool, default True Sort group keys. Get better performance by turning this off. Note this does not influence the order of observations within each group. Groupby preserves the order of rows within each group. group_keys : bool, default True When calling apply, add group keys to index to identify pieces. squeeze : bool, default False Reduce the dimensionality of the return type if possible, otherwise return a consistent type. observed : bool, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. .. versionadded:: 0.23.0 **kwargs Optional, only accepts keyword argument 'mutated' and is passed to groupby. Returns ------- DataFrameGroupBy or SeriesGroupBy Depends on the calling object and returns a groupby object that contains information about the groups. See Also -------- resample : Convenience method for frequency conversion and resampling of time series. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more. Examples -------- >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', ... 'Parrot', 'Parrot'], ... 'Max Speed': [380., 370., 24., 26.]}) >>> df Animal Max Speed 0 Falcon 380.0 1 Falcon 370.0 2 Parrot 24.0 3 Parrot 26.0 >>> df.groupby(['Animal']).mean() Max Speed Animal Falcon 375.0 Parrot 25.0 **Hierarchical Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]}, ... index=index) >>> df Max Speed Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 >>> df.groupby(level=0).mean() Max Speed Animal Falcon 370.0 Parrot 25.0 >>> df.groupby(level=1).mean() Max Speed Type Captive 210.0 Wild 185.0
pandas/core/generic.py
groupby
kapilepatel/pandas
python
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, squeeze=False, observed=False, **kwargs): '\n Group DataFrame or Series using a mapper or by a Series of columns.\n\n A groupby operation involves some combination of splitting the\n object, applying a function, and combining the results. This can be\n used to group large amounts of data and compute operations on these\n groups.\n\n Parameters\n ----------\n by : mapping, function, label, or list of labels\n Used to determine the groups for the groupby.\n If ``by`` is a function, it\'s called on each value of the object\'s\n index. If a dict or Series is passed, the Series or dict VALUES\n will be used to determine the groups (the Series\' values are first\n aligned; see ``.align()`` method). If an ndarray is passed, the\n values are used as-is determine the groups. A label or list of\n labels may be passed to group by the columns in ``self``. Notice\n that a tuple is interpreted a (single) key.\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n Split along rows (0) or columns (1).\n level : int, level name, or sequence of such, default None\n If the axis is a MultiIndex (hierarchical), group by a particular\n level or levels.\n as_index : bool, default True\n For aggregated output, return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively "SQL-style" grouped output.\n sort : bool, default True\n Sort group keys. Get better performance by turning this off.\n Note this does not influence the order of observations within each\n group. Groupby preserves the order of rows within each group.\n group_keys : bool, default True\n When calling apply, add group keys to index to identify pieces.\n squeeze : bool, default False\n Reduce the dimensionality of the return type if possible,\n otherwise return a consistent type.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionadded:: 0.23.0\n\n **kwargs\n Optional, only accepts keyword argument \'mutated\' and is passed\n to groupby.\n\n Returns\n -------\n DataFrameGroupBy or SeriesGroupBy\n Depends on the calling object and returns groupby object that\n contains information about the groups.\n\n See Also\n --------\n resample : Convenience method for frequency conversion and resampling\n of time series.\n\n Notes\n -----\n See the `user guide\n <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.\n\n Examples\n --------\n >>> df = pd.DataFrame({\'Animal\': [\'Falcon\', \'Falcon\',\n ... \'Parrot\', \'Parrot\'],\n ... \'Max Speed\': [380., 370., 24., 26.]})\n >>> df\n Animal Max Speed\n 0 Falcon 380.0\n 1 Falcon 370.0\n 2 Parrot 24.0\n 3 Parrot 26.0\n >>> df.groupby([\'Animal\']).mean()\n Max Speed\n Animal\n Falcon 375.0\n Parrot 25.0\n\n **Hierarchical Indexes**\n\n We can groupby different levels of a hierarchical index\n using the `level` parameter:\n\n >>> arrays = [[\'Falcon\', \'Falcon\', \'Parrot\', \'Parrot\'],\n ... [\'Captive\', \'Wild\', \'Captive\', \'Wild\']]\n >>> index = pd.MultiIndex.from_arrays(arrays, names=(\'Animal\', \'Type\'))\n >>> df = pd.DataFrame({\'Max Speed\': [390., 350., 30., 20.]},\n ... 
index=index)\n >>> df\n Max Speed\n Animal Type\n Falcon Captive 390.0\n Wild 350.0\n Parrot Captive 30.0\n Wild 20.0\n >>> df.groupby(level=0).mean()\n Max Speed\n Animal\n Falcon 370.0\n Parrot 25.0\n >>> df.groupby(level=1).mean()\n Max Speed\n Type\n Captive 210.0\n Wild 185.0\n ' from pandas.core.groupby.groupby import groupby if ((level is None) and (by is None)): raise TypeError("You have to supply one of 'by' and 'level'") axis = self._get_axis_number(axis) return groupby(self, by=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, squeeze=squeeze, observed=observed, **kwargs)
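A small split-apply-combine sketch based on the docstring's data, also showing the "SQL-style" as_index=False output described above:

import pandas as pd

df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', 'Parrot', 'Parrot'],
                   'Max Speed': [380.0, 370.0, 24.0, 26.0]})

# Group on a column label and aggregate; as_index=False keeps the key
# as an ordinary column ("SQL-style" output) instead of the result index.
print(df.groupby('Animal', as_index=False).mean())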
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None): "\n Convert TimeSeries to specified frequency.\n\n Optionally provide filling method to pad/backfill missing values.\n\n Returns the original data conformed to a new index with the specified\n frequency. ``resample`` is more appropriate if an operation, such as\n summarization, is necessary to represent the data at the new frequency.\n\n Parameters\n ----------\n freq : DateOffset object, or string\n method : {'backfill'/'bfill', 'pad'/'ffill'}, default None\n Method to use for filling holes in reindexed Series (note this\n does not fill NaNs that already were present):\n\n * 'pad' / 'ffill': propagate last valid observation forward to next\n valid\n * 'backfill' / 'bfill': use NEXT valid observation to fill\n how : {'start', 'end'}, default end\n For PeriodIndex only, see PeriodIndex.asfreq\n normalize : bool, default False\n Whether to reset output index to midnight\n fill_value : scalar, optional\n Value to use for missing values, applied during upsampling (note\n this does not fill NaNs that already were present).\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n converted : same type as caller\n\n See Also\n --------\n reindex\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n\n Start by creating a series with 4 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=4, freq='T')\n >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)\n >>> df = pd.DataFrame({'s':series})\n >>> df\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:03:00 3.0\n\n Upsample the series into 30 second bins.\n\n >>> df.asfreq(freq='30S')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 NaN\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``fill value``.\n\n >>> df.asfreq(freq='30S', fill_value=9.0)\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 9.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 9.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 9.0\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``method``.\n\n >>> df.asfreq(freq='30S', method='bfill')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 2.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 3.0\n 2000-01-01 00:03:00 3.0\n " from pandas.core.resample import asfreq return asfreq(self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value)
-73,085,711,971,995,800
Convert TimeSeries to specified frequency. Optionally provide filling method to pad/backfill missing values. Returns the original data conformed to a new index with the specified frequency. ``resample`` is more appropriate if an operation, such as summarization, is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset object, or string method : {'backfill'/'bfill', 'pad'/'ffill'}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill how : {'start', 'end'}, default 'end' For PeriodIndex only, see PeriodIndex.asfreq normalize : bool, default False Whether to reset output index to midnight fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). .. versionadded:: 0.20.0 Returns ------- converted : same type as caller See Also -------- reindex Notes ----- To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one-minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({'s':series}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill_value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0
pandas/core/generic.py
asfreq
kapilepatel/pandas
python
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None): "\n Convert TimeSeries to specified frequency.\n\n Optionally provide filling method to pad/backfill missing values.\n\n Returns the original data conformed to a new index with the specified\n frequency. ``resample`` is more appropriate if an operation, such as\n summarization, is necessary to represent the data at the new frequency.\n\n Parameters\n ----------\n freq : DateOffset object, or string\n method : {'backfill'/'bfill', 'pad'/'ffill'}, default None\n Method to use for filling holes in reindexed Series (note this\n does not fill NaNs that already were present):\n\n * 'pad' / 'ffill': propagate last valid observation forward to next\n valid\n * 'backfill' / 'bfill': use NEXT valid observation to fill\n how : {'start', 'end'}, default end\n For PeriodIndex only, see PeriodIndex.asfreq\n normalize : bool, default False\n Whether to reset output index to midnight\n fill_value : scalar, optional\n Value to use for missing values, applied during upsampling (note\n this does not fill NaNs that already were present).\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n converted : same type as caller\n\n See Also\n --------\n reindex\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n\n Start by creating a series with 4 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=4, freq='T')\n >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)\n >>> df = pd.DataFrame({'s':series})\n >>> df\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:03:00 3.0\n\n Upsample the series into 30 second bins.\n\n >>> df.asfreq(freq='30S')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 NaN\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``fill value``.\n\n >>> df.asfreq(freq='30S', fill_value=9.0)\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 9.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 9.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 9.0\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``method``.\n\n >>> df.asfreq(freq='30S', method='bfill')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 2.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 3.0\n 2000-01-01 00:03:00 3.0\n " from pandas.core.resample import asfreq return asfreq(self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value)
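A compact sketch of the upsampling behaviour documented above, mirroring the docstring's fill_value example:

import pandas as pd

index = pd.date_range('1/1/2000', periods=4, freq='T')
df = pd.DataFrame({'s': [0.0, None, 2.0, 3.0]}, index=index)

# Conform to a 30-second index; slots introduced by the upsampling take
# fill_value, while the NaN already present in 's' is left untouched.
print(df.asfreq('30S', fill_value=9.0))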
def at_time(self, time, asof=False, axis=None): "\n Select values at particular time of day (e.g. 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> ts.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n " if (axis is None): axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) try: indexer = index.indexer_at_time(time, asof=asof) except AttributeError: raise TypeError('Index must be DatetimeIndex') return self._take(indexer, axis=axis)
1,794,167,630,809,698,000
Select values at a particular time of day (e.g., 9:30 AM). Parameters ---------- time : datetime.time or str axis : {0 or 'index', 1 or 'columns'}, default 0 .. versionadded:: 0.24.0 Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at a particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4
pandas/core/generic.py
at_time
kapilepatel/pandas
python
def at_time(self, time, asof=False, axis=None): "\n Select values at particular time of day (e.g. 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> ts.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n " if (axis is None): axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) try: indexer = index.indexer_at_time(time, asof=asof) except AttributeError: raise TypeError('Index must be DatetimeIndex') return self._take(indexer, axis=axis)
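A minimal runnable sketch of the at_time selection shown in the docstring:

import pandas as pd

i = pd.date_range('2018-04-09', periods=4, freq='12H')
ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)

# Keep only rows stamped exactly at noon; a non-DatetimeIndex would
# raise TypeError, as documented above.
print(ts.at_time('12:00'))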
def between_time(self, start_time, end_time, include_start=True, include_end=True, axis=None): "\n Select values between particular times of the day (e.g., 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n end_time : datetime.time or str\n include_start : bool, default True\n include_end : bool, default True\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> ts.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> ts.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n " if (axis is None): axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) try: indexer = index.indexer_between_time(start_time, end_time, include_start=include_start, include_end=include_end) except AttributeError: raise TypeError('Index must be DatetimeIndex') return self._take(indexer, axis=axis)
7,100,925,896,181,392,000
Select values between particular times of the day (e.g., 9:00-9:30 AM). By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str end_time : datetime.time or str include_start : bool, default True include_end : bool, default True axis : {0 or 'index', 1 or 'columns'}, default 0 .. versionadded:: 0.24.0 Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> ts.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> ts.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4
pandas/core/generic.py
between_time
kapilepatel/pandas
python
def between_time(self, start_time, end_time, include_start=True, include_end=True, axis=None): "\n Select values between particular times of the day (e.g., 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n end_time : datetime.time or str\n include_start : bool, default True\n include_end : bool, default True\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> ts.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> ts.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n " if (axis is None): axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) try: indexer = index.indexer_between_time(start_time, end_time, include_start=include_start, include_end=include_end) except AttributeError: raise TypeError('Index must be DatetimeIndex') return self._take(indexer, axis=axis)
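A runnable sketch of between_time, including the swapped-bounds trick described above for selecting the complement of a window:

import pandas as pd

i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)

# Rows whose clock time falls within [0:15, 0:45]; swapping the bounds
# selects the complement of that daily window instead.
inside = ts.between_time('0:15', '0:45')
outside = ts.between_time('0:45', '0:15')
print(inside, outside, sep='\n')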
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None, label=None, convention='start', kind=None, loffset=None, limit=None, base=0, on=None, level=None): '\n Resample time-series data.\n\n Convenience method for frequency conversion and resampling of time\n series. Object must have a datetime-like index (`DatetimeIndex`,\n `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values\n to the `on` or `level` keyword.\n\n Parameters\n ----------\n rule : str\n The offset string or object representing target conversion.\n how : str\n Method for down/re-sampling, default to \'mean\' for downsampling.\n\n .. deprecated:: 0.18.0\n The new syntax is ``.resample(...).mean()``, or\n ``.resample(...).apply(<func>)``\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n Which axis to use for up- or down-sampling. For `Series` this\n will default to 0, i.e. along the rows. Must be\n `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.\n fill_method : str, default None\n Filling method for upsampling.\n\n .. deprecated:: 0.18.0\n The new syntax is ``.resample(...).<func>()``,\n e.g. ``.resample(...).pad()``\n closed : {\'right\', \'left\'}, default None\n Which side of bin interval is closed. The default is \'left\'\n for all frequency offsets except for \'M\', \'A\', \'Q\', \'BM\',\n \'BA\', \'BQ\', and \'W\' which all have a default of \'right\'.\n label : {\'right\', \'left\'}, default None\n Which bin edge label to label bucket with. The default is \'left\'\n for all frequency offsets except for \'M\', \'A\', \'Q\', \'BM\',\n \'BA\', \'BQ\', and \'W\' which all have a default of \'right\'.\n convention : {\'start\', \'end\', \'s\', \'e\'}, default \'start\'\n For `PeriodIndex` only, controls whether to use the start or\n end of `rule`.\n kind : {\'timestamp\', \'period\'}, optional, default None\n Pass \'timestamp\' to convert the resulting index to a\n `DateTimeIndex` or \'period\' to convert it to a `PeriodIndex`.\n By default the input representation is retained.\n loffset : timedelta, default None\n Adjust the resampled time labels.\n limit : int, default None\n Maximum size gap when reindexing with `fill_method`.\n\n .. deprecated:: 0.18.0\n base : int, default 0\n For frequencies that evenly subdivide 1 day, the "origin" of the\n aggregated intervals. For example, for \'5min\' frequency, base could\n range from 0 through 4. Defaults to 0.\n on : str, optional\n For a DataFrame, column to use instead of index for resampling.\n Column must be datetime-like.\n\n .. versionadded:: 0.19.0\n\n level : str or int, optional\n For a MultiIndex, level (name or number) to use for\n resampling. `level` must be datetime-like.\n\n .. 
versionadded:: 0.19.0\n\n Returns\n -------\n Resampler object\n\n See Also\n --------\n groupby : Group by mapping, function, label, or list of labels.\n Series.resample : Resample a Series.\n DataFrame.resample: Resample a DataFrame.\n\n Notes\n -----\n See the `user guide\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_\n for more.\n\n To learn more about the offset strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n\n Start by creating a series with 9 one minute timestamps.\n\n >>> index = pd.date_range(\'1/1/2000\', periods=9, freq=\'T\')\n >>> series = pd.Series(range(9), index=index)\n >>> series\n 2000-01-01 00:00:00 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:02:00 2\n 2000-01-01 00:03:00 3\n 2000-01-01 00:04:00 4\n 2000-01-01 00:05:00 5\n 2000-01-01 00:06:00 6\n 2000-01-01 00:07:00 7\n 2000-01-01 00:08:00 8\n Freq: T, dtype: int64\n\n Downsample the series into 3 minute bins and sum the values\n of the timestamps falling into a bin.\n\n >>> series.resample(\'3T\').sum()\n 2000-01-01 00:00:00 3\n 2000-01-01 00:03:00 12\n 2000-01-01 00:06:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but label each\n bin using the right edge instead of the left. Please note that the\n value in the bucket used as the label is not included in the bucket,\n which it labels. For example, in the original series the\n bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed\n value in the resampled bucket with the label ``2000-01-01 00:03:00``\n does not include 3 (if it did, the summed value would be 6, not 3).\n To include this value close the right side of the bin interval as\n illustrated in the example below this one.\n\n >>> series.resample(\'3T\', label=\'right\').sum()\n 2000-01-01 00:03:00 3\n 2000-01-01 00:06:00 12\n 2000-01-01 00:09:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but close the right\n side of the bin interval.\n\n >>> series.resample(\'3T\', label=\'right\', closed=\'right\').sum()\n 2000-01-01 00:00:00 0\n 2000-01-01 00:03:00 6\n 2000-01-01 00:06:00 15\n 2000-01-01 00:09:00 15\n Freq: 3T, dtype: int64\n\n Upsample the series into 30 second bins.\n\n >>> series.resample(\'30S\').asfreq()[0:5] # Select first 5 rows\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 1.0\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n Freq: 30S, dtype: float64\n\n Upsample the series into 30 second bins and fill the ``NaN``\n values using the ``pad`` method.\n\n >>> series.resample(\'30S\').pad()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 1\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Upsample the series into 30 second bins and fill the\n ``NaN`` values using the ``bfill`` method.\n\n >>> series.resample(\'30S\').bfill()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 1\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 2\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Pass a custom function via ``apply``\n\n >>> def custom_resampler(array_like):\n ... 
return np.sum(array_like) + 5\n ...\n >>> series.resample(\'3T\').apply(custom_resampler)\n 2000-01-01 00:00:00 8\n 2000-01-01 00:03:00 17\n 2000-01-01 00:06:00 26\n Freq: 3T, dtype: int64\n\n For a Series with a PeriodIndex, the keyword `convention` can be\n used to control whether to use the start or end of `rule`.\n\n Resample a year by quarter using \'start\' `convention`. Values are\n assigned to the first quarter of the period.\n\n >>> s = pd.Series([1, 2], index=pd.period_range(\'2012-01-01\',\n ... freq=\'A\',\n ... periods=2))\n >>> s\n 2012 1\n 2013 2\n Freq: A-DEC, dtype: int64\n >>> s.resample(\'Q\', convention=\'start\').asfreq()\n 2012Q1 1.0\n 2012Q2 NaN\n 2012Q3 NaN\n 2012Q4 NaN\n 2013Q1 2.0\n 2013Q2 NaN\n 2013Q3 NaN\n 2013Q4 NaN\n Freq: Q-DEC, dtype: float64\n\n Resample quarters by month using \'end\' `convention`. Values are\n assigned to the last month of the period.\n\n >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range(\'2018-01-01\',\n ... freq=\'Q\',\n ... periods=4))\n >>> q\n 2018Q1 1\n 2018Q2 2\n 2018Q3 3\n 2018Q4 4\n Freq: Q-DEC, dtype: int64\n >>> q.resample(\'M\', convention=\'end\').asfreq()\n 2018-03 1.0\n 2018-04 NaN\n 2018-05 NaN\n 2018-06 2.0\n 2018-07 NaN\n 2018-08 NaN\n 2018-09 3.0\n 2018-10 NaN\n 2018-11 NaN\n 2018-12 4.0\n Freq: M, dtype: float64\n\n For DataFrame objects, the keyword `on` can be used to specify the\n column instead of the index for resampling.\n\n >>> d = dict({\'price\': [10, 11, 9, 13, 14, 18, 17, 19],\n ... \'volume\': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df = pd.DataFrame(d)\n >>> df[\'week_starting\'] = pd.date_range(\'01/01/2018\',\n ... periods=8,\n ... freq=\'W\')\n >>> df\n price volume week_starting\n 0 10 50 2018-01-07\n 1 11 60 2018-01-14\n 2 9 40 2018-01-21\n 3 13 100 2018-01-28\n 4 14 50 2018-02-04\n 5 18 100 2018-02-11\n 6 17 40 2018-02-18\n 7 19 50 2018-02-25\n >>> df.resample(\'M\', on=\'week_starting\').mean()\n price volume\n week_starting\n 2018-01-31 10.75 62.5\n 2018-02-28 17.00 60.0\n\n For a DataFrame with MultiIndex, the keyword `level` can be used to\n specify on which level the resampling needs to take place.\n\n >>> days = pd.date_range(\'1/1/2000\', periods=4, freq=\'D\')\n >>> d2 = dict({\'price\': [10, 11, 9, 13, 14, 18, 17, 19],\n ... \'volume\': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df2 = pd.DataFrame(d2,\n ... index=pd.MultiIndex.from_product([days,\n ... [\'morning\',\n ... \'afternoon\']]\n ... ))\n >>> df2\n price volume\n 2000-01-01 morning 10 50\n afternoon 11 60\n 2000-01-02 morning 9 40\n afternoon 13 100\n 2000-01-03 morning 14 50\n afternoon 18 100\n 2000-01-04 morning 17 40\n afternoon 19 50\n >>> df2.resample(\'D\', level=0).sum()\n price volume\n 2000-01-01 21 110\n 2000-01-02 22 140\n 2000-01-03 32 150\n 2000-01-04 36 90\n ' from pandas.core.resample import resample, _maybe_process_deprecations axis = self._get_axis_number(axis) r = resample(self, freq=rule, label=label, closed=closed, axis=axis, kind=kind, loffset=loffset, convention=convention, base=base, key=on, level=level) return _maybe_process_deprecations(r, how=how, fill_method=fill_method, limit=limit)
4,481,400,105,411,095,000
Resample time-series data. Convenience method for frequency conversion and resampling of time series. Object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values to the `on` or `level` keyword. Parameters ---------- rule : str The offset string or object representing target conversion. how : str Method for down/re-sampling; defaults to 'mean' for downsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).mean()``, or ``.resample(...).apply(<func>)`` axis : {0 or 'index', 1 or 'columns'}, default 0 Which axis to use for up- or down-sampling. For `Series` this will default to 0, i.e. along the rows. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. fill_method : str, default None Filling method for upsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).<func>()``, e.g. ``.resample(...).pad()`` closed : {'right', 'left'}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {'right', 'left'}, default None Which bin edge label to label the bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {'start', 'end', 's', 'e'}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. kind : {'timestamp', 'period'}, optional, default None Pass 'timestamp' to convert the resulting index to a `DatetimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. loffset : timedelta, default None Adjust the resampled time labels. limit : int, default None Maximum size gap when reindexing with `fill_method`. .. deprecated:: 0.18.0 base : int, default 0 For frequencies that evenly subdivide 1 day, the "origin" of the aggregated intervals. For example, for '5min' frequency, base could range from 0 through 4. Defaults to 0. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. .. versionadded:: 0.19.0 level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. .. versionadded:: 0.19.0 Returns ------- Resampler object See Also -------- groupby : Group by mapping, function, label, or list of labels. Series.resample : Resample a Series. DataFrame.resample: Resample a DataFrame. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_ for more. To learn more about the offset strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 9 one-minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left.
Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. >>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``pad`` method. >>> series.resample('30S').pad()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(array_like): ... return np.sum(array_like) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... 
freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df2 = pd.DataFrame(d2, ... index=pd.MultiIndex.from_product([days, ... ['morning', ... 'afternoon']] ... )) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90
pandas/core/generic.py
resample
kapilepatel/pandas
python
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None, label=None, convention='start', kind=None, loffset=None, limit=None, base=0, on=None, level=None): '\n Resample time-series data.\n\n Convenience method for frequency conversion and resampling of time\n series. Object must have a datetime-like index (`DatetimeIndex`,\n `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values\n to the `on` or `level` keyword.\n\n Parameters\n ----------\n rule : str\n The offset string or object representing target conversion.\n how : str\n Method for down/re-sampling, default to \'mean\' for downsampling.\n\n .. deprecated:: 0.18.0\n The new syntax is ``.resample(...).mean()``, or\n ``.resample(...).apply(<func>)``\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n Which axis to use for up- or down-sampling. For `Series` this\n will default to 0, i.e. along the rows. Must be\n `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.\n fill_method : str, default None\n Filling method for upsampling.\n\n .. deprecated:: 0.18.0\n The new syntax is ``.resample(...).<func>()``,\n e.g. ``.resample(...).pad()``\n closed : {\'right\', \'left\'}, default None\n Which side of bin interval is closed. The default is \'left\'\n for all frequency offsets except for \'M\', \'A\', \'Q\', \'BM\',\n \'BA\', \'BQ\', and \'W\' which all have a default of \'right\'.\n label : {\'right\', \'left\'}, default None\n Which bin edge label to label bucket with. The default is \'left\'\n for all frequency offsets except for \'M\', \'A\', \'Q\', \'BM\',\n \'BA\', \'BQ\', and \'W\' which all have a default of \'right\'.\n convention : {\'start\', \'end\', \'s\', \'e\'}, default \'start\'\n For `PeriodIndex` only, controls whether to use the start or\n end of `rule`.\n kind : {\'timestamp\', \'period\'}, optional, default None\n Pass \'timestamp\' to convert the resulting index to a\n `DateTimeIndex` or \'period\' to convert it to a `PeriodIndex`.\n By default the input representation is retained.\n loffset : timedelta, default None\n Adjust the resampled time labels.\n limit : int, default None\n Maximum size gap when reindexing with `fill_method`.\n\n .. deprecated:: 0.18.0\n base : int, default 0\n For frequencies that evenly subdivide 1 day, the "origin" of the\n aggregated intervals. For example, for \'5min\' frequency, base could\n range from 0 through 4. Defaults to 0.\n on : str, optional\n For a DataFrame, column to use instead of index for resampling.\n Column must be datetime-like.\n\n .. versionadded:: 0.19.0\n\n level : str or int, optional\n For a MultiIndex, level (name or number) to use for\n resampling. `level` must be datetime-like.\n\n .. 
versionadded:: 0.19.0\n\n Returns\n -------\n Resampler object\n\n See Also\n --------\n groupby : Group by mapping, function, label, or list of labels.\n Series.resample : Resample a Series.\n DataFrame.resample: Resample a DataFrame.\n\n Notes\n -----\n See the `user guide\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_\n for more.\n\n To learn more about the offset strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n\n Start by creating a series with 9 one minute timestamps.\n\n >>> index = pd.date_range(\'1/1/2000\', periods=9, freq=\'T\')\n >>> series = pd.Series(range(9), index=index)\n >>> series\n 2000-01-01 00:00:00 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:02:00 2\n 2000-01-01 00:03:00 3\n 2000-01-01 00:04:00 4\n 2000-01-01 00:05:00 5\n 2000-01-01 00:06:00 6\n 2000-01-01 00:07:00 7\n 2000-01-01 00:08:00 8\n Freq: T, dtype: int64\n\n Downsample the series into 3 minute bins and sum the values\n of the timestamps falling into a bin.\n\n >>> series.resample(\'3T\').sum()\n 2000-01-01 00:00:00 3\n 2000-01-01 00:03:00 12\n 2000-01-01 00:06:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but label each\n bin using the right edge instead of the left. Please note that the\n value in the bucket used as the label is not included in the bucket,\n which it labels. For example, in the original series the\n bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed\n value in the resampled bucket with the label ``2000-01-01 00:03:00``\n does not include 3 (if it did, the summed value would be 6, not 3).\n To include this value close the right side of the bin interval as\n illustrated in the example below this one.\n\n >>> series.resample(\'3T\', label=\'right\').sum()\n 2000-01-01 00:03:00 3\n 2000-01-01 00:06:00 12\n 2000-01-01 00:09:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but close the right\n side of the bin interval.\n\n >>> series.resample(\'3T\', label=\'right\', closed=\'right\').sum()\n 2000-01-01 00:00:00 0\n 2000-01-01 00:03:00 6\n 2000-01-01 00:06:00 15\n 2000-01-01 00:09:00 15\n Freq: 3T, dtype: int64\n\n Upsample the series into 30 second bins.\n\n >>> series.resample(\'30S\').asfreq()[0:5] # Select first 5 rows\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 1.0\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n Freq: 30S, dtype: float64\n\n Upsample the series into 30 second bins and fill the ``NaN``\n values using the ``pad`` method.\n\n >>> series.resample(\'30S\').pad()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 1\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Upsample the series into 30 second bins and fill the\n ``NaN`` values using the ``bfill`` method.\n\n >>> series.resample(\'30S\').bfill()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 1\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 2\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Pass a custom function via ``apply``\n\n >>> def custom_resampler(array_like):\n ... 
return np.sum(array_like) + 5\n ...\n >>> series.resample(\'3T\').apply(custom_resampler)\n 2000-01-01 00:00:00 8\n 2000-01-01 00:03:00 17\n 2000-01-01 00:06:00 26\n Freq: 3T, dtype: int64\n\n For a Series with a PeriodIndex, the keyword `convention` can be\n used to control whether to use the start or end of `rule`.\n\n Resample a year by quarter using \'start\' `convention`. Values are\n assigned to the first quarter of the period.\n\n >>> s = pd.Series([1, 2], index=pd.period_range(\'2012-01-01\',\n ... freq=\'A\',\n ... periods=2))\n >>> s\n 2012 1\n 2013 2\n Freq: A-DEC, dtype: int64\n >>> s.resample(\'Q\', convention=\'start\').asfreq()\n 2012Q1 1.0\n 2012Q2 NaN\n 2012Q3 NaN\n 2012Q4 NaN\n 2013Q1 2.0\n 2013Q2 NaN\n 2013Q3 NaN\n 2013Q4 NaN\n Freq: Q-DEC, dtype: float64\n\n Resample quarters by month using \'end\' `convention`. Values are\n assigned to the last month of the period.\n\n >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range(\'2018-01-01\',\n ... freq=\'Q\',\n ... periods=4))\n >>> q\n 2018Q1 1\n 2018Q2 2\n 2018Q3 3\n 2018Q4 4\n Freq: Q-DEC, dtype: int64\n >>> q.resample(\'M\', convention=\'end\').asfreq()\n 2018-03 1.0\n 2018-04 NaN\n 2018-05 NaN\n 2018-06 2.0\n 2018-07 NaN\n 2018-08 NaN\n 2018-09 3.0\n 2018-10 NaN\n 2018-11 NaN\n 2018-12 4.0\n Freq: M, dtype: float64\n\n For DataFrame objects, the keyword `on` can be used to specify the\n column instead of the index for resampling.\n\n >>> d = dict({\'price\': [10, 11, 9, 13, 14, 18, 17, 19],\n ... \'volume\': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df = pd.DataFrame(d)\n >>> df[\'week_starting\'] = pd.date_range(\'01/01/2018\',\n ... periods=8,\n ... freq=\'W\')\n >>> df\n price volume week_starting\n 0 10 50 2018-01-07\n 1 11 60 2018-01-14\n 2 9 40 2018-01-21\n 3 13 100 2018-01-28\n 4 14 50 2018-02-04\n 5 18 100 2018-02-11\n 6 17 40 2018-02-18\n 7 19 50 2018-02-25\n >>> df.resample(\'M\', on=\'week_starting\').mean()\n price volume\n week_starting\n 2018-01-31 10.75 62.5\n 2018-02-28 17.00 60.0\n\n For a DataFrame with MultiIndex, the keyword `level` can be used to\n specify on which level the resampling needs to take place.\n\n >>> days = pd.date_range(\'1/1/2000\', periods=4, freq=\'D\')\n >>> d2 = dict({\'price\': [10, 11, 9, 13, 14, 18, 17, 19],\n ... \'volume\': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df2 = pd.DataFrame(d2,\n ... index=pd.MultiIndex.from_product([days,\n ... [\'morning\',\n ... \'afternoon\']]\n ... ))\n >>> df2\n price volume\n 2000-01-01 morning 10 50\n afternoon 11 60\n 2000-01-02 morning 9 40\n afternoon 13 100\n 2000-01-03 morning 14 50\n afternoon 18 100\n 2000-01-04 morning 17 40\n afternoon 19 50\n >>> df2.resample(\'D\', level=0).sum()\n price volume\n 2000-01-01 21 110\n 2000-01-02 22 140\n 2000-01-03 32 150\n 2000-01-04 36 90\n ' from pandas.core.resample import resample, _maybe_process_deprecations axis = self._get_axis_number(axis) r = resample(self, freq=rule, label=label, closed=closed, axis=axis, kind=kind, loffset=loffset, convention=convention, base=base, key=on, level=level) return _maybe_process_deprecations(r, how=how, fill_method=fill_method, limit=limit)
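A minimal sketch (expected output, not captured from the source record) of the one documented `resample` parameter the examples above never exercise, `base`, which shifts the origin of the 3-minute bins by one minute for the same `series`:

>>> import pandas as pd
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series.resample('3T', base=1).sum()  # bin edges at :58, :01, :04, :07
1999-12-31 23:58:00     0
2000-01-01 00:01:00     6
2000-01-01 00:04:00    15
2000-01-01 00:07:00    15
Freq: 3T, dtype: int64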
def first(self, offset): "\n Convenience method for subsetting initial periods of time series data\n based on a date offset.\n\n Parameters\n ----------\n offset : string, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n last : Select final periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the first 3 days:\n\n >>> ts.first('3D')\n A\n 2018-04-09 1\n 2018-04-11 2\n\n Notice the data for the first 3 calendar days were returned, not the first\n 3 days observed in the dataset, and therefore data for 2018-04-13 was\n not returned.\n " if (not isinstance(self.index, DatetimeIndex)): raise TypeError("'first' only supports a DatetimeIndex index") if (len(self.index) == 0): return self offset = to_offset(offset) end_date = end = (self.index[0] + offset) if ((not offset.isAnchored()) and hasattr(offset, '_inc')): if (end_date in self.index): end = self.index.searchsorted(end_date, side='left') return self.iloc[:end] return self.loc[:end]
7,257,172,351,481,007,000
Convenience method for subsetting initial periods of time series data based on a date offset. Parameters ---------- offset : string, DateOffset, dateutil.relativedelta Returns ------- subset : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for the first 3 calendar days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned.
pandas/core/generic.py
first
kapilepatel/pandas
python
def first(self, offset): "\n Convenience method for subsetting initial periods of time series data\n based on a date offset.\n\n Parameters\n ----------\n offset : string, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n last : Select final periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the first 3 days:\n\n >>> ts.first('3D')\n A\n 2018-04-09 1\n 2018-04-11 2\n\n Notice the data for the first 3 calendar days were returned, not the first\n 3 days observed in the dataset, and therefore data for 2018-04-13 was\n not returned.\n " if (not isinstance(self.index, DatetimeIndex)): raise TypeError("'first' only supports a DatetimeIndex index") if (len(self.index) == 0): return self offset = to_offset(offset) end_date = end = (self.index[0] + offset) if ((not offset.isAnchored()) and hasattr(offset, '_inc')): if (end_date in self.index): end = self.index.searchsorted(end_date, side='left') return self.iloc[:end] return self.loc[:end]
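Worth spelling out from the branch in the body above: when the offset is a plain tick (the `not offset.isAnchored() and hasattr(offset, '_inc')` case) and the computed boundary `index[0] + offset` happens to sit in the index, `searchsorted(..., side='left')` makes that boundary exclusive. A sketch reusing the docstring's data (expected output, not captured):

>>> import pandas as pd
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts.first('2D')  # 2018-04-11 equals index[0] + 2 days, so it is excluded
            A
2018-04-09  1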
def last(self, offset): "\n Convenience method for subsetting final periods of time series data\n based on a date offset.\n\n Parameters\n ----------\n offset : string, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n first : Select initial periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> ts.last('3D')\n A\n 2018-04-13 3\n 2018-04-15 4\n\n Notice the data for the last 3 calendar days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n " if (not isinstance(self.index, DatetimeIndex)): raise TypeError("'last' only supports a DatetimeIndex index") if (len(self.index) == 0): return self offset = to_offset(offset) start_date = (self.index[(- 1)] - offset) start = self.index.searchsorted(start_date, side='right') return self.iloc[start:]
-2,487,377,775,119,907,000
Convenience method for subsetting final periods of time series data based on a date offset. Parameters ---------- offset : string, DateOffset, dateutil.relativedelta Returns ------- subset : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for the last 3 calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned.
pandas/core/generic.py
last
kapilepatel/pandas
python
def last(self, offset): "\n Convenience method for subsetting final periods of time series data\n based on a date offset.\n\n Parameters\n ----------\n offset : string, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n first : Select initial periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> ts.last('3D')\n A\n 2018-04-13 3\n 2018-04-15 4\n\n Notice the data for the last 3 calendar days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n " if (not isinstance(self.index, DatetimeIndex)): raise TypeError("'last' only supports a DatetimeIndex index") if (len(self.index) == 0): return self offset = to_offset(offset) start_date = (self.index[(- 1)] - offset) start = self.index.searchsorted(start_date, side='right') return self.iloc[start:]
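Symmetrically to `first`, the `searchsorted(..., side='right')` call in the body above makes the boundary `index[-1] - offset` exclusive when it is present in the index; a sketch with the docstring's data (expected output, not captured):

>>> import pandas as pd
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts.last('2D')  # 2018-04-13 equals index[-1] - 2 days, so it is excluded
            A
2018-04-15  4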
def rank(self, axis=0, method='average', numeric_only=None, na_option='keep', ascending=True, pct=False): "\n Compute numerical data ranks (1 through n) along axis. Equal values are\n assigned a rank that is the average of the ranks of those values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n index to direct ranking\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n numeric_only : boolean, default None\n Include only float, int, boolean data. Valid only for DataFrame or\n Panel objects\n na_option : {'keep', 'top', 'bottom'}\n * keep: leave NA values where they are\n * top: smallest rank if ascending\n * bottom: smallest rank if descending\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n pct : boolean, default False\n Computes percentage rank of data\n\n Returns\n -------\n ranks : same type as caller\n " axis = self._get_axis_number(axis) if (self.ndim > 2): msg = 'rank does not make sense when ndim > 2' raise NotImplementedError(msg) if (na_option not in {'keep', 'top', 'bottom'}): msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): ranks = algos.rank(data.values, axis=axis, method=method, ascending=ascending, na_option=na_option, pct=pct) ranks = self._constructor(ranks, **data._construct_axes_dict()) return ranks.__finalize__(self) if (numeric_only is None): try: return ranker(self) except TypeError: numeric_only = True if numeric_only: data = self._get_numeric_data() else: data = self return ranker(data)
-6,817,094,016,526,466,000
Compute numerical data ranks (1 through n) along axis. Equal values are assigned a rank that is the average of the ranks of those values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 index to direct ranking method : {'average', 'min', 'max', 'first', 'dense'} * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups numeric_only : boolean, default None Include only float, int, boolean data. Valid only for DataFrame or Panel objects na_option : {'keep', 'top', 'bottom'} * keep: leave NA values where they are * top: smallest rank if ascending * bottom: smallest rank if descending ascending : boolean, default True False for ranks by high (1) to low (N) pct : boolean, default False Computes percentage rank of data Returns ------- ranks : same type as caller
pandas/core/generic.py
rank
kapilepatel/pandas
python
def rank(self, axis=0, method='average', numeric_only=None, na_option='keep', ascending=True, pct=False): "\n Compute numerical data ranks (1 through n) along axis. Equal values are\n assigned a rank that is the average of the ranks of those values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n index to direct ranking\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n numeric_only : boolean, default None\n Include only float, int, boolean data. Valid only for DataFrame or\n Panel objects\n na_option : {'keep', 'top', 'bottom'}\n * keep: leave NA values where they are\n * top: smallest rank if ascending\n * bottom: smallest rank if descending\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n pct : boolean, default False\n Computes percentage rank of data\n\n Returns\n -------\n ranks : same type as caller\n " axis = self._get_axis_number(axis) if (self.ndim > 2): msg = 'rank does not make sense when ndim > 2' raise NotImplementedError(msg) if (na_option not in {'keep', 'top', 'bottom'}): msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): ranks = algos.rank(data.values, axis=axis, method=method, ascending=ascending, na_option=na_option, pct=pct) ranks = self._constructor(ranks, **data._construct_axes_dict()) return ranks.__finalize__(self) if (numeric_only is None): try: return ranker(self) except TypeError: numeric_only = True if numeric_only: data = self._get_numeric_data() else: data = self return ranker(data)
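The `rank` docstring ships no examples, so here is a minimal sketch of the tie-handling and NA options it documents (outputs are what the stated rules imply, not captured from a live session):

>>> import pandas as pd
>>> import numpy as np
>>> s = pd.Series([3, 1, 4, 1, np.nan])
>>> s.rank()                    # 'average': ties share the mean of their ranks
0    3.0
1    1.5
2    4.0
3    1.5
4    NaN
dtype: float64
>>> s.rank(method='dense')      # rank grows by exactly 1 between groups
0    2.0
1    1.0
2    3.0
3    1.0
4    NaN
dtype: float64
>>> s.rank(na_option='bottom')  # NaN is ranked last instead of left as NaN
0    3.0
1    1.5
2    4.0
3    1.5
4    5.0
dtype: float64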
def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, errors='raise', try_cast=False): '\n Equivalent to public method `where`, except that `other` is not\n applied as a function even if callable. Used in __setitem__.\n ' inplace = validate_bool_kwarg(inplace, 'inplace') cond = com.apply_if_callable(cond, self) if isinstance(cond, NDFrame): (cond, _) = cond.align(self, join='right', broadcast_axis=1) else: if (not hasattr(cond, 'shape')): cond = np.asanyarray(cond) if (cond.shape != self.shape): raise ValueError('Array conditional must be same shape as self') cond = self._constructor(cond, **self._construct_axes_dict()) fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = 'Boolean array expected for the condition, not {dtype}' if (not isinstance(cond, pd.DataFrame)): if (not is_bool_dtype(cond)): raise ValueError(msg.format(dtype=cond.dtype)) elif (not cond.empty): for dt in cond.dtypes: if (not is_bool_dtype(dt)): raise ValueError(msg.format(dtype=dt)) cond = ((- cond) if inplace else cond) try_quick = True if hasattr(other, 'align'): if (other.ndim <= self.ndim): (_, other) = self.align(other, join='left', axis=axis, level=level, fill_value=np.nan) if ((axis is None) and (not all((other._get_axis(i).equals(ax) for (i, ax) in enumerate(self.axes))))): raise InvalidIndexError else: raise NotImplementedError('cannot align with a higher dimensional NDFrame') if isinstance(other, np.ndarray): if (other.shape != self.shape): if (self.ndim == 1): icond = cond.values if (len(other) == 1): other = np.array(other[0]) elif (len(cond[icond]) == len(other)): if try_quick: try: new_other = com.values_from_object(self) new_other = new_other.copy() new_other[icond] = other other = new_other except Exception: try_quick = False if (not try_quick): (dtype, fill_value) = maybe_promote(other.dtype) new_other = np.empty(len(icond), dtype=dtype) new_other.fill(fill_value) maybe_upcast_putmask(new_other, icond, other) other = new_other else: raise ValueError('Length of replacements must equal series length') else: raise ValueError('other must be the same shape as self when an ndarray') else: other = self._constructor(other, **self._construct_axes_dict()) if (axis is None): axis = 0 if (self.ndim == getattr(other, 'ndim', 0)): align = True else: align = (self._get_axis_number(axis) == 1) block_axis = self._get_block_manager_axis(axis) if inplace: self._check_inplace_setting(other) new_data = self._data.putmask(mask=cond, new=other, align=align, inplace=True, axis=block_axis, transpose=self._AXIS_REVERSED) self._update_inplace(new_data) else: new_data = self._data.where(other=other, cond=cond, align=align, errors=errors, try_cast=try_cast, axis=block_axis, transpose=self._AXIS_REVERSED) return self._constructor(new_data).__finalize__(self)
7,214,991,789,331,710,000
Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__.
pandas/core/generic.py
_where
kapilepatel/pandas
python
def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, errors='raise', try_cast=False): '\n Equivalent to public method `where`, except that `other` is not\n applied as a function even if callable. Used in __setitem__.\n ' inplace = validate_bool_kwarg(inplace, 'inplace') cond = com.apply_if_callable(cond, self) if isinstance(cond, NDFrame): (cond, _) = cond.align(self, join='right', broadcast_axis=1) else: if (not hasattr(cond, 'shape')): cond = np.asanyarray(cond) if (cond.shape != self.shape): raise ValueError('Array conditional must be same shape as self') cond = self._constructor(cond, **self._construct_axes_dict()) fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = 'Boolean array expected for the condition, not {dtype}' if (not isinstance(cond, pd.DataFrame)): if (not is_bool_dtype(cond)): raise ValueError(msg.format(dtype=cond.dtype)) elif (not cond.empty): for dt in cond.dtypes: if (not is_bool_dtype(dt)): raise ValueError(msg.format(dtype=dt)) cond = ((- cond) if inplace else cond) try_quick = True if hasattr(other, 'align'): if (other.ndim <= self.ndim): (_, other) = self.align(other, join='left', axis=axis, level=level, fill_value=np.nan) if ((axis is None) and (not all((other._get_axis(i).equals(ax) for (i, ax) in enumerate(self.axes))))): raise InvalidIndexError else: raise NotImplementedError('cannot align with a higher dimensional NDFrame') if isinstance(other, np.ndarray): if (other.shape != self.shape): if (self.ndim == 1): icond = cond.values if (len(other) == 1): other = np.array(other[0]) elif (len(cond[icond]) == len(other)): if try_quick: try: new_other = com.values_from_object(self) new_other = new_other.copy() new_other[icond] = other other = new_other except Exception: try_quick = False if (not try_quick): (dtype, fill_value) = maybe_promote(other.dtype) new_other = np.empty(len(icond), dtype=dtype) new_other.fill(fill_value) maybe_upcast_putmask(new_other, icond, other) other = new_other else: raise ValueError('Length of replacements must equal series length') else: raise ValueError('other must be the same shape as self when an ndarray') else: other = self._constructor(other, **self._construct_axes_dict()) if (axis is None): axis = 0 if (self.ndim == getattr(other, 'ndim', 0)): align = True else: align = (self._get_axis_number(axis) == 1) block_axis = self._get_block_manager_axis(axis) if inplace: self._check_inplace_setting(other) new_data = self._data.putmask(mask=cond, new=other, align=align, inplace=True, axis=block_axis, transpose=self._AXIS_REVERSED) self._update_inplace(new_data) else: new_data = self._data.where(other=other, cond=cond, align=align, errors=errors, try_cast=try_cast, axis=block_axis, transpose=self._AXIS_REVERSED) return self._constructor(new_data).__finalize__(self)
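`_where` is private, so a sketch of the public entry point it backs, `where`, whose semantics are identical except that a callable `other` is evaluated first (expected output):

>>> import pandas as pd
>>> s = pd.Series(range(5))
>>> s.where(s > 2)             # rows failing the condition become NaN
0    NaN
1    NaN
2    NaN
3    3.0
4    4.0
dtype: float64
>>> s.where(s > 2, other=-1)   # ...or are replaced by `other`
0   -1
1   -1
2   -1
3    3
4    4
dtype: int64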
def slice_shift(self, periods=1, axis=0): '\n Equivalent to `shift` without copying data. The shifted data will\n not include the dropped periods and the shifted axis will be smaller\n than the original.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n\n Returns\n -------\n shifted : same type as caller\n\n Notes\n -----\n While the `slice_shift` is faster than `shift`, you may pay for it\n later during alignment.\n ' if (periods == 0): return self if (periods > 0): vslicer = slice(None, (- periods)) islicer = slice(periods, None) else: vslicer = slice((- periods), None) islicer = slice(None, periods) new_obj = self._slice(vslicer, axis=axis) shifted_axis = self._get_axis(axis)[islicer] new_obj.set_axis(shifted_axis, axis=axis, inplace=True) return new_obj.__finalize__(self)
467,329,139,216,291,600
Equivalent to `shift` without copying data. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. Parameters ---------- periods : int Number of periods to move, can be positive or negative Returns ------- shifted : same type as caller Notes ----- While the `slice_shift` is faster than `shift`, you may pay for it later during alignment.
pandas/core/generic.py
slice_shift
kapilepatel/pandas
python
def slice_shift(self, periods=1, axis=0): '\n Equivalent to `shift` without copying data. The shifted data will\n not include the dropped periods and the shifted axis will be smaller\n than the original.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n\n Returns\n -------\n shifted : same type as caller\n\n Notes\n -----\n While the `slice_shift` is faster than `shift`, you may pay for it\n later during alignment.\n ' if (periods == 0): return self if (periods > 0): vslicer = slice(None, (- periods)) islicer = slice(periods, None) else: vslicer = slice((- periods), None) islicer = slice(None, periods) new_obj = self._slice(vslicer, axis=axis) shifted_axis = self._get_axis(axis)[islicer] new_obj.set_axis(shifted_axis, axis=axis, inplace=True) return new_obj.__finalize__(self)
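A sketch of the trade-off the notes describe: `slice_shift` drops the vacated labels instead of filling them, so the dtype survives but the axis shrinks (expected output, not captured):

>>> import pandas as pd
>>> s = pd.Series([1, 2, 3, 4], index=pd.date_range('2000-01-01', periods=4))
>>> s.shift(1)        # full index kept; the introduced NaN forces float64
2000-01-01    NaN
2000-01-02    1.0
2000-01-03    2.0
2000-01-04    3.0
Freq: D, dtype: float64
>>> s.slice_shift(1)  # first label dropped; int64 preserved
2000-01-02    1
2000-01-03    2
2000-01-04    3
Freq: D, dtype: int64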
def tshift(self, periods=1, freq=None, axis=0): "\n Shift the time index, using the index's frequency if available.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n freq : DateOffset, timedelta, or time rule string, default None\n Increment to use from the tseries module or time rule (e.g. 'EOM')\n axis : int or basestring\n Corresponds to the axis that contains the Index\n\n Returns\n -------\n shifted : NDFrame\n\n Notes\n -----\n If freq is not specified then tries to use the freq or inferred_freq\n attributes of the index. If neither of those attributes exist, a\n ValueError is thrown\n " index = self._get_axis(axis) if (freq is None): freq = getattr(index, 'freq', None) if (freq is None): freq = getattr(index, 'inferred_freq', None) if (freq is None): msg = 'Freq was not given and was not set in the index' raise ValueError(msg) if (periods == 0): return self if isinstance(freq, string_types): freq = to_offset(freq) block_axis = self._get_block_manager_axis(axis) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if (freq == orig_freq): new_data = self._data.copy() new_data.axes[block_axis] = index.shift(periods) else: msg = ('Given freq %s does not match PeriodIndex freq %s' % (freq.rule_code, orig_freq.rule_code)) raise ValueError(msg) else: new_data = self._data.copy() new_data.axes[block_axis] = index.shift(periods, freq) return self._constructor(new_data).__finalize__(self)
225,478,747,202,192,930
Shift the time index, using the index's frequency if available. Parameters ---------- periods : int Number of periods to move, can be positive or negative freq : DateOffset, timedelta, or time rule string, default None Increment to use from the tseries module or time rule (e.g. 'EOM') axis : int or basestring Corresponds to the axis that contains the Index Returns ------- shifted : NDFrame Notes ----- If freq is not specified then tries to use the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown
pandas/core/generic.py
tshift
kapilepatel/pandas
python
def tshift(self, periods=1, freq=None, axis=0): "\n Shift the time index, using the index's frequency if available.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n freq : DateOffset, timedelta, or time rule string, default None\n Increment to use from the tseries module or time rule (e.g. 'EOM')\n axis : int or basestring\n Corresponds to the axis that contains the Index\n\n Returns\n -------\n shifted : NDFrame\n\n Notes\n -----\n If freq is not specified then tries to use the freq or inferred_freq\n attributes of the index. If neither of those attributes exist, a\n ValueError is thrown\n " index = self._get_axis(axis) if (freq is None): freq = getattr(index, 'freq', None) if (freq is None): freq = getattr(index, 'inferred_freq', None) if (freq is None): msg = 'Freq was not given and was not set in the index' raise ValueError(msg) if (periods == 0): return self if isinstance(freq, string_types): freq = to_offset(freq) block_axis = self._get_block_manager_axis(axis) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if (freq == orig_freq): new_data = self._data.copy() new_data.axes[block_axis] = index.shift(periods) else: msg = ('Given freq %s does not match PeriodIndex freq %s' % (freq.rule_code, orig_freq.rule_code)) raise ValueError(msg) else: new_data = self._data.copy() new_data.axes[block_axis] = index.shift(periods, freq) return self._constructor(new_data).__finalize__(self)
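A minimal sketch of `tshift` picking the frequency up from the index, plus the documented ValueError when no frequency is available (expected behaviour, not captured):

>>> import pandas as pd
>>> s = pd.Series([1, 2, 3],
...               index=pd.date_range('2000-01-01', periods=3, freq='D'))
>>> s.tshift(2)   # labels move two periods; values are not realigned
2000-01-03    1
2000-01-04    2
2000-01-05    3
Freq: D, dtype: int64
>>> pd.Series([1, 2, 3]).tshift(2)   # plain RangeIndex carries no freq
Traceback (most recent call last):
    ...
ValueError: Freq was not given and was not set in the index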
def truncate(self, before=None, after=None, axis=None, copy=True): '\n Truncate a Series or DataFrame before and after some index value.\n\n This is a useful shorthand for boolean indexing based on index\n values above or below certain thresholds.\n\n Parameters\n ----------\n before : date, string, int\n Truncate all rows before this index value.\n after : date, string, int\n Truncate all rows after this index value.\n axis : {0 or \'index\', 1 or \'columns\'}, optional\n Axis to truncate. Truncates the index (rows) by default.\n copy : boolean, default is True,\n Return a copy of the truncated section.\n\n Returns\n -------\n type of caller\n The truncated Series or DataFrame.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by label.\n DataFrame.iloc : Select a subset of a DataFrame by position.\n\n Notes\n -----\n If the index being truncated contains only datetime values,\n `before` and `after` may be specified as strings instead of\n Timestamps.\n\n Examples\n --------\n >>> df = pd.DataFrame({\'A\': [\'a\', \'b\', \'c\', \'d\', \'e\'],\n ... \'B\': [\'f\', \'g\', \'h\', \'i\', \'j\'],\n ... \'C\': [\'k\', \'l\', \'m\', \'n\', \'o\']},\n ... index=[1, 2, 3, 4, 5])\n >>> df\n A B C\n 1 a f k\n 2 b g l\n 3 c h m\n 4 d i n\n 5 e j o\n\n >>> df.truncate(before=2, after=4)\n A B C\n 2 b g l\n 3 c h m\n 4 d i n\n\n The columns of a DataFrame can be truncated.\n\n >>> df.truncate(before="A", after="B", axis="columns")\n A B\n 1 a f\n 2 b g\n 3 c h\n 4 d i\n 5 e j\n\n For Series, only rows can be truncated.\n\n >>> df[\'A\'].truncate(before=2, after=4)\n 2 b\n 3 c\n 4 d\n Name: A, dtype: object\n\n The index values in ``truncate`` can be datetimes or string\n dates.\n\n >>> dates = pd.date_range(\'2016-01-01\', \'2016-02-01\', freq=\'s\')\n >>> df = pd.DataFrame(index=dates, data={\'A\': 1})\n >>> df.tail()\n A\n 2016-01-31 23:59:56 1\n 2016-01-31 23:59:57 1\n 2016-01-31 23:59:58 1\n 2016-01-31 23:59:59 1\n 2016-02-01 00:00:00 1\n\n >>> df.truncate(before=pd.Timestamp(\'2016-01-05\'),\n ... after=pd.Timestamp(\'2016-01-10\')).tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Because the index is a DatetimeIndex containing only dates, we can\n specify `before` and `after` as strings. They will be coerced to\n Timestamps before truncation.\n\n >>> df.truncate(\'2016-01-05\', \'2016-01-10\').tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Note that ``truncate`` assumes a 0 value for any unspecified time\n component (midnight). 
This differs from partial string slicing, which\n returns any partially matching dates.\n\n >>> df.loc[\'2016-01-05\':\'2016-01-10\', :].tail()\n A\n 2016-01-10 23:59:55 1\n 2016-01-10 23:59:56 1\n 2016-01-10 23:59:57 1\n 2016-01-10 23:59:58 1\n 2016-01-10 23:59:59 1\n ' if (axis is None): axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) if ((not ax.is_monotonic_increasing) and (not ax.is_monotonic_decreasing)): raise ValueError('truncate requires a sorted index') if ax.is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if ((before is not None) and (after is not None)): if (before > after): raise ValueError(('Truncate: %s must be after %s' % (after, before))) slicer = ([slice(None, None)] * self._AXIS_LEN) slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) if copy: result = result.copy() return result
-8,456,755,555,122,903,000
Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, string, int Truncate all rows before this index value. after : date, string, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. copy : boolean, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. >>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1
pandas/core/generic.py
truncate
kapilepatel/pandas
python
def truncate(self, before=None, after=None, axis=None, copy=True): '\n Truncate a Series or DataFrame before and after some index value.\n\n This is a useful shorthand for boolean indexing based on index\n values above or below certain thresholds.\n\n Parameters\n ----------\n before : date, string, int\n Truncate all rows before this index value.\n after : date, string, int\n Truncate all rows after this index value.\n axis : {0 or \'index\', 1 or \'columns\'}, optional\n Axis to truncate. Truncates the index (rows) by default.\n copy : boolean, default is True,\n Return a copy of the truncated section.\n\n Returns\n -------\n type of caller\n The truncated Series or DataFrame.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by label.\n DataFrame.iloc : Select a subset of a DataFrame by position.\n\n Notes\n -----\n If the index being truncated contains only datetime values,\n `before` and `after` may be specified as strings instead of\n Timestamps.\n\n Examples\n --------\n >>> df = pd.DataFrame({\'A\': [\'a\', \'b\', \'c\', \'d\', \'e\'],\n ... \'B\': [\'f\', \'g\', \'h\', \'i\', \'j\'],\n ... \'C\': [\'k\', \'l\', \'m\', \'n\', \'o\']},\n ... index=[1, 2, 3, 4, 5])\n >>> df\n A B C\n 1 a f k\n 2 b g l\n 3 c h m\n 4 d i n\n 5 e j o\n\n >>> df.truncate(before=2, after=4)\n A B C\n 2 b g l\n 3 c h m\n 4 d i n\n\n The columns of a DataFrame can be truncated.\n\n >>> df.truncate(before="A", after="B", axis="columns")\n A B\n 1 a f\n 2 b g\n 3 c h\n 4 d i\n 5 e j\n\n For Series, only rows can be truncated.\n\n >>> df[\'A\'].truncate(before=2, after=4)\n 2 b\n 3 c\n 4 d\n Name: A, dtype: object\n\n The index values in ``truncate`` can be datetimes or string\n dates.\n\n >>> dates = pd.date_range(\'2016-01-01\', \'2016-02-01\', freq=\'s\')\n >>> df = pd.DataFrame(index=dates, data={\'A\': 1})\n >>> df.tail()\n A\n 2016-01-31 23:59:56 1\n 2016-01-31 23:59:57 1\n 2016-01-31 23:59:58 1\n 2016-01-31 23:59:59 1\n 2016-02-01 00:00:00 1\n\n >>> df.truncate(before=pd.Timestamp(\'2016-01-05\'),\n ... after=pd.Timestamp(\'2016-01-10\')).tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Because the index is a DatetimeIndex containing only dates, we can\n specify `before` and `after` as strings. They will be coerced to\n Timestamps before truncation.\n\n >>> df.truncate(\'2016-01-05\', \'2016-01-10\').tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Note that ``truncate`` assumes a 0 value for any unspecified time\n component (midnight). 
This differs from partial string slicing, which\n returns any partially matching dates.\n\n >>> df.loc[\'2016-01-05\':\'2016-01-10\', :].tail()\n A\n 2016-01-10 23:59:55 1\n 2016-01-10 23:59:56 1\n 2016-01-10 23:59:57 1\n 2016-01-10 23:59:58 1\n 2016-01-10 23:59:59 1\n ' if (axis is None): axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) if ((not ax.is_monotonic_increasing) and (not ax.is_monotonic_decreasing)): raise ValueError('truncate requires a sorted index') if ax.is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if ((before is not None) and (after is not None)): if (before > after): raise ValueError(('Truncate: %s must be after %s' % (after, before))) slicer = ([slice(None, None)] * self._AXIS_LEN) slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) if copy: result = result.copy() return result
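One branch the `truncate` docstring leaves unexercised: on a MultiIndex axis, the method also trims the index object itself via `ax.truncate(before, after)`, as the final `isinstance(ax, MultiIndex)` block shows. A sketch (expected output, not captured):

>>> import pandas as pd
>>> mi = pd.MultiIndex.from_product([[1, 2, 3], ['a', 'b']])
>>> df = pd.DataFrame({'x': range(6)}, index=mi)
>>> df.truncate(before=2)   # outer level truncated along with the rows
     x
2 a  2
  b  3
3 a  4
  b  5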
def tz_convert(self, tz, axis=0, level=None, copy=True): '\n Convert tz-aware axis to target time zone.\n\n Parameters\n ----------\n tz : string or pytz.timezone object\n axis : the axis to convert\n level : int, str, default None\n If axis is a MultiIndex, convert a specific level. Otherwise\n must be None\n copy : boolean, default True\n Also make a copy of the underlying data\n\n Returns\n -------\n\n Raises\n ------\n TypeError\n If the axis is tz-naive.\n ' axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if (not hasattr(ax, 'tz_convert')): if (len(ax) > 0): ax_name = self._get_axis_name(axis) raise TypeError(('%s is not a valid DatetimeIndex or PeriodIndex' % ax_name)) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if (level not in (None, 0, ax.name)): raise ValueError('The level {0} is not valid'.format(level)) ax = _tz_convert(ax, tz) result = self._constructor(self._data, copy=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self)
3,158,875,932,094,216,000
Convert tz-aware axis to target time zone. Parameters ---------- tz : string or pytz.timezone object axis : the axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None copy : boolean, default True Also make a copy of the underlying data Returns ------- Raises ------ TypeError If the axis is tz-naive.
pandas/core/generic.py
tz_convert
kapilepatel/pandas
python
def tz_convert(self, tz, axis=0, level=None, copy=True): '\n Convert tz-aware axis to target time zone.\n\n Parameters\n ----------\n tz : string or pytz.timezone object\n axis : the axis to convert\n level : int, str, default None\n If axis is a MultiIndex, convert a specific level. Otherwise\n must be None\n copy : boolean, default True\n Also make a copy of the underlying data\n\n Returns\n -------\n\n Raises\n ------\n TypeError\n If the axis is tz-naive.\n ' axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if (not hasattr(ax, 'tz_convert')): if (len(ax) > 0): ax_name = self._get_axis_name(axis) raise TypeError(('%s is not a valid DatetimeIndex or PeriodIndex' % ax_name)) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if (level not in (None, 0, ax.name)): raise ValueError('The level {0} is not valid'.format(level)) ax = _tz_convert(ax, tz) result = self._constructor(self._data, copy=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self)
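The `tz_convert` docstring carries no examples; a minimal sketch (expected output, not captured) showing that conversion relabels the same instants in the target zone:

>>> import pandas as pd
>>> idx = pd.date_range('2018-09-15 01:30', periods=2, freq='H', tz='UTC')
>>> s = pd.Series([1, 2], index=idx)
>>> s.tz_convert('US/Eastern')   # same instants, Eastern wall-clock labels
2018-09-14 21:30:00-04:00    1
2018-09-14 22:30:00-04:00    2
Freq: H, dtype: int64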
def tz_localize(self, tz, axis=0, level=None, copy=True, ambiguous='raise', nonexistent='raise'): "\n Localize tz-naive index of a Series or DataFrame to target time zone.\n\n This operation localizes the Index. To localize the values in a\n timezone-naive Series, use :meth:`Series.dt.tz_localize`.\n\n Parameters\n ----------\n tz : string or pytz.timezone object\n axis : the axis to localize\n level : int, str, default None\n If axis is a MultiIndex, localize a specific level. Otherwise\n must be None\n copy : boolean, default True\n Also make a copy of the underlying data\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from\n 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at\n 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the\n `ambiguous` parameter dictates how ambiguous times should be\n handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False designates\n a non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times\n nonexistent : str, default 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST. Valid values are:\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise a NonExistentTimeError if there are\n nonexistent times\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n Same type as the input.\n\n Raises\n ------\n TypeError\n If the TimeSeries is tz-aware and tz is not None.\n\n Examples\n --------\n\n Localize local times:\n\n >>> s = pd.Series([1],\n ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))\n >>> s.tz_localize('CET')\n 2018-09-15 01:30:00+02:00 1\n dtype: int64\n\n Be careful with DST changes. When there is sequential data, pandas\n can infer the DST time:\n\n >>> s = pd.Series(range(7), index=pd.DatetimeIndex([\n ... '2018-10-28 01:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 03:00:00',\n ... '2018-10-28 03:30:00']))\n >>> s.tz_localize('CET', ambiguous='infer')\n 2018-10-28 01:30:00+02:00 0\n 2018-10-28 02:00:00+02:00 1\n 2018-10-28 02:30:00+02:00 2\n 2018-10-28 02:00:00+01:00 3\n 2018-10-28 02:30:00+01:00 4\n 2018-10-28 03:00:00+01:00 5\n 2018-10-28 03:30:00+01:00 6\n dtype: int64\n\n In some cases, inferring the DST is impossible. In such cases, you can\n pass an ndarray to the ambiguous parameter to set the DST explicitly\n\n >>> s = pd.Series(range(3), index=pd.DatetimeIndex([\n ... '2018-10-28 01:20:00',\n ... '2018-10-28 02:36:00',\n ... 
'2018-10-28 03:46:00']))\n >>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))\n 2018-10-28 01:20:00+02:00 0\n 2018-10-28 02:36:00+02:00 1\n 2018-10-28 03:46:00+01:00 2\n dtype: int64\n\n If the DST transition causes nonexistent times, you can shift these\n dates forward or backward with a timedelta object or `'shift_forward'`\n or `'shift_backward'`.\n\n >>> s = pd.Series(range(2), index=pd.DatetimeIndex([\n ... '2015-03-29 02:30:00',\n ... '2015-03-29 03:30:00']))\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')\n 2015-03-29 03:00:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')\n 2015-03-29 01:59:59.999999999+01:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))\n 2015-03-29 03:30:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n " nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward') if ((nonexistent not in nonexistent_options) and (not isinstance(nonexistent, timedelta))): raise ValueError("The nonexistent argument must be one of 'raise', 'NaT', 'shift_forward', 'shift_backward' or a timedelta object") axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if (not hasattr(ax, 'tz_localize')): if (len(ax) > 0): ax_name = self._get_axis_name(axis) raise TypeError(('%s is not a valid DatetimeIndex or PeriodIndex' % ax_name)) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = ax.set_levels(new_level, level=level) else: if (level not in (None, 0, ax.name)): raise ValueError('The level {0} is not valid'.format(level)) ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self._constructor(self._data, copy=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self)
3,579,135,569,935,609,300
Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : string or pytz.timezone object axis : the axis to localize level : int, str, default None If axis is a MultiIndex, localize a specific level. Otherwise must be None copy : boolean, default True Also make a copy of the underlying data ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise a NonExistentTimeError if there are nonexistent times .. versionadded:: 0.24.0 Returns ------- Series or DataFrame Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00'])) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), index=pd.DatetimeIndex([ ... '2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), index=pd.DatetimeIndex([ ... '2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backward with a timedelta object or `'shift_forward'` or `'shift_backward'`. >>> s = pd.Series(range(2), index=pd.DatetimeIndex([ ... '2015-03-29 02:30:00', ... 
'2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64
pandas/core/generic.py
tz_localize
kapilepatel/pandas
python
def tz_localize(self, tz, axis=0, level=None, copy=True, ambiguous='raise', nonexistent='raise'): "\n Localize tz-naive index of a Series or DataFrame to target time zone.\n\n This operation localizes the Index. To localize the values in a\n timezone-naive Series, use :meth:`Series.dt.tz_localize`.\n\n Parameters\n ----------\n tz : string or pytz.timezone object\n axis : the axis to localize\n level : int, str, default None\n If axis is a MultiIndex, localize a specific level. Otherwise\n must be None\n copy : boolean, default True\n Also make a copy of the underlying data\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from\n 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at\n 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the\n `ambiguous` parameter dictates how ambiguous times should be\n handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False designates\n a non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times\n nonexistent : str, default 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST. Valid values are:\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise a NonExistentTimeError if there are\n nonexistent times\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n Same type as the input.\n\n Raises\n ------\n TypeError\n If the TimeSeries is tz-aware and tz is not None.\n\n Examples\n --------\n\n Localize local times:\n\n >>> s = pd.Series([1],\n ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))\n >>> s.tz_localize('CET')\n 2018-09-15 01:30:00+02:00 1\n dtype: int64\n\n Be careful with DST changes. When there is sequential data, pandas\n can infer the DST time:\n\n >>> s = pd.Series(range(7), index=pd.DatetimeIndex([\n ... '2018-10-28 01:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 03:00:00',\n ... '2018-10-28 03:30:00']))\n >>> s.tz_localize('CET', ambiguous='infer')\n 2018-10-28 01:30:00+02:00 0\n 2018-10-28 02:00:00+02:00 1\n 2018-10-28 02:30:00+02:00 2\n 2018-10-28 02:00:00+01:00 3\n 2018-10-28 02:30:00+01:00 4\n 2018-10-28 03:00:00+01:00 5\n 2018-10-28 03:30:00+01:00 6\n dtype: int64\n\n In some cases, inferring the DST is impossible. In such cases, you can\n pass an ndarray to the ambiguous parameter to set the DST explicitly\n\n >>> s = pd.Series(range(3), index=pd.DatetimeIndex([\n ... '2018-10-28 01:20:00',\n ... '2018-10-28 02:36:00',\n ... 
'2018-10-28 03:46:00']))\n >>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))\n 2018-10-28 01:20:00+02:00 0\n 2018-10-28 02:36:00+02:00 1\n 2018-10-28 03:46:00+01:00 2\n dtype: int64\n\n If the DST transition causes nonexistent times, you can shift these\n dates forward or backward with a timedelta object or `'shift_forward'`\n or `'shift_backward'`.\n\n >>> s = pd.Series(range(2), index=pd.DatetimeIndex([\n ... '2015-03-29 02:30:00',\n ... '2015-03-29 03:30:00']))\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')\n 2015-03-29 03:00:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')\n 2015-03-29 01:59:59.999999999+01:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))\n 2015-03-29 03:30:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n " nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward') if ((nonexistent not in nonexistent_options) and (not isinstance(nonexistent, timedelta))): raise ValueError("The nonexistent argument must be one of 'raise', 'NaT', 'shift_forward', 'shift_backward' or a timedelta object") axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if (not hasattr(ax, 'tz_localize')): if (len(ax) > 0): ax_name = self._get_axis_name(axis) raise TypeError(('%s is not a valid DatetimeIndex or PeriodIndex' % ax_name)) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = ax.set_levels(new_level, level=level) else: if (level not in (None, 0, ax.name)): raise ValueError('The level {0} is not valid'.format(level)) ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self._constructor(self._data, copy=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self)
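A minimal usage sketch for the tz_localize record above, in the same doctest style as its docstring (assumes pandas is importable; the index values and names are illustrative):

>>> import pandas as pd
>>> idx = pd.DatetimeIndex(['2018-09-15 01:30:00', '2018-09-15 02:30:00'])
>>> s = pd.Series([1, 2], index=idx)
>>> s.index.tz is None  # tz-naive before localizing
True
>>> s.tz_localize('UTC').index.tz is None  # tz-aware afterwards
False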
def abs(self): "\n Return a Series/DataFrame with absolute numeric value of each element.\n\n This function only applies to elements that are all numeric.\n\n Returns\n -------\n abs\n Series/DataFrame containing the absolute value of each element.\n\n See Also\n --------\n numpy.absolute : Calculate the absolute value element-wise.\n\n Notes\n -----\n For ``complex`` inputs, ``1.2 + 1j``, the absolute value is\n :math:`\\sqrt{ a^2 + b^2 }`.\n\n Examples\n --------\n Absolute numeric values in a Series.\n\n >>> s = pd.Series([-1.10, 2, -3.33, 4])\n >>> s.abs()\n 0 1.10\n 1 2.00\n 2 3.33\n 3 4.00\n dtype: float64\n\n Absolute numeric values in a Series with complex numbers.\n\n >>> s = pd.Series([1.2 + 1j])\n >>> s.abs()\n 0 1.56205\n dtype: float64\n\n Absolute numeric values in a Series with a Timedelta element.\n\n >>> s = pd.Series([pd.Timedelta('1 days')])\n >>> s.abs()\n 0 1 days\n dtype: timedelta64[ns]\n\n Select rows with data closest to a certain value using argsort (from\n `StackOverflow <https://stackoverflow.com/a/17758115>`__).\n\n >>> df = pd.DataFrame({\n ... 'a': [4, 5, 6, 7],\n ... 'b': [10, 20, 30, 40],\n ... 'c': [100, 50, -30, -50]\n ... })\n >>> df\n a b c\n 0 4 10 100\n 1 5 20 50\n 2 6 30 -30\n 3 7 40 -50\n >>> df.loc[(df.c - 43).abs().argsort()]\n a b c\n 1 5 20 50\n 0 4 10 100\n 2 6 30 -30\n 3 7 40 -50\n " return np.abs(self)
-1,500,510,702,703,974,400
Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For ``complex`` inputs, ``1.2 + 1j``, the absolute value is :math:`\sqrt{ a^2 + b^2 }`. Examples -------- Absolute numeric values in a Series. >>> s = pd.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a Series with complex numbers. >>> s = pd.Series([1.2 + 1j]) >>> s.abs() 0 1.56205 dtype: float64 Absolute numeric values in a Series with a Timedelta element. >>> s = pd.Series([pd.Timedelta('1 days')]) >>> s.abs() 0 1 days dtype: timedelta64[ns] Select rows with data closest to a certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... }) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50
pandas/core/generic.py
abs
kapilepatel/pandas
python
def abs(self): "\n Return a Series/DataFrame with absolute numeric value of each element.\n\n This function only applies to elements that are all numeric.\n\n Returns\n -------\n abs\n Series/DataFrame containing the absolute value of each element.\n\n See Also\n --------\n numpy.absolute : Calculate the absolute value element-wise.\n\n Notes\n -----\n For ``complex`` inputs, ``1.2 + 1j``, the absolute value is\n :math:`\\sqrt{ a^2 + b^2 }`.\n\n Examples\n --------\n Absolute numeric values in a Series.\n\n >>> s = pd.Series([-1.10, 2, -3.33, 4])\n >>> s.abs()\n 0 1.10\n 1 2.00\n 2 3.33\n 3 4.00\n dtype: float64\n\n Absolute numeric values in a Series with complex numbers.\n\n >>> s = pd.Series([1.2 + 1j])\n >>> s.abs()\n 0 1.56205\n dtype: float64\n\n Absolute numeric values in a Series with a Timedelta element.\n\n >>> s = pd.Series([pd.Timedelta('1 days')])\n >>> s.abs()\n 0 1 days\n dtype: timedelta64[ns]\n\n Select rows with data closest to a certain value using argsort (from\n `StackOverflow <https://stackoverflow.com/a/17758115>`__).\n\n >>> df = pd.DataFrame({\n ... 'a': [4, 5, 6, 7],\n ... 'b': [10, 20, 30, 40],\n ... 'c': [100, 50, -30, -50]\n ... })\n >>> df\n a b c\n 0 4 10 100\n 1 5 20 50\n 2 6 30 -30\n 3 7 40 -50\n >>> df.loc[(df.c - 43).abs().argsort()]\n a b c\n 1 5 20 50\n 0 4 10 100\n 2 6 30 -30\n 3 7 40 -50\n " return np.abs(self)
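A short sketch exercising the abs record above (assumes pandas; the Series values are illustrative):

>>> import pandas as pd
>>> pd.Series([-1.5, 2.0, -3.0]).abs().tolist()
[1.5, 2.0, 3.0]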
def describe(self, percentiles=None, include=None, exclude=None): '\n Generate descriptive statistics that summarize the central tendency,\n dispersion and shape of a dataset\'s distribution, excluding\n ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list-like of numbers, optional\n The percentiles to include in the output. All should\n fall between 0 and 1. The default is\n ``[.25, .5, .75]``, which returns the 25th, 50th, and\n 75th percentiles.\n include : \'all\', list-like of dtypes or None (default), optional\n A white list of data types to include in the result. Ignored\n for ``Series``. Here are the options:\n\n - \'all\' : All columns of the input will be included in the output.\n - A list-like of dtypes : Limits the results to the\n provided data types.\n To limit the result to numeric types submit\n ``numpy.number``. To limit it instead to object columns submit\n the ``numpy.object`` data type. Strings\n can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=[\'O\'])``). To\n select pandas categorical columns, use ``\'category\'``\n - None (default) : The result will include all numeric columns.\n exclude : list-like of dtypes or None (default), optional\n A black list of data types to omit from the result. Ignored\n for ``Series``. Here are the options:\n\n - A list-like of dtypes : Excludes the provided data types\n from the result. To exclude numeric types submit\n ``numpy.number``. To exclude object columns submit the data\n type ``numpy.object``. Strings can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(exclude=[\'O\'])``). To\n exclude pandas categorical columns, use ``\'category\'``\n - None (default) : The result will exclude nothing.\n\n Returns\n -------\n Series or DataFrame\n Summary statistics of the Series or DataFrame provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the observations.\n DataFrame.select_dtypes: Subset of a DataFrame including/excluding\n columns based on their dtype.\n\n Notes\n -----\n For numeric data, the result\'s index will include ``count``,\n ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and\n upper percentiles. By default the lower percentile is ``25`` and the\n upper percentile is ``75``. The ``50`` percentile is the\n same as the median.\n\n For object data (e.g. strings or timestamps), the result\'s index\n will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``\n is the most common value. The ``freq`` is the most common value\'s\n frequency. Timestamps also include the ``first`` and ``last`` items.\n\n If multiple object values have the highest count, then the\n ``count`` and ``top`` results will be arbitrarily chosen from\n among those with the highest count.\n\n For mixed data types provided via a ``DataFrame``, the default is to\n return only an analysis of numeric columns. If the dataframe consists\n only of object and categorical data without any numeric columns, the\n default is to return an analysis of both the object and categorical\n columns. 
If ``include=\'all\'`` is provided as an option, the result\n will include a union of attributes of each type.\n\n The `include` and `exclude` parameters can be used to limit\n which columns in a ``DataFrame`` are analyzed for the output.\n The parameters are ignored when analyzing a ``Series``.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n dtype: float64\n\n Describing a categorical ``Series``.\n\n >>> s = pd.Series([\'a\', \'a\', \'b\', \'c\'])\n >>> s.describe()\n count 4\n unique 3\n top a\n freq 2\n dtype: object\n\n Describing a timestamp ``Series``.\n\n >>> s = pd.Series([\n ... np.datetime64("2000-01-01"),\n ... np.datetime64("2010-01-01"),\n ... np.datetime64("2010-01-01")\n ... ])\n >>> s.describe()\n count 3\n unique 2\n top 2010-01-01 00:00:00\n freq 2\n first 2000-01-01 00:00:00\n last 2010-01-01 00:00:00\n dtype: object\n\n Describing a ``DataFrame``. By default only numeric fields\n are returned.\n\n >>> df = pd.DataFrame({\'categorical\': pd.Categorical([\'d\',\'e\',\'f\']),\n ... \'numeric\': [1, 2, 3],\n ... \'object\': [\'a\', \'b\', \'c\']\n ... })\n >>> df.describe()\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Describing all columns of a ``DataFrame`` regardless of data type.\n\n >>> df.describe(include=\'all\')\n categorical numeric object\n count 3 3.0 3\n unique 3 NaN 3\n top f NaN c\n freq 1 NaN 1\n mean NaN 2.0 NaN\n std NaN 1.0 NaN\n min NaN 1.0 NaN\n 25% NaN 1.5 NaN\n 50% NaN 2.0 NaN\n 75% NaN 2.5 NaN\n max NaN 3.0 NaN\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n Name: numeric, dtype: float64\n\n Including only numeric columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.number])\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Including only string columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.object])\n object\n count 3\n unique 3\n top c\n freq 1\n\n Including only categorical columns from a ``DataFrame`` description.\n\n >>> df.describe(include=[\'category\'])\n categorical\n count 3\n unique 3\n top f\n freq 1\n\n Excluding numeric columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.number])\n categorical object\n count 3 3\n unique 3 3\n top f c\n freq 1 1\n\n Excluding object columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.object])\n categorical numeric\n count 3 3.0\n unique 3 NaN\n top f NaN\n freq 1 NaN\n mean NaN 2.0\n std NaN 1.0\n min NaN 1.0\n 25% NaN 1.5\n 50% NaN 2.0\n 75% NaN 2.5\n max NaN 3.0\n ' if (self.ndim >= 3): msg = 'describe is not implemented on Panel objects.' 
raise NotImplementedError(msg) elif ((self.ndim == 2) and (self.columns.size == 0)): raise ValueError('Cannot describe a DataFrame without columns') if (percentiles is not None): percentiles = list(percentiles) self._check_percentile(percentiles) if (0.5 not in percentiles): percentiles.append(0.5) percentiles = np.asarray(percentiles) else: percentiles = np.array([0.25, 0.5, 0.75]) unique_pcts = np.unique(percentiles) if (len(unique_pcts) < len(percentiles)): raise ValueError('percentiles cannot contain duplicates') percentiles = unique_pcts formatted_percentiles = format_percentiles(percentiles) def describe_numeric_1d(series): stat_index = ((['count', 'mean', 'std', 'min'] + formatted_percentiles) + ['max']) d = (([series.count(), series.mean(), series.std(), series.min()] + series.quantile(percentiles).tolist()) + [series.max()]) return pd.Series(d, index=stat_index, name=series.name) def describe_categorical_1d(data): names = ['count', 'unique'] objcounts = data.value_counts() count_unique = len(objcounts[(objcounts != 0)]) result = [data.count(), count_unique] if (result[1] > 0): (top, freq) = (objcounts.index[0], objcounts.iloc[0]) if is_datetime64_any_dtype(data): tz = data.dt.tz asint = data.dropna().values.view('i8') top = Timestamp(top) if ((top.tzinfo is not None) and (tz is not None)): top = top.tz_convert(tz) else: top = top.tz_localize(tz) names += ['top', 'freq', 'first', 'last'] result += [top, freq, Timestamp(asint.min(), tz=tz), Timestamp(asint.max(), tz=tz)] else: names += ['top', 'freq'] result += [top, freq] return pd.Series(result, index=names, name=data.name) def describe_1d(data): if is_bool_dtype(data): return describe_categorical_1d(data) elif is_numeric_dtype(data): return describe_numeric_1d(data) elif is_timedelta64_dtype(data): return describe_numeric_1d(data) else: return describe_categorical_1d(data) if (self.ndim == 1): return describe_1d(self) elif ((include is None) and (exclude is None)): data = self.select_dtypes(include=[np.number]) if (len(data.columns) == 0): data = self elif (include == 'all'): if (exclude is not None): msg = "exclude must be None when include is 'all'" raise ValueError(msg) data = self else: data = self.select_dtypes(include=include, exclude=exclude) ldesc = [describe_1d(s) for (_, s) in data.iteritems()] names = [] ldesc_indexes = sorted((x.index for x in ldesc), key=len) for idxnames in ldesc_indexes: for name in idxnames: if (name not in names): names.append(name) d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1) d.columns = data.columns.copy() return d
-6,582,918,210,121,776,000
Generate descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. Here are the options: - 'all' : All columns of the input will be included in the output. - A list-like of dtypes : Limits the results to the provided data types. To limit the result to numeric types submit ``numpy.number``. To limit it instead to object columns submit the ``numpy.object`` data type. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To select pandas categorical columns, use ``'category'`` - None (default) : The result will include all numeric columns. exclude : list-like of dtypes or None (default), optional A black list of data types to omit from the result. Ignored for ``Series``. Here are the options: - A list-like of dtypes : Excludes the provided data types from the result. To exclude numeric types submit ``numpy.number``. To exclude object columns submit the data type ``numpy.object``. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. Returns ------- Series or DataFrame Summary statistics of the Series or DataFrame provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. DataFrame.select_dtypes: Subset of a DataFrame including/excluding columns based on their dtype. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and upper percentiles. By default the lower percentile is ``25`` and the upper percentile is ``75``. The ``50`` percentile is the same as the median. For object data (e.g. strings or timestamps), the result's index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value's frequency. Timestamps also include the ``first`` and ``last`` items. If multiple object values have the highest count, then the ``count`` and ``top`` results will be arbitrarily chosen from among those with the highest count. For mixed data types provided via a ``DataFrame``, the default is to return only an analysis of numeric columns. If the dataframe consists only of object and categorical data without any numeric columns, the default is to return an analysis of both the object and categorical columns. If ``include='all'`` is provided as an option, the result will include a union of attributes of each type. The `include` and `exclude` parameters can be used to limit which columns in a ``DataFrame`` are analyzed for the output. The parameters are ignored when analyzing a ``Series``. 
Examples -------- Describing a numeric ``Series``. >>> s = pd.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 dtype: float64 Describing a categorical ``Series``. >>> s = pd.Series(['a', 'a', 'b', 'c']) >>> s.describe() count 4 unique 3 top a freq 2 dtype: object Describing a timestamp ``Series``. >>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... ]) >>> s.describe() count 3 unique 2 top 2010-01-01 00:00:00 freq 2 first 2000-01-01 00:00:00 last 2010-01-01 00:00:00 dtype: object Describing a ``DataFrame``. By default only numeric fields are returned. >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), ... 'numeric': [1, 2, 3], ... 'object': ['a', 'b', 'c'] ... }) >>> df.describe() numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Describing all columns of a ``DataFrame`` regardless of data type. >>> df.describe(include='all') categorical numeric object count 3 3.0 3 unique 3 NaN 3 top f NaN c freq 1 NaN 1 mean NaN 2.0 NaN std NaN 1.0 NaN min NaN 1.0 NaN 25% NaN 1.5 NaN 50% NaN 2.0 NaN 75% NaN 2.5 NaN max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Name: numeric, dtype: float64 Including only numeric columns in a ``DataFrame`` description. >>> df.describe(include=[np.number]) numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Including only string columns in a ``DataFrame`` description. >>> df.describe(include=[np.object]) object count 3 unique 3 top c freq 1 Including only categorical columns from a ``DataFrame`` description. >>> df.describe(include=['category']) categorical count 3 unique 3 top f freq 1 Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) categorical object count 3 3 unique 3 3 top f c freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.object]) categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0
pandas/core/generic.py
describe
kapilepatel/pandas
python
def describe(self, percentiles=None, include=None, exclude=None): '\n Generate descriptive statistics that summarize the central tendency,\n dispersion and shape of a dataset\'s distribution, excluding\n ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list-like of numbers, optional\n The percentiles to include in the output. All should\n fall between 0 and 1. The default is\n ``[.25, .5, .75]``, which returns the 25th, 50th, and\n 75th percentiles.\n include : \'all\', list-like of dtypes or None (default), optional\n A white list of data types to include in the result. Ignored\n for ``Series``. Here are the options:\n\n - \'all\' : All columns of the input will be included in the output.\n - A list-like of dtypes : Limits the results to the\n provided data types.\n To limit the result to numeric types submit\n ``numpy.number``. To limit it instead to object columns submit\n the ``numpy.object`` data type. Strings\n can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=[\'O\'])``). To\n select pandas categorical columns, use ``\'category\'``\n - None (default) : The result will include all numeric columns.\n exclude : list-like of dtypes or None (default), optional\n A black list of data types to omit from the result. Ignored\n for ``Series``. Here are the options:\n\n - A list-like of dtypes : Excludes the provided data types\n from the result. To exclude numeric types submit\n ``numpy.number``. To exclude object columns submit the data\n type ``numpy.object``. Strings can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(exclude=[\'O\'])``). To\n exclude pandas categorical columns, use ``\'category\'``\n - None (default) : The result will exclude nothing.\n\n Returns\n -------\n Series or DataFrame\n Summary statistics of the Series or DataFrame provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the observations.\n DataFrame.select_dtypes: Subset of a DataFrame including/excluding\n columns based on their dtype.\n\n Notes\n -----\n For numeric data, the result\'s index will include ``count``,\n ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and\n upper percentiles. By default the lower percentile is ``25`` and the\n upper percentile is ``75``. The ``50`` percentile is the\n same as the median.\n\n For object data (e.g. strings or timestamps), the result\'s index\n will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``\n is the most common value. The ``freq`` is the most common value\'s\n frequency. Timestamps also include the ``first`` and ``last`` items.\n\n If multiple object values have the highest count, then the\n ``count`` and ``top`` results will be arbitrarily chosen from\n among those with the highest count.\n\n For mixed data types provided via a ``DataFrame``, the default is to\n return only an analysis of numeric columns. If the dataframe consists\n only of object and categorical data without any numeric columns, the\n default is to return an analysis of both the object and categorical\n columns. 
If ``include=\'all\'`` is provided as an option, the result\n will include a union of attributes of each type.\n\n The `include` and `exclude` parameters can be used to limit\n which columns in a ``DataFrame`` are analyzed for the output.\n The parameters are ignored when analyzing a ``Series``.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n dtype: float64\n\n Describing a categorical ``Series``.\n\n >>> s = pd.Series([\'a\', \'a\', \'b\', \'c\'])\n >>> s.describe()\n count 4\n unique 3\n top a\n freq 2\n dtype: object\n\n Describing a timestamp ``Series``.\n\n >>> s = pd.Series([\n ... np.datetime64("2000-01-01"),\n ... np.datetime64("2010-01-01"),\n ... np.datetime64("2010-01-01")\n ... ])\n >>> s.describe()\n count 3\n unique 2\n top 2010-01-01 00:00:00\n freq 2\n first 2000-01-01 00:00:00\n last 2010-01-01 00:00:00\n dtype: object\n\n Describing a ``DataFrame``. By default only numeric fields\n are returned.\n\n >>> df = pd.DataFrame({\'categorical\': pd.Categorical([\'d\',\'e\',\'f\']),\n ... \'numeric\': [1, 2, 3],\n ... \'object\': [\'a\', \'b\', \'c\']\n ... })\n >>> df.describe()\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Describing all columns of a ``DataFrame`` regardless of data type.\n\n >>> df.describe(include=\'all\')\n categorical numeric object\n count 3 3.0 3\n unique 3 NaN 3\n top f NaN c\n freq 1 NaN 1\n mean NaN 2.0 NaN\n std NaN 1.0 NaN\n min NaN 1.0 NaN\n 25% NaN 1.5 NaN\n 50% NaN 2.0 NaN\n 75% NaN 2.5 NaN\n max NaN 3.0 NaN\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n Name: numeric, dtype: float64\n\n Including only numeric columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.number])\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Including only string columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.object])\n object\n count 3\n unique 3\n top c\n freq 1\n\n Including only categorical columns from a ``DataFrame`` description.\n\n >>> df.describe(include=[\'category\'])\n categorical\n count 3\n unique 3\n top f\n freq 1\n\n Excluding numeric columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.number])\n categorical object\n count 3 3\n unique 3 3\n top f c\n freq 1 1\n\n Excluding object columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.object])\n categorical numeric\n count 3 3.0\n unique 3 NaN\n top f NaN\n freq 1 NaN\n mean NaN 2.0\n std NaN 1.0\n min NaN 1.0\n 25% NaN 1.5\n 50% NaN 2.0\n 75% NaN 2.5\n max NaN 3.0\n ' if (self.ndim >= 3): msg = 'describe is not implemented on Panel objects.' 
raise NotImplementedError(msg) elif ((self.ndim == 2) and (self.columns.size == 0)): raise ValueError('Cannot describe a DataFrame without columns') if (percentiles is not None): percentiles = list(percentiles) self._check_percentile(percentiles) if (0.5 not in percentiles): percentiles.append(0.5) percentiles = np.asarray(percentiles) else: percentiles = np.array([0.25, 0.5, 0.75]) unique_pcts = np.unique(percentiles) if (len(unique_pcts) < len(percentiles)): raise ValueError('percentiles cannot contain duplicates') percentiles = unique_pcts formatted_percentiles = format_percentiles(percentiles) def describe_numeric_1d(series): stat_index = ((['count', 'mean', 'std', 'min'] + formatted_percentiles) + ['max']) d = (([series.count(), series.mean(), series.std(), series.min()] + series.quantile(percentiles).tolist()) + [series.max()]) return pd.Series(d, index=stat_index, name=series.name) def describe_categorical_1d(data): names = ['count', 'unique'] objcounts = data.value_counts() count_unique = len(objcounts[(objcounts != 0)]) result = [data.count(), count_unique] if (result[1] > 0): (top, freq) = (objcounts.index[0], objcounts.iloc[0]) if is_datetime64_any_dtype(data): tz = data.dt.tz asint = data.dropna().values.view('i8') top = Timestamp(top) if ((top.tzinfo is not None) and (tz is not None)): top = top.tz_convert(tz) else: top = top.tz_localize(tz) names += ['top', 'freq', 'first', 'last'] result += [top, freq, Timestamp(asint.min(), tz=tz), Timestamp(asint.max(), tz=tz)] else: names += ['top', 'freq'] result += [top, freq] return pd.Series(result, index=names, name=data.name) def describe_1d(data): if is_bool_dtype(data): return describe_categorical_1d(data) elif is_numeric_dtype(data): return describe_numeric_1d(data) elif is_timedelta64_dtype(data): return describe_numeric_1d(data) else: return describe_categorical_1d(data) if (self.ndim == 1): return describe_1d(self) elif ((include is None) and (exclude is None)): data = self.select_dtypes(include=[np.number]) if (len(data.columns) == 0): data = self elif (include == 'all'): if (exclude is not None): msg = "exclude must be None when include is 'all'" raise ValueError(msg) data = self else: data = self.select_dtypes(include=include, exclude=exclude) ldesc = [describe_1d(s) for (_, s) in data.iteritems()] names = [] ldesc_indexes = sorted((x.index for x in ldesc), key=len) for idxnames in ldesc_indexes: for name in idxnames: if (name not in names): names.append(name) d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1) d.columns = data.columns.copy() return d
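A brief sketch of the describe record above, showing the default numeric-only output, include='all', and a custom percentile (assumes pandas; the column names 'num' and 'obj' are illustrative):

>>> import pandas as pd
>>> df = pd.DataFrame({'num': [1, 2, 3, 4], 'obj': ['a', 'a', 'b', 'c']})
>>> list(df.describe().columns)  # numeric columns only by default
['num']
>>> list(df.describe(include='all').columns)  # object column included as well
['num', 'obj']
>>> '10%' in df.describe(percentiles=[0.1]).index  # percentiles must lie in [0, 1]
True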
def _check_percentile(self, q): '\n Validate percentiles (used by describe and quantile).\n ' msg = 'percentiles should all be in the interval [0, 1]. Try {0} instead.' q = np.asarray(q) if (q.ndim == 0): if (not (0 <= q <= 1)): raise ValueError(msg.format((q / 100.0))) elif (not all(((0 <= qs <= 1) for qs in q))): raise ValueError(msg.format((q / 100.0))) return q
7,541,455,634,732,215,000
Validate percentiles (used by describe and quantile).
pandas/core/generic.py
_check_percentile
kapilepatel/pandas
python
def _check_percentile(self, q): '\n \n ' msg = 'percentiles should all be in the interval [0, 1]. Try {0} instead.' q = np.asarray(q) if (q.ndim == 0): if (not (0 <= q <= 1)): raise ValueError(msg.format((q / 100.0))) elif (not all(((0 <= qs <= 1) for qs in q))): raise ValueError(msg.format((q / 100.0))) return q
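_check_percentile is private, but its validation surfaces through public methods such as quantile; a sketch of both the passing and failing paths (assumes pandas; the exact exception text may vary by version):

>>> import pandas as pd
>>> s = pd.Series([1, 2, 3])
>>> s.quantile(0.5)  # values in [0, 1] pass the check
2.0
>>> s.quantile(50)  # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: percentiles should all be in the interval [0, 1]. Try 0.5 instead.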
@classmethod def _add_numeric_operations(cls): '\n Add the operations to the cls; evaluate the doc strings again\n ' (axis_descr, name, name2) = _doc_parms(cls) cls.any = _make_logical_function(cls, 'any', name, name2, axis_descr, _any_desc, nanops.nanany, _any_see_also, _any_examples, empty_value=False) cls.all = _make_logical_function(cls, 'all', name, name2, axis_descr, _all_desc, nanops.nanall, _all_see_also, _all_examples, empty_value=True) @Substitution(desc='Return the mean absolute deviation of the values for the requested axis.', name1=name, name2=name2, axis_descr=axis_descr, min_count='', see_also='', examples='') @Appender(_num_doc) def mad(self, axis=None, skipna=None, level=None): if (skipna is None): skipna = True if (axis is None): axis = self._stat_axis_number if (level is not None): return self._agg_by_level('mad', axis=axis, level=level, skipna=skipna) data = self._get_numeric_data() if (axis == 0): demeaned = (data - data.mean(axis=0)) else: demeaned = data.sub(data.mean(axis=1), axis=0) return np.abs(demeaned).mean(axis=axis, skipna=skipna) cls.mad = mad cls.sem = _make_stat_function_ddof(cls, 'sem', name, name2, axis_descr, 'Return unbiased standard error of the mean over requested axis.\n\nNormalized by N-1 by default. This can be changed using the ddof argument', nanops.nansem) cls.var = _make_stat_function_ddof(cls, 'var', name, name2, axis_descr, 'Return unbiased variance over requested axis.\n\nNormalized by N-1 by default. This can be changed using the ddof argument', nanops.nanvar) cls.std = _make_stat_function_ddof(cls, 'std', name, name2, axis_descr, 'Return sample standard deviation over requested axis.\n\nNormalized by N-1 by default. This can be changed using the ddof argument', nanops.nanstd) @Substitution(desc='Return the compound percentage of the values for the requested axis.', name1=name, name2=name2, axis_descr=axis_descr, min_count='', see_also='', examples='') @Appender(_num_doc) def compound(self, axis=None, skipna=None, level=None): if (skipna is None): skipna = True return ((1 + self).prod(axis=axis, skipna=skipna, level=level) - 1) cls.compound = compound cls.cummin = _make_cum_function(cls, 'cummin', name, name2, axis_descr, 'minimum', (lambda y, axis: np.minimum.accumulate(y, axis)), 'min', np.inf, np.nan, _cummin_examples) cls.cumsum = _make_cum_function(cls, 'cumsum', name, name2, axis_descr, 'sum', (lambda y, axis: y.cumsum(axis)), 'sum', 0.0, np.nan, _cumsum_examples) cls.cumprod = _make_cum_function(cls, 'cumprod', name, name2, axis_descr, 'product', (lambda y, axis: y.cumprod(axis)), 'prod', 1.0, np.nan, _cumprod_examples) cls.cummax = _make_cum_function(cls, 'cummax', name, name2, axis_descr, 'maximum', (lambda y, axis: np.maximum.accumulate(y, axis)), 'max', (- np.inf), np.nan, _cummax_examples) cls.sum = _make_min_count_stat_function(cls, 'sum', name, name2, axis_descr, 'Return the sum of the values for the requested axis.\n\n This is equivalent to the method ``numpy.sum``.', nanops.nansum, _stat_func_see_also, _sum_examples) cls.mean = _make_stat_function(cls, 'mean', name, name2, axis_descr, 'Return the mean of the values for the requested axis.', nanops.nanmean) cls.skew = _make_stat_function(cls, 'skew', name, name2, axis_descr, 'Return unbiased skew over requested axis\nNormalized by N-1.', nanops.nanskew) cls.kurt = _make_stat_function(cls, 'kurt', name, name2, axis_descr, "Return unbiased kurtosis over requested axis using Fisher's definition of\nkurtosis (kurtosis of normal == 0.0). 
Normalized by N-1.", nanops.nankurt) cls.kurtosis = cls.kurt cls.prod = _make_min_count_stat_function(cls, 'prod', name, name2, axis_descr, 'Return the product of the values for the requested axis.', nanops.nanprod, examples=_prod_examples) cls.product = cls.prod cls.median = _make_stat_function(cls, 'median', name, name2, axis_descr, 'Return the median of the values for the requested axis.', nanops.nanmedian) cls.max = _make_stat_function(cls, 'max', name, name2, axis_descr, 'Return the maximum of the values for the requested axis.\n\n If you want the *index* of the maximum, use ``idxmax``. This is\n the equivalent of the ``numpy.ndarray`` method ``argmax``.', nanops.nanmax, _stat_func_see_also, _max_examples) cls.min = _make_stat_function(cls, 'min', name, name2, axis_descr, 'Return the minimum of the values for the requested axis.\n\n If you want the *index* of the minimum, use ``idxmin``. This is\n the equivalent of the ``numpy.ndarray`` method ``argmin``.', nanops.nanmin, _stat_func_see_also, _min_examples)
5,735,241,837,271,939,000
Add the operations to the cls; evaluate the doc strings again
pandas/core/generic.py
_add_numeric_operations
kapilepatel/pandas
python
@classmethod def _add_numeric_operations(cls): '\n \n ' (axis_descr, name, name2) = _doc_parms(cls) cls.any = _make_logical_function(cls, 'any', name, name2, axis_descr, _any_desc, nanops.nanany, _any_see_also, _any_examples, empty_value=False) cls.all = _make_logical_function(cls, 'all', name, name2, axis_descr, _all_desc, nanops.nanall, _all_see_also, _all_examples, empty_value=True) @Substitution(desc='Return the mean absolute deviation of the values for the requested axis.', name1=name, name2=name2, axis_descr=axis_descr, min_count='', see_also='', examples='') @Appender(_num_doc) def mad(self, axis=None, skipna=None, level=None): if (skipna is None): skipna = True if (axis is None): axis = self._stat_axis_number if (level is not None): return self._agg_by_level('mad', axis=axis, level=level, skipna=skipna) data = self._get_numeric_data() if (axis == 0): demeaned = (data - data.mean(axis=0)) else: demeaned = data.sub(data.mean(axis=1), axis=0) return np.abs(demeaned).mean(axis=axis, skipna=skipna) cls.mad = mad cls.sem = _make_stat_function_ddof(cls, 'sem', name, name2, axis_descr, 'Return unbiased standard error of the mean over requested axis.\n\nNormalized by N-1 by default. This can be changed using the ddof argument', nanops.nansem) cls.var = _make_stat_function_ddof(cls, 'var', name, name2, axis_descr, 'Return unbiased variance over requested axis.\n\nNormalized by N-1 by default. This can be changed using the ddof argument', nanops.nanvar) cls.std = _make_stat_function_ddof(cls, 'std', name, name2, axis_descr, 'Return sample standard deviation over requested axis.\n\nNormalized by N-1 by default. This can be changed using the ddof argument', nanops.nanstd) @Substitution(desc='Return the compound percentage of the values for the requested axis.', name1=name, name2=name2, axis_descr=axis_descr, min_count='', see_also='', examples='') @Appender(_num_doc) def compound(self, axis=None, skipna=None, level=None): if (skipna is None): skipna = True return ((1 + self).prod(axis=axis, skipna=skipna, level=level) - 1) cls.compound = compound cls.cummin = _make_cum_function(cls, 'cummin', name, name2, axis_descr, 'minimum', (lambda y, axis: np.minimum.accumulate(y, axis)), 'min', np.inf, np.nan, _cummin_examples) cls.cumsum = _make_cum_function(cls, 'cumsum', name, name2, axis_descr, 'sum', (lambda y, axis: y.cumsum(axis)), 'sum', 0.0, np.nan, _cumsum_examples) cls.cumprod = _make_cum_function(cls, 'cumprod', name, name2, axis_descr, 'product', (lambda y, axis: y.cumprod(axis)), 'prod', 1.0, np.nan, _cumprod_examples) cls.cummax = _make_cum_function(cls, 'cummax', name, name2, axis_descr, 'maximum', (lambda y, axis: np.maximum.accumulate(y, axis)), 'max', (- np.inf), np.nan, _cummax_examples) cls.sum = _make_min_count_stat_function(cls, 'sum', name, name2, axis_descr, 'Return the sum of the values for the requested axis.\n\n This is equivalent to the method ``numpy.sum``.', nanops.nansum, _stat_func_see_also, _sum_examples) cls.mean = _make_stat_function(cls, 'mean', name, name2, axis_descr, 'Return the mean of the values for the requested axis.', nanops.nanmean) cls.skew = _make_stat_function(cls, 'skew', name, name2, axis_descr, 'Return unbiased skew over requested axis\nNormalized by N-1.', nanops.nanskew) cls.kurt = _make_stat_function(cls, 'kurt', name, name2, axis_descr, "Return unbiased kurtosis over requested axis using Fisher's definition of\nkurtosis (kurtosis of normal == 0.0). 
Normalized by N-1.", nanops.nankurt) cls.kurtosis = cls.kurt cls.prod = _make_min_count_stat_function(cls, 'prod', name, name2, axis_descr, 'Return the product of the values for the requested axis.', nanops.nanprod, examples=_prod_examples) cls.product = cls.prod cls.median = _make_stat_function(cls, 'median', name, name2, axis_descr, 'Return the median of the values for the requested axis.', nanops.nanmedian) cls.max = _make_stat_function(cls, 'max', name, name2, axis_descr, 'Return the maximum of the values for the requested axis.\n\n If you want the *index* of the maximum, use ``idxmax``. This is\n the equivalent of the ``numpy.ndarray`` method ``argmax``.', nanops.nanmax, _stat_func_see_also, _max_examples) cls.min = _make_stat_function(cls, 'min', name, name2, axis_descr, 'Return the minimum of the values for the requested axis.\n\n If you want the *index* of the minimum, use ``idxmin``. This is\n the equivalent of the ``numpy.ndarray`` method ``argmin``.', nanops.nanmin, _stat_func_see_also, _min_examples)
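The class method above attaches the reduction and accumulation methods at class-definition time; a sketch calling a few of the generated methods (assumes pandas; the Series values are illustrative):

>>> import pandas as pd
>>> s = pd.Series([1.0, 2.0, 4.0])
>>> s.sum()
7.0
>>> s.cumsum().tolist()
[1.0, 3.0, 7.0]
>>> round(s.mad(), 4)  # mean absolute deviation, defined inline above
1.1111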
@classmethod def _add_series_only_operations(cls): '\n Add the series only operations to the cls; evaluate the doc\n strings again.\n ' (axis_descr, name, name2) = _doc_parms(cls) def nanptp(values, axis=0, skipna=True): nmax = nanops.nanmax(values, axis, skipna) nmin = nanops.nanmin(values, axis, skipna) warnings.warn('Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.', FutureWarning, stacklevel=4) return (nmax - nmin) cls.ptp = _make_stat_function(cls, 'ptp', name, name2, axis_descr, 'Return the difference between the maximum value and the\n minimum value in the object. This is the equivalent of the\n ``numpy.ndarray`` method ``ptp``.\n\n.. deprecated:: 0.24.0\n Use numpy.ptp instead', nanptp)
1,031,498,453,064,267,600
Add the series only operations to the cls; evaluate the doc strings again.
pandas/core/generic.py
_add_series_only_operations
kapilepatel/pandas
python
@classmethod def _add_series_only_operations(cls): '\n Add the series only operations to the cls; evaluate the doc\n strings again.\n ' (axis_descr, name, name2) = _doc_parms(cls) def nanptp(values, axis=0, skipna=True): nmax = nanops.nanmax(values, axis, skipna) nmin = nanops.nanmin(values, axis, skipna) warnings.warn('Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.', FutureWarning, stacklevel=4) return (nmax - nmin) cls.ptp = _make_stat_function(cls, 'ptp', name, name2, axis_descr, 'Return the difference between the maximum value and the\n minimum value in the object. This is the equivalent of the\n ``numpy.ndarray`` method ``ptp``.\n\n.. deprecated:: 0.24.0\n Use numpy.ptp instead', nanptp)
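Since Series.ptp is deprecated in 0.24, the suggested replacement is numpy.ptp; a sketch of the equivalent max - min computation (assumes numpy and pandas):

>>> import numpy as np
>>> import pandas as pd
>>> s = pd.Series([3, 7, 1])
>>> int(np.ptp(s.values))  # same result as the deprecated s.ptp()
6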
@classmethod def _add_series_or_dataframe_operations(cls): '\n Add the series or dataframe only operations to the cls; evaluate\n the doc strings again.\n ' from pandas.core import window as rwindow @Appender(rwindow.rolling.__doc__) def rolling(self, window, min_periods=None, center=False, win_type=None, on=None, axis=0, closed=None): axis = self._get_axis_number(axis) return rwindow.rolling(self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed) cls.rolling = rolling @Appender(rwindow.expanding.__doc__) def expanding(self, min_periods=1, center=False, axis=0): axis = self._get_axis_number(axis) return rwindow.expanding(self, min_periods=min_periods, center=center, axis=axis) cls.expanding = expanding @Appender(rwindow.ewm.__doc__) def ewm(self, com=None, span=None, halflife=None, alpha=None, min_periods=0, adjust=True, ignore_na=False, axis=0): axis = self._get_axis_number(axis) return rwindow.ewm(self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis) cls.ewm = ewm
194,633,612,761,821,980
Add the series or dataframe only operations to the cls; evaluate the doc strings again.
pandas/core/generic.py
_add_series_or_dataframe_operations
kapilepatel/pandas
python
@classmethod def _add_series_or_dataframe_operations(cls): '\n Add the series or dataframe only operations to the cls; evaluate\n the doc strings again.\n ' from pandas.core import window as rwindow @Appender(rwindow.rolling.__doc__) def rolling(self, window, min_periods=None, center=False, win_type=None, on=None, axis=0, closed=None): axis = self._get_axis_number(axis) return rwindow.rolling(self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed) cls.rolling = rolling @Appender(rwindow.expanding.__doc__) def expanding(self, min_periods=1, center=False, axis=0): axis = self._get_axis_number(axis) return rwindow.expanding(self, min_periods=min_periods, center=center, axis=axis) cls.expanding = expanding @Appender(rwindow.ewm.__doc__) def ewm(self, com=None, span=None, halflife=None, alpha=None, min_periods=0, adjust=True, ignore_na=False, axis=0): axis = self._get_axis_number(axis) return rwindow.ewm(self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis) cls.ewm = ewm
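A sketch of the rolling and expanding entry points wired up above (assumes pandas; ewm follows the same pattern with smoothing parameters such as span):

>>> import pandas as pd
>>> s = pd.Series([1.0, 2.0, 3.0, 4.0])
>>> s.rolling(window=2).mean().tolist()
[nan, 1.5, 2.5, 3.5]
>>> s.expanding(min_periods=1).sum().tolist()
[1.0, 3.0, 6.0, 10.0]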
def _find_valid_index(self, how): "\n Retrieves the index of the first or last valid value.\n\n Parameters\n ----------\n how : {'first', 'last'}\n Use this parameter to change between the first or last valid index.\n\n Returns\n -------\n idx_first_valid : type of index\n " assert (how in ['first', 'last']) if (len(self) == 0): return None is_valid = (~ self.isna()) if (self.ndim == 2): is_valid = is_valid.any(1) if (how == 'first'): idxpos = is_valid.values[:].argmax() if (how == 'last'): idxpos = ((len(self) - 1) - is_valid.values[::(- 1)].argmax()) chk_notna = is_valid.iat[idxpos] idx = self.index[idxpos] if (not chk_notna): return None return idx
-2,625,748,619,487,744,500
Retrieves the index of the first or last valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index
pandas/core/generic.py
_find_valid_index
kapilepatel/pandas
python
def _find_valid_index(self, how): "\n Retrieves the index of the first or last valid value.\n\n Parameters\n ----------\n how : {'first', 'last'}\n Use this parameter to change between the first or last valid index.\n\n Returns\n -------\n idx_first_valid : type of index\n " assert (how in ['first', 'last']) if (len(self) == 0): return None is_valid = (~ self.isna()) if (self.ndim == 2): is_valid = is_valid.any(1) if (how == 'first'): idxpos = is_valid.values[:].argmax() if (how == 'last'): idxpos = ((len(self) - 1) - is_valid.values[::(- 1)].argmax()) chk_notna = is_valid.iat[idxpos] idx = self.index[idxpos] if (not chk_notna): return None return idx
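_find_valid_index is private; the public wrappers first_valid_index and last_valid_index exercise its 'first' and 'last' branches (assumes numpy and pandas):

>>> import numpy as np
>>> import pandas as pd
>>> s = pd.Series([np.nan, 2.0, np.nan, 4.0, np.nan])
>>> s.first_valid_index()
1
>>> s.last_valid_index()
3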
def __init__(self): '\n V1RetrieveBusinessRequest - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n ' self.swagger_types = {} self.attribute_map = {}
-1,316,852,080,324,229,600
V1RetrieveBusinessRequest - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition.
squareconnect/models/v1_retrieve_business_request.py
__init__
reduceus/connect-python-sdk
python
def __init__(self): '\n V1RetrieveBusinessRequest - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n ' self.swagger_types = {} self.attribute_map = {}
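A sketch instantiating the model above (assumes the squareconnect package is installed and that the module path mirrors the file path shown in the record; this model declares no fields):

>>> from squareconnect.models.v1_retrieve_business_request import V1RetrieveBusinessRequest
>>> req = V1RetrieveBusinessRequest()
>>> req.swagger_types
{}
>>> req.attribute_map
{}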
def to_dict(self): '\n Returns the model properties as a dict\n ' result = {} for (attr, _) in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
2,191,974,537,531,847,000
Returns the model properties as a dict
squareconnect/models/v1_retrieve_business_request.py
to_dict
reduceus/connect-python-sdk
python
def to_dict(self): '\n \n ' result = {} for (attr, _) in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
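Because swagger_types is empty for this model, to_dict iterates over nothing and returns an empty dict; a sketch under the same package assumption as above:

>>> from squareconnect.models.v1_retrieve_business_request import V1RetrieveBusinessRequest
>>> V1RetrieveBusinessRequest().to_dict()
{}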