code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def fetch(
self,
selector: Union[pd.Timestamp, slice, str] = slice(None, None),
level: Union[str, int] = "datetime",
col_set=DataHandler.CS_ALL,
data_key: DATA_KEY_TYPE = DK_I,
squeeze: bool = False,
proc_func: Callable = None,
) -> pd.DataFrame:
"""
fetch data from underlying data source
Parameters
----------
selector : Union[pd.Timestamp, slice, str]
describe how to select data by index.
level : Union[str, int]
which index level to select the data.
col_set : str
select a set of meaningful columns (e.g. features, columns).
data_key : str
the data to fetch: DK_*.
proc_func: Callable
please refer to the doc of DataHandler.fetch
Returns
-------
pd.DataFrame:
"""
return self._fetch_data(
data_storage=self._get_df_by_key(data_key),
selector=selector,
level=level,
col_set=col_set,
squeeze=squeeze,
proc_func=proc_func,
)
|
fetch data from underlying data source
Parameters
----------
selector : Union[pd.Timestamp, slice, str]
describe how to select data by index.
level : Union[str, int]
which index level to select the data.
col_set : str
select a set of meaningful columns (e.g. features, columns).
data_key : str
the data to fetch: DK_*.
proc_func: Callable
please refer to the doc of DataHandler.fetch
Returns
-------
pd.DataFrame:
|
fetch
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
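A minimal usage sketch for `fetch` (illustrative only; it assumes qlib has been initialized with prepared daily data and uses the Alpha158 handler from qlib.contrib, with a placeholder market and date):
.. code-block:: python
    import qlib
    import pandas as pd
    from qlib.contrib.data.handler import Alpha158
    from qlib.data.dataset.handler import DataHandlerLP

    qlib.init()  # assumes the default provider_uri points at prepared data
    h = Alpha158(instruments="csi300", start_time="2019-01-01", end_time="2019-12-31")
    # fetch the learnable feature columns of a single trading day
    df = h.fetch(
        selector=pd.Timestamp("2019-06-03"),
        level="datetime",
        col_set="feature",
        data_key=DataHandlerLP.DK_L,
    )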
def get_cols(self, col_set=DataHandler.CS_ALL, data_key: DATA_KEY_TYPE = DK_I) -> list:
"""
get the column names
Parameters
----------
col_set : str
select a set of meaningful columns (e.g. features, columns).
data_key : DATA_KEY_TYPE
the data to fetch: DK_*.
Returns
-------
list:
list of column names
"""
df = self._get_df_by_key(data_key).head()
df = fetch_df_by_col(df, col_set)
return df.columns.to_list()
|
get the column names
Parameters
----------
col_set : str
select a set of meaningful columns (e.g. features, columns).
data_key : DATA_KEY_TYPE
the data to fetch: DK_*.
Returns
-------
list:
list of column names
|
get_cols
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
def cast(cls, handler: "DataHandlerLP") -> "DataHandlerLP":
"""
Motivation
- A user creates a datahandler in their customized package, then wants to share the processed handler with
other users without introducing the package dependency and complicated data processing logic.
- This method makes it possible by casting the class to DataHandlerLP and keeping only the processed data.
Parameters
----------
handler : DataHandlerLP
A subclass of DataHandlerLP
Returns
-------
DataHandlerLP:
the converted processed data
"""
new_hd: DataHandlerLP = object.__new__(DataHandlerLP)
new_hd.from_cast = True # add a mark for the cast instance
for key in list(DataHandlerLP.ATTR_MAP.values()) + [
"instruments",
"start_time",
"end_time",
"fetch_orig",
"drop_raw",
]:
setattr(new_hd, key, getattr(handler, key, None))
return new_hd
|
Motivation
- A user creates a datahandler in their customized package, then wants to share the processed handler with
other users without introducing the package dependency and complicated data processing logic.
- This method makes it possible by casting the class to DataHandlerLP and keeping only the processed data.
Parameters
----------
handler : DataHandlerLP
A subclass of DataHandlerLP
Returns
-------
DataHandlerLP:
the converted processed data
|
cast
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
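A sketch of how `cast` can be used to share a processed handler without its defining package (hedged: the handler and pickle path are illustrative; Alpha158 stands in for any user-defined DataHandlerLP subclass, and `to_pickle` comes from qlib's Serializable base class):
.. code-block:: python
    from qlib.contrib.data.handler import Alpha158
    from qlib.data.dataset.handler import DataHandlerLP

    h = Alpha158(instruments="csi300", start_time="2019-01-01", end_time="2019-12-31")
    portable = DataHandlerLP.cast(h)
    # the cast instance keeps only the processed data, so it can be pickled and
    # loaded by users who do not have the package that defines the original handler
    portable.to_pickle("portable_handler.pkl", dump_all=True)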
def load(self, instruments, start_time=None, end_time=None) -> pd.DataFrame:
"""
load the data as pd.DataFrame.
Example of the data (The multi-index of the columns is optional.):
.. code-block:: text
feature label
$close $volume Ref($close, 1) Mean($close, 3) $high-$low LABEL0
datetime instrument
2010-01-04 SH600000 81.807068 17145150.0 83.737389 83.016739 2.741058 0.0032
SH600004 13.313329 11800983.0 13.313329 13.317701 0.183632 0.0042
SH600005 37.796539 12231662.0 38.258602 37.919757 0.970325 0.0289
Parameters
----------
instruments : str or dict
it can either be the market name or the config file of instruments generated by InstrumentProvider.
If the value of instruments is None, it means that no filtering is done.
start_time : str
start of the time range.
end_time : str
end of the time range.
Returns
-------
pd.DataFrame:
data loaded from the underlying source
Raises
------
KeyError:
if the instruments filter is not supported, raise KeyError
"""
|
load the data as pd.DataFrame.
Example of the data (The multi-index of the columns is optional.):
.. code-block:: text
feature label
$close $volume Ref($close, 1) Mean($close, 3) $high-$low LABEL0
datetime instrument
2010-01-04 SH600000 81.807068 17145150.0 83.737389 83.016739 2.741058 0.0032
SH600004 13.313329 11800983.0 13.313329 13.317701 0.183632 0.0042
SH600005 37.796539 12231662.0 38.258602 37.919757 0.970325 0.0289
Parameters
----------
instruments : str or dict
it can either be the market name or the config file of instruments generated by InstrumentProvider.
If the value of instruments is None, it means that no filtering is done.
start_time : str
start of the time range.
end_time : str
end of the time range.
Returns
-------
pd.DataFrame:
data loaded from the underlying source
Raises
------
KeyError:
if the instruments filter is not supported, raise KeyError
|
load
|
python
|
microsoft/qlib
|
qlib/data/dataset/loader.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/loader.py
|
MIT
|
def __init__(self, config: Union[list, tuple, dict]):
"""
Parameters
----------
config : Union[list, tuple, dict]
Config will be used to describe the fields and column names
.. code-block::
<config> := {
"group_name1": <fields_info1>
"group_name2": <fields_info2>
}
or
<config> := <fields_info>
<fields_info> := ["expr", ...] | (["expr", ...], ["col_name", ...])
# NOTE: a list or tuple will be treated as <fields_info> when parsing
"""
self.is_group = isinstance(config, dict)
if self.is_group:
self.fields = {grp: self._parse_fields_info(fields_info) for grp, fields_info in config.items()}
else:
self.fields = self._parse_fields_info(config)
|
Parameters
----------
config : Union[list, tuple, dict]
Config will be used to describe the fields and column names
.. code-block::
<config> := {
"group_name1": <fields_info1>
"group_name2": <fields_info2>
}
or
<config> := <fields_info>
<fields_info> := ["expr", ...] | (["expr", ...], ["col_name", ...])
# NOTE: a list or tuple will be treated as <fields_info> when parsing
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/dataset/loader.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/loader.py
|
MIT
|
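An illustrative config for a DLWParser subclass such as QlibDataLoader, showing both the grouped and the flat form (field expressions and column names are placeholders):
.. code-block:: python
    # grouped form: the loaded columns get a MultiIndex whose first level is the group name
    grouped_config = {
        "feature": (["$close", "Ref($close, 1)"], ["CLOSE", "CLOSE_1"]),
        "label": (["Ref($close, -2)/Ref($close, -1) - 1"], ["LABEL0"]),
    }
    # flat form: expressions only; the expressions themselves are used as column names
    flat_config = ["$close", "$volume"]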
def load_group_df(
self,
instruments,
exprs: list,
names: list,
start_time: Union[str, pd.Timestamp] = None,
end_time: Union[str, pd.Timestamp] = None,
gp_name: str = None,
) -> pd.DataFrame:
"""
load the dataframe for a specific group
Parameters
----------
instruments :
the instruments.
exprs : list
the expressions to describe the content of the data.
names : list
the name of the data.
Returns
-------
pd.DataFrame:
the queried dataframe.
"""
|
load the dataframe for a specific group
Parameters
----------
instruments :
the instruments.
exprs : list
the expressions to describe the content of the data.
names : list
the name of the data.
Returns
-------
pd.DataFrame:
the queried dataframe.
|
load_group_df
|
python
|
microsoft/qlib
|
qlib/data/dataset/loader.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/loader.py
|
MIT
|
def __init__(
self,
config: Tuple[list, tuple, dict],
filter_pipe: List = None,
swap_level: bool = True,
freq: Union[str, dict] = "day",
inst_processors: Union[dict, list] = None,
):
"""
Parameters
----------
config : Tuple[list, tuple, dict]
Please refer to the doc of DLWParser
filter_pipe :
Filter pipe for the instruments
swap_level :
Whether to swap level of MultiIndex
freq: dict or str
If type(config) == dict and type(freq) == str, load config data using freq.
If type(config) == dict and type(freq) == dict, load config[<group_name>] data using freq[<group_name>]
inst_processors: dict | list
If inst_processors is not None and type(config) == dict, load config[<group_name>] data using inst_processors[<group_name>]
If inst_processors is a list, then it will be applied to all groups.
"""
self.filter_pipe = filter_pipe
self.swap_level = swap_level
self.freq = freq
# sample
self.inst_processors = inst_processors if inst_processors is not None else {}
assert isinstance(
self.inst_processors, (dict, list)
), f"inst_processors(={self.inst_processors}) must be dict or list"
super().__init__(config)
if self.is_group:
# check sample config
if isinstance(freq, dict):
for _gp in config.keys():
if _gp not in freq:
raise ValueError(f"freq(={freq}) missing group(={_gp})")
assert (
self.inst_processors
), f"freq(={self.freq}), inst_processors(={self.inst_processors}) cannot be None/empty"
|
Parameters
----------
config : Tuple[list, tuple, dict]
Please refer to the doc of DLWParser
filter_pipe :
Filter pipe for the instruments
swap_level :
Whether to swap level of MultiIndex
freq: dict or str
If type(config) == dict and type(freq) == str, load config data using freq.
If type(config) == dict and type(freq) == dict, load config[<group_name>] data using freq[<group_name>]
inst_processors: dict | list
If inst_processors is not None and type(config) == dict, load config[<group_name>] data using inst_processors[<group_name>]
If inst_processors is a list, then it will be applied to all groups.
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/dataset/loader.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/loader.py
|
MIT
|
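A hedged usage sketch for this loader (assuming qlib.init() has been called with prepared daily data; the instrument pool, expressions, and dates are placeholders):
.. code-block:: python
    from qlib.data.dataset.loader import QlibDataLoader

    qdl = QlibDataLoader(
        config={
            "feature": (["$close", "$volume"], ["CLOSE", "VOLUME"]),
            "label": (["Ref($close, -2)/Ref($close, -1) - 1"], ["LABEL0"]),
        },
        freq="day",
    )
    df = qdl.load(instruments="csi300", start_time="2020-01-01", end_time="2020-12-31")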
def __init__(self, config: Union[dict, str, pd.DataFrame], join="outer"):
"""
Parameters
----------
config : dict
{fields_group: <path or object>}
join : str
How to align different dataframes
"""
self._config = config # using "_" to avoid confliction with the method `config` of Serializable
self.join = join
self._data = None
|
Parameters
----------
config : dict
{fields_group: <path or object>}
join : str
How to align different dataframes
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/dataset/loader.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/loader.py
|
MIT
|
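A minimal sketch for this static loader (the file names are hypothetical; each pickle is assumed to hold a DataFrame indexed by (datetime, instrument)):
.. code-block:: python
    from qlib.data.dataset.loader import StaticDataLoader

    sdl = StaticDataLoader(config={"feature": "features.pkl", "label": "labels.pkl"}, join="outer")
    df = sdl.load()  # instruments/start_time/end_time can also be passed to restrict the result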
def __init__(self, dataloader_l: List[Dict], join="left") -> None:
"""
Parameters
----------
dataloader_l : list[dict]
A list of dataloaders, for example
.. code-block:: python
nd = NestedDataLoader(
dataloader_l=[
{
"class": "qlib.contrib.data.loader.Alpha158DL",
}, {
"class": "qlib.contrib.data.loader.Alpha360DL",
"kwargs": {
"config": {
"label": ( ["Ref($close, -2)/Ref($close, -1) - 1"], ["LABEL0"])
}
}
}
]
)
join :
it will pass to pd.concat when merging it.
"""
super().__init__()
self.data_loader_l = [
(dl if isinstance(dl, DataLoader) else init_instance_by_config(dl)) for dl in dataloader_l
]
self.join = join
|
Parameters
----------
dataloader_l : list[dict]
A list of dataloaders, for example
.. code-block:: python
nd = NestedDataLoader(
dataloader_l=[
{
"class": "qlib.contrib.data.loader.Alpha158DL",
}, {
"class": "qlib.contrib.data.loader.Alpha360DL",
"kwargs": {
"config": {
"label": ( ["Ref($close, -2)/Ref($close, -1) - 1"], ["LABEL0"])
}
}
}
]
)
join :
it will pass to pd.concat when merging it.
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/dataset/loader.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/loader.py
|
MIT
|
def __init__(self, handler_config: dict, fetch_kwargs: dict = {}, is_group=False):
"""
Parameters
----------
handler_config : dict
handler_config will be used to describe the handlers
.. code-block::
<handler_config> := {
"group_name1": <handler>
"group_name2": <handler>
}
or
<handler_config> := <handler>
<handler> := DataHandler Instance | DataHandler Config
fetch_kwargs : dict
fetch_kwargs will be used to describe the different arguments of fetch method, such as col_set, squeeze, data_key, etc.
is_group: bool
is_group will be used to describe whether the keys of handler_config are group names
"""
from qlib.data.dataset.handler import DataHandler # pylint: disable=C0415
if is_group:
self.handlers = {
grp: init_instance_by_config(config, accept_types=DataHandler) for grp, config in handler_config.items()
}
else:
self.handlers = init_instance_by_config(handler_config, accept_types=DataHandler)
self.is_group = is_group
self.fetch_kwargs = {"col_set": DataHandler.CS_RAW}
self.fetch_kwargs.update(fetch_kwargs)
|
Parameters
----------
handler_config : dict
handler_config will be used to describe the handlers
.. code-block::
<handler_config> := {
"group_name1": <handler>
"group_name2": <handler>
}
or
<handler_config> := <handler>
<handler> := DataHandler Instance | DataHandler Config
fetch_kwargs : dict
fetch_kwargs will be used to describe the different arguments of fetch method, such as col_set, squeeze, data_key, etc.
is_group: bool
is_group will be used to describe whether the keys of handler_config are group names
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/dataset/loader.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/loader.py
|
MIT
|
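A hedged example of wrapping an existing handler with this loader (assuming qlib has been initialized and the Alpha158 handler can be built; all arguments are illustrative):
.. code-block:: python
    from qlib.data.dataset.loader import DataLoaderDH

    dl = DataLoaderDH(
        handler_config={
            "class": "Alpha158",
            "module_path": "qlib.contrib.data.handler",
            "kwargs": {"instruments": "csi300", "start_time": "2019-01-01", "end_time": "2019-12-31"},
        },
    )
    df = dl.load(start_time="2019-01-01", end_time="2019-06-30")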
def get_group_columns(df: pd.DataFrame, group: Union[Text, None]):
"""
get a group of columns from multi-index columns DataFrame
Parameters
----------
df : pd.DataFrame
with multi-index columns.
group : str
the name of the feature group, i.e. the first level value of the group index.
"""
if group is None:
return df.columns
else:
return df.columns[df.columns.get_loc(group)]
|
get a group of columns from multi-index columns DataFrame
Parameters
----------
df : pd.DataFrame
with multi-index columns.
group : str
the name of the feature group, i.e. the first level value of the group index.
|
get_group_columns
|
python
|
microsoft/qlib
|
qlib/data/dataset/processor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/processor.py
|
MIT
|
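A self-contained example of selecting one group from multi-index columns (the data values are made up):
.. code-block:: python
    import pandas as pd
    from qlib.data.dataset.processor import get_group_columns

    cols = pd.MultiIndex.from_tuples(
        [("feature", "$close"), ("feature", "$volume"), ("label", "LABEL0")]
    )
    df = pd.DataFrame([[10.0, 2.0e6, 0.01]], columns=cols)
    print(get_group_columns(df, "feature"))  # the two columns under the "feature" group
    print(get_group_columns(df, None))       # all columns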
def fit(self, df: pd.DataFrame = None):
"""
learn data processing parameters
Parameters
----------
df : pd.DataFrame
When we fit and process data with processors one by one, the fit function relies on the output of the previous
processor, i.e. `df`.
"""
|
learn data processing parameters
Parameters
----------
df : pd.DataFrame
When we fit and process data with processors one by one, the fit function relies on the output of the previous
processor, i.e. `df`.
|
fit
|
python
|
microsoft/qlib
|
qlib/data/dataset/processor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/processor.py
|
MIT
|
def __call__(self, df: pd.DataFrame):
"""
process the data
NOTE: **The processor may change the content of `df` in place!**
Users should keep a copy of the data outside if needed.
Parameters
----------
df : pd.DataFrame
The raw_df of handler or result from previous processor.
"""
|
process the data
NOTE: **The processor may change the content of `df` in place!**
Users should keep a copy of the data outside if needed.
Parameters
----------
df : pd.DataFrame
The raw_df of handler or result from previous processor.
|
__call__
|
python
|
microsoft/qlib
|
qlib/data/dataset/processor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/processor.py
|
MIT
|
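A sketch of a custom processor built on the `fit`/`__call__` interface above (the class name is hypothetical; it mirrors qlib's built-in dropna-style processors):
.. code-block:: python
    import pandas as pd
    from qlib.data.dataset.processor import Processor

    class DropNaRows(Processor):
        # hypothetical processor: nothing to learn in fit, drop NaN rows in __call__
        def fit(self, df: pd.DataFrame = None):
            pass

        def __call__(self, df: pd.DataFrame) -> pd.DataFrame:
            # returns a new frame; callers should not rely on the input being preserved
            return df.dropna()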
def __init__(
self,
start_time: Optional[Union[pd.Timestamp, str]] = None,
end_time: Optional[Union[pd.Timestamp, str]] = None,
freq: str = "day",
):
"""
Parameters
----------
start_time : Optional[Union[pd.Timestamp, str]]
The data must start no later than `start_time`
None indicates data will not be filtered based on `start_time`
end_time : Optional[Union[pd.Timestamp, str]]
similar to start_time
freq : str
The frequency of the calendar
"""
# Align to calendar before filtering
cal = D.calendar(start_time=start_time, end_time=end_time, freq=freq)
self.start_time = None if start_time is None else cal[0]
self.end_time = None if end_time is None else cal[-1]
|
Parameters
----------
start_time : Optional[Union[pd.Timestamp, str]]
The data must start no later than `start_time`
None indicates data will not be filtered based on `start_time`
end_time : Optional[Union[pd.Timestamp, str]]
similar to start_time
freq : str
The frequency of the calendar
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/dataset/processor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/processor.py
|
MIT
|
def fetch(
self,
selector: Union[pd.Timestamp, slice, str, list] = slice(None, None),
level: Union[str, int] = "datetime",
col_set: Union[str, List[str]] = DataHandler.CS_ALL,
fetch_orig: bool = True,
proc_func: Callable = None,
**kwargs,
) -> pd.DataFrame:
"""fetch data from the data storage
Parameters
----------
selector : Union[pd.Timestamp, slice, str]
describe how to select data by index
level : Union[str, int]
which index level to select the data
- if level is None, apply selector to df directly
col_set : Union[str, List[str]]
- if isinstance(col_set, str):
select a set of meaningful columns (e.g. features, columns)
if col_set == DataHandler.CS_RAW:
the raw dataset will be returned.
- if isinstance(col_set, List[str]):
select several sets of meaningful columns, the returned data has multiple level
fetch_orig : bool
Return the original data instead of copy if possible.
proc_func: Callable
please refer to the doc of DataHandler.fetch
Returns
-------
pd.DataFrame
the dataframe fetched
"""
raise NotImplementedError("fetch is method not implemented!")
|
fetch data from the data storage
Parameters
----------
selector : Union[pd.Timestamp, slice, str]
describe how to select data by index
level : Union[str, int]
which index level to select the data
- if level is None, apply selector to df directly
col_set : Union[str, List[str]]
- if isinstance(col_set, str):
select a set of meaningful columns (e.g. features, columns)
if col_set == DataHandler.CS_RAW:
the raw dataset will be returned.
- if isinstance(col_set, List[str]):
select several sets of meaningful columns, the returned data has multiple level
fetch_orig : bool
Return the original data instead of copy if possible.
proc_func: Callable
please refer to the doc of DataHandler.fetch
Returns
-------
pd.DataFrame
the dataframe fetched
|
fetch
|
python
|
microsoft/qlib
|
qlib/data/dataset/storage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/storage.py
|
MIT
|
def _fetch_hash_df_by_stock(self, selector, level):
"""fetch the data with stock selector
Parameters
----------
selector : Union[pd.Timestamp, slice, str]
describe how to select data by index
level : Union[str, int]
which index level to select the data
- if level is None, apply selector to df directly
- the `_fetch_hash_df_by_stock` will parse the stock selector in arg `selector`
Returns
-------
Tuple[Dict, object]:
a dict whose keys are stock ids and values are the stocks' data, together with the parsed time selector
"""
stock_selector = slice(None)
time_selector = slice(None) # by default not filter by time.
if level is None:
# For directly applying.
if isinstance(selector, tuple) and self.stock_level < len(selector):
# full selector format
stock_selector = selector[self.stock_level]
time_selector = selector[1 - self.stock_level]
elif isinstance(selector, (list, str)) and self.stock_level == 0:
# only stock selector
stock_selector = selector
elif level in ("instrument", self.stock_level):
if isinstance(selector, tuple):
# NOTE: How could the stock level selector be a tuple?
stock_selector = selector[0]
raise TypeError(
"I forget why would this case appear. But I think it does not make sense. So we raise a error for that case."
)
elif isinstance(selector, (list, str)):
stock_selector = selector
if not isinstance(stock_selector, (list, str)) and stock_selector != slice(None):
raise TypeError(f"stock selector must be type str|list, or slice(None), rather than {stock_selector}")
if stock_selector == slice(None):
return self.hash_df, time_selector
if isinstance(stock_selector, str):
stock_selector = [stock_selector]
select_dict = dict()
for each_stock in sorted(stock_selector):
if each_stock in self.hash_df:
select_dict[each_stock] = self.hash_df[each_stock]
return select_dict, time_selector
|
fetch the data with stock selector
Parameters
----------
selector : Union[pd.Timestamp, slice, str]
describe how to select data by index
level : Union[str, int]
which index level to select the data
- if level is None, apply selector to df directly
- the `_fetch_hash_df_by_stock` will parse the stock selector in arg `selector`
Returns
-------
Tuple[Dict, object]:
a dict whose keys are stock ids and values are the stocks' data, together with the parsed time selector
|
_fetch_hash_df_by_stock
|
python
|
microsoft/qlib
|
qlib/data/dataset/storage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/storage.py
|
MIT
|
def get_level_index(df: pd.DataFrame, level: Union[str, int]) -> int:
"""
get the level index of `df` given `level`
Parameters
----------
df : pd.DataFrame
data
level : Union[str, int]
index level
Returns
-------
int:
The level index in the multiple index
"""
if isinstance(level, str):
try:
return df.index.names.index(level)
except (AttributeError, ValueError):
# NOTE: If level index is not given in the data, the default level index will be ('datetime', 'instrument')
return ("datetime", "instrument").index(level)
elif isinstance(level, int):
return level
else:
raise NotImplementedError(f"This type of input is not supported")
|
get the level index of `df` given `level`
Parameters
----------
df : pd.DataFrame
data
level : Union[str, int]
index level
Returns
-------
int:
The level index in the multiple index
|
get_level_index
|
python
|
microsoft/qlib
|
qlib/data/dataset/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/utils.py
|
MIT
|
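A small example of the level lookup (the index names follow qlib's usual <datetime, instrument> order):
.. code-block:: python
    import pandas as pd
    from qlib.data.dataset.utils import get_level_index

    idx = pd.MultiIndex.from_product(
        [[pd.Timestamp("2020-01-02")], ["SH600000"]], names=["datetime", "instrument"]
    )
    df = pd.DataFrame({"$close": [1.0]}, index=idx)
    assert get_level_index(df, "datetime") == 0
    assert get_level_index(df, "instrument") == 1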
def fetch_df_by_index(
df: pd.DataFrame,
selector: Union[pd.Timestamp, slice, str, list, pd.Index],
level: Union[str, int],
fetch_orig=True,
) -> pd.DataFrame:
"""
fetch data from `df` with `selector` and `level`
the selector is assumed to be well processed;
`fetch_df_by_index` is only responsible for getting the right level
Parameters
----------
selector : Union[pd.Timestamp, slice, str, list]
selector
level : Union[int, str]
the level to use the selector
Returns
-------
Data of the given index.
"""
# level = None -> use selector directly
if level is None or isinstance(selector, pd.MultiIndex):
return df.loc(axis=0)[selector]
# Try to get the right index
idx_slc = (selector, slice(None, None))
if get_level_index(df, level) == 1:
idx_slc = idx_slc[1], idx_slc[0]
if fetch_orig:
for slc in idx_slc:
if slc != slice(None, None):
return df.loc[pd.IndexSlice[idx_slc],] # noqa: E231
else: # pylint: disable=W0120
return df
else:
return df.loc[pd.IndexSlice[idx_slc],] # noqa: E231
|
fetch data from `df` with `selector` and `level`
the selector is assumed to be well processed;
`fetch_df_by_index` is only responsible for getting the right level
Parameters
----------
selector : Union[pd.Timestamp, slice, str, list]
selector
level : Union[int, str]
the level to use the selector
Returns
-------
Data of the given index.
|
fetch_df_by_index
|
python
|
microsoft/qlib
|
qlib/data/dataset/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/utils.py
|
MIT
|
def convert_index_format(df: Union[pd.DataFrame, pd.Series], level: str = "datetime") -> Union[pd.DataFrame, pd.Series]:
"""
Convert the format of df.MultiIndex according to the following rules:
- If `level` is the first level of df.MultiIndex, do nothing
- If `level` is the second level of df.MultiIndex, swap the level of index.
NOTE:
the number of levels of df.MultiIndex should be 2
Parameters
----------
df : Union[pd.DataFrame, pd.Series]
raw DataFrame/Series
level : str, optional
the level that will be converted to the first one, by default "datetime"
Returns
-------
Union[pd.DataFrame, pd.Series]
converted DataFrame/Series
"""
if get_level_index(df, level=level) == 1:
df = df.swaplevel().sort_index()
return df
|
Convert the format of df.MultiIndex according to the following rules:
- If `level` is the first level of df.MultiIndex, do nothing
- If `level` is the second level of df.MultiIndex, swap the level of index.
NOTE:
the number of levels of df.MultiIndex should be 2
Parameters
----------
df : Union[pd.DataFrame, pd.Series]
raw DataFrame/Series
level : str, optional
the level that will be converted to the first one, by default "datetime"
Returns
-------
Union[pd.DataFrame, pd.Series]
converted DataFrame/Series
|
convert_index_format
|
python
|
microsoft/qlib
|
qlib/data/dataset/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/utils.py
|
MIT
|
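A short example of the swap behaviour (the values are made up; "datetime" starts as the second level, so the levels get swapped):
.. code-block:: python
    import pandas as pd
    from qlib.data.dataset.utils import convert_index_format

    idx = pd.MultiIndex.from_tuples(
        [("SH600000", pd.Timestamp("2020-01-02")), ("SH600000", pd.Timestamp("2020-01-03"))],
        names=["instrument", "datetime"],
    )
    s = pd.Series([1.0, 2.0], index=idx)
    s = convert_index_format(s, level="datetime")
    assert s.index.names[0] == "datetime"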
def init_task_handler(task: dict) -> DataHandler:
"""
initialize the handler part of the task **inplace**
Parameters
----------
task : dict
the task to be handled
Returns
-------
DataHandler:
the initialized handler instance
"""
# avoid recursive import
from .handler import DataHandler # pylint: disable=C0415
h_conf = task["dataset"]["kwargs"].get("handler")
if h_conf is not None:
handler = init_instance_by_config(h_conf, accept_types=DataHandler)
task["dataset"]["kwargs"]["handler"] = handler
return handler
else:
raise ValueError("The task does not contains a handler part.")
|
initialize the handler part of the task **inplace**
Parameters
----------
task : dict
the task to be handled
Returns
-------
DataHandler:
the initialized handler instance
|
init_task_handler
|
python
|
microsoft/qlib
|
qlib/data/dataset/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/utils.py
|
MIT
|
def setup_data(self, **kwargs):
"""
Setup the data.
We split out the setup_data function for the following situation:
- A user has a Dataset object with learned status on disk.
- The user loads the Dataset object from the disk.
- The user calls `setup_data` to load new data.
- The user prepares data for the model based on the previous status.
"""
|
Setup the data.
We split out the setup_data function for the following situation:
- A user has a Dataset object with learned status on disk.
- The user loads the Dataset object from the disk.
- The user calls `setup_data` to load new data.
- The user prepares data for the model based on the previous status.
|
setup_data
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
def prepare(self, **kwargs) -> object:
"""
The type of dataset depends on the model. (It could be pd.DataFrame, pytorch.DataLoader, etc.)
The parameters should specify the scope for the prepared data
The method should:
- process the data
- return the processed data
Returns
-------
object:
return the object
"""
|
The type of dataset depends on the model. (It could be pd.DataFrame, pytorch.DataLoader, etc.)
The parameters should specify the scope for the prepared data
The method should:
- process the data
- return the processed data
Returns
-------
object:
return the object
|
prepare
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
def __init__(
self,
handler: Union[Dict, DataHandler],
segments: Dict[Text, Tuple],
fetch_kwargs: Dict = {},
**kwargs,
):
"""
Setup the underlying data.
Parameters
----------
handler : Union[dict, DataHandler]
handler could be:
- instance of `DataHandler`
- config of `DataHandler`. Please refer to `DataHandler`
segments : dict
Describe the options to segment the data.
Here are some examples:
.. code-block::
1) 'segments': {
'train': ("2008-01-01", "2014-12-31"),
'valid': ("2017-01-01", "2020-08-01",),
'test': ("2015-01-01", "2016-12-31",),
}
2) 'segments': {
'insample': ("2008-01-01", "2014-12-31"),
'outsample': ("2017-01-01", "2020-08-01",),
}
"""
self.handler: DataHandler = init_instance_by_config(handler, accept_types=DataHandler)
self.segments = segments.copy()
self.fetch_kwargs = copy(fetch_kwargs)
super().__init__(**kwargs)
|
Setup the underlying data.
Parameters
----------
handler : Union[dict, DataHandler]
handler could be:
- instance of `DataHandler`
- config of `DataHandler`. Please refer to `DataHandler`
segments : dict
Describe the options to segment the data.
Here are some examples:
.. code-block::
1) 'segments': {
'train': ("2008-01-01", "2014-12-31"),
'valid': ("2017-01-01", "2020-08-01",),
'test': ("2015-01-01", "2016-12-31",),
}
2) 'segments': {
'insample': ("2008-01-01", "2014-12-31"),
'outsample': ("2017-01-01", "2020-08-01",),
}
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
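An illustrative DatasetH config combining a handler config with segments (assuming qlib has been initialized with data covering the chosen dates; handler class, market, and dates are placeholders):
.. code-block:: python
    from qlib.data.dataset import DatasetH

    ds = DatasetH(
        handler={
            "class": "Alpha158",
            "module_path": "qlib.contrib.data.handler",
            "kwargs": {"instruments": "csi300", "start_time": "2008-01-01", "end_time": "2016-12-31"},
        },
        segments={
            "train": ("2008-01-01", "2014-12-31"),
            "valid": ("2015-01-01", "2015-12-31"),
            "test": ("2016-01-01", "2016-12-31"),
        },
    )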
def config(self, handler_kwargs: dict = None, **kwargs):
"""
Initialize the DatasetH
Parameters
----------
handler_kwargs : dict
Config of DataHandler, which could include the following arguments:
- arguments of DataHandler.conf_data, such as 'instruments', 'start_time' and 'end_time'.
kwargs : dict
Config of DatasetH, such as
- segments : dict
Config of segments, which is the same as 'segments' in self.__init__
"""
if handler_kwargs is not None:
self.handler.config(**handler_kwargs)
if "segments" in kwargs:
self.segments = deepcopy(kwargs.pop("segments"))
super().config(**kwargs)
|
Initialize the DatasetH
Parameters
----------
handler_kwargs : dict
Config of DataHandler, which could include the following arguments:
- arguments of DataHandler.conf_data, such as 'instruments', 'start_time' and 'end_time'.
kwargs : dict
Config of DatasetH, such as
- segments : dict
Config of segments, which is the same as 'segments' in self.__init__
|
config
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
def setup_data(self, handler_kwargs: dict = None, **kwargs):
"""
Setup the Data
Parameters
----------
handler_kwargs : dict
init arguments of DataHandler, which could include the following arguments:
- init_type : Init Type of Handler
- enable_cache : whether to enable cache
"""
super().setup_data(**kwargs)
if handler_kwargs is not None:
self.handler.setup_data(**handler_kwargs)
|
Setup the Data
Parameters
----------
handler_kwargs : dict
init arguments of DataHandler, which could include the following arguments:
- init_type : Init Type of Handler
- enable_cache : whether to enable cache
|
setup_data
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
def _prepare_seg(self, slc, **kwargs):
"""
Give a query, retrieve the according data
Parameters
----------
slc : please refer to the docs of `prepare`
NOTE: it may not be an instance of slice. It may be a segment of `segments` from `def prepare`
"""
if hasattr(self, "fetch_kwargs"):
return self.handler.fetch(slc, **kwargs, **self.fetch_kwargs)
else:
return self.handler.fetch(slc, **kwargs)
|
Give a query, retrieve the according data
Parameters
----------
slc : please refer to the docs of `prepare`
NOTE: it may not be an instance of slice. It may be a segment of `segments` from `def prepare`
|
_prepare_seg
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
def prepare(
self,
segments: Union[List[Text], Tuple[Text], Text, slice, pd.Index],
col_set=DataHandler.CS_ALL,
data_key=DataHandlerLP.DK_I,
**kwargs,
) -> Union[List[pd.DataFrame], pd.DataFrame]:
"""
Prepare the data for learning and inference.
Parameters
----------
segments : Union[List[Text], Tuple[Text], Text, slice]
Describe the scope of the data to be prepared
Here are some examples:
- 'train'
- ['train', 'valid']
col_set : str
The col_set will be passed to self.handler when fetching data.
TODO: make it automatic:
- select DK_I for test data
- select DK_L for training data.
data_key : str
The data to fetch: DK_*
Default is DK_I, which indicates fetching data for **inference**.
kwargs :
The parameters that kwargs may contain:
flt_col : str
It only exists in TSDatasetH, can be used to add a column of data(True or False) to filter data.
This parameter is only supported when it is an instance of TSDatasetH.
Returns
-------
Union[List[pd.DataFrame], pd.DataFrame]:
Raises
------
NotImplementedError:
"""
logger = get_module_logger("DatasetH")
seg_kwargs = {"col_set": col_set}
seg_kwargs.update(kwargs)
if "data_key" in getfullargspec(self.handler.fetch).args:
seg_kwargs["data_key"] = data_key
else:
logger.info(f"data_key[{data_key}] is ignored.")
# Conflicts may happen here
# - The fetched data and the segment key may both be strings
# To resolve the conflict
# - The segment name has higher priority
# 1) Use it as a segment name first
if isinstance(segments, str) and segments in self.segments:
return self._prepare_seg(self.segments[segments], **seg_kwargs)
if isinstance(segments, (list, tuple)) and all(seg in self.segments for seg in segments):
return [self._prepare_seg(self.segments[seg], **seg_kwargs) for seg in segments]
# 2) Otherwise, pass it directly to prepare a single segment
return self._prepare_seg(segments, **seg_kwargs)
|
Prepare the data for learning and inference.
Parameters
----------
segments : Union[List[Text], Tuple[Text], Text, slice]
Describe the scope of the data to be prepared
Here are some examples:
- 'train'
- ['train', 'valid']
col_set : str
The col_set will be passed to self.handler when fetching data.
TODO: make it automatic:
- select DK_I for test data
- select DK_L for training data.
data_key : str
The data to fetch: DK_*
Default is DK_I, which indicates fetching data for **inference**.
kwargs :
The parameters that kwargs may contain:
flt_col : str
It only exists in TSDatasetH, can be used to add a column of data(True or False) to filter data.
This parameter is only supported when it is an instance of TSDatasetH.
Returns
-------
Union[List[pd.DataFrame], pd.DataFrame]:
Raises
------
NotImplementedError:
|
prepare
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
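A hedged usage sketch for `prepare` (it assumes `ds` is a DatasetH configured with "train"/"valid"/"test" segments, as in the constructor example shown earlier):
.. code-block:: python
    from qlib.data.dataset.handler import DataHandlerLP

    df_train, df_valid = ds.prepare(
        ["train", "valid"], col_set=["feature", "label"], data_key=DataHandlerLP.DK_L
    )
    df_test = ds.prepare("test", col_set="feature", data_key=DataHandlerLP.DK_I)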
def _get_extrema(segments, idx: int, cmp: Callable, key_func=pd.Timestamp):
"""it will act like sort and return the max value or None"""
candidate = None
for k, seg in segments.items():
point = seg[idx]
if point is None:
# None indicates unbounded, return directly
return None
elif candidate is None or cmp(key_func(candidate), key_func(point)):
candidate = point
return candidate
|
it will act like sort and return the max value or None
|
_get_extrema
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
def __init__(
self,
data: pd.DataFrame,
start,
end,
step_len: int,
fillna_type: str = "none",
dtype=None,
flt_data=None,
):
"""
Build a dataset which looks like torch.data.utils.Dataset.
Parameters
----------
data : pd.DataFrame
The raw tabular data whose index order is <"datetime", "instrument">
start :
The indexable start time
end :
The indexable end time
step_len : int
The length of the time-series step
fillna_type : str
How qlib will handle the sample if there is no sample on a specific date.
none:
fill with np.nan
ffill:
ffill with previous sample
ffill+bfill:
ffill with previous samples first and fill with later samples second
flt_data : pd.Series
a column of data(True or False) to filter data. Its index order is <"datetime", "instrument">
None:
keep all data
"""
self.start = start
self.end = end
self.step_len = step_len
self.fillna_type = fillna_type
assert get_level_index(data, "datetime") == 0
self.data = data.swaplevel().sort_index().copy()
data.drop(
data.columns, axis=1, inplace=True
) # the original `data` is no longer needed once it has been copied into self.data; free it here to avoid keeping three big dataframes in memory (data, self.data, self.data_arr)
kwargs = {"object": self.data}
if dtype is not None:
kwargs["dtype"] = dtype
self.data_arr = np.array(**kwargs) # Get index from numpy.array will much faster than DataFrame.values!
# NOTE:
# - append last line with full NaN for better performance in `__getitem__`
# - Keep the same dtype will result in a better performance
self.data_arr = np.append(
self.data_arr,
np.full((1, self.data_arr.shape[1]), np.nan, dtype=self.data_arr.dtype),
axis=0,
)
self.nan_idx = len(self.data_arr) - 1 # The last line is all NaN; setting it to -1 can cause bug #1716
# the data type will be changed
# The index of usable data is between start_idx and end_idx
self.idx_df, self.idx_map = self.build_index(self.data)
self.data_index = deepcopy(self.data.index)
if flt_data is not None:
if isinstance(flt_data, pd.DataFrame):
assert len(flt_data.columns) == 1
flt_data = flt_data.iloc[:, 0]
# NOTE: bool(np.nan) is True !!!!!!!!
# make sure reindex comes first. Otherwise extra NaN may appear.
flt_data = flt_data.swaplevel()
flt_data = flt_data.reindex(self.data_index).fillna(False).astype(bool)
self.flt_data = flt_data.values
self.idx_map = self.flt_idx_map(self.flt_data, self.idx_map)
self.data_index = self.data_index[np.where(self.flt_data)[0]]
self.idx_map = self.idx_map2arr(self.idx_map)
self.idx_map, self.data_index = self.slice_idx_map_and_data_index(
self.idx_map, self.idx_df, self.data_index, start, end
)
self.idx_arr = np.array(self.idx_df.values, dtype=np.float64) # for better performance
del self.data # save memory
|
Build a dataset which looks like torch.data.utils.Dataset.
Parameters
----------
data : pd.DataFrame
The raw tabular data whose index order is <"datetime", "instrument">
start :
The indexable start time
end :
The indexable end time
step_len : int
The length of the time-series step
fillna_type : str
How qlib will handle the sample if there is no sample on a specific date.
none:
fill with np.nan
ffill:
ffill with previous sample
ffill+bfill:
ffill with previous samples first and fill with later samples second
flt_data : pd.Series
a column of data(True or False) to filter data. Its index order is <"datetime", "instrument">
None:
keep all data
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
def build_index(data: pd.DataFrame) -> Tuple[pd.DataFrame, dict]:
"""
The relation of the data
Parameters
----------
data : pd.DataFrame
A DataFrame with index in order <instrument, datetime>
RSQR5 RESI5 WVMA5 LABEL0
instrument datetime
SH600000 2017-01-03 0.016389 0.461632 -1.154788 -0.048056
2017-01-04 0.884545 -0.110597 -1.059332 -0.030139
2017-01-05 0.507540 -0.535493 -1.099665 -0.644983
2017-01-06 -1.267771 -0.669685 -1.636733 0.295366
2017-01-09 0.339346 0.074317 -0.984989 0.765540
Returns
-------
Tuple[pd.DataFrame, dict]:
1) the first element: reshape the original index into a <datetime(row), instrument(column)> 2D dataframe
instrument SH600000 SH600008 SH600009 SH600010 SH600011 SH600015 ...
datetime
2017-01-03 0 242 473 717 NaN 974 ...
2017-01-04 1 243 474 718 NaN 975 ...
2017-01-05 2 244 475 719 NaN 976 ...
2017-01-06 3 245 476 720 NaN 977 ...
2) the second element: {<original index>: <row, col>}
"""
# use object dtype in case pandas converts int to float
idx_df = pd.Series(range(data.shape[0]), index=data.index, dtype=object)
idx_df = lazy_sort_index(idx_df.unstack())
# NOTE: the correctness of `__getitem__` depends on columns sorted here
idx_df = lazy_sort_index(idx_df, axis=1).T
idx_map = {}
for i, (_, row) in enumerate(idx_df.iterrows()):
for j, real_idx in enumerate(row):
if not np.isnan(real_idx):
idx_map[real_idx] = (i, j)
return idx_df, idx_map
|
The relation of the data
Parameters
----------
data : pd.DataFrame
A DataFrame with index in order <instrument, datetime>
RSQR5 RESI5 WVMA5 LABEL0
instrument datetime
SH600000 2017-01-03 0.016389 0.461632 -1.154788 -0.048056
2017-01-04 0.884545 -0.110597 -1.059332 -0.030139
2017-01-05 0.507540 -0.535493 -1.099665 -0.644983
2017-01-06 -1.267771 -0.669685 -1.636733 0.295366
2017-01-09 0.339346 0.074317 -0.984989 0.765540
Returns
-------
Tuple[pd.DataFrame, dict]:
1) the first element: reshape the original index into a <datetime(row), instrument(column)> 2D dataframe
instrument SH600000 SH600008 SH600009 SH600010 SH600011 SH600015 ...
datetime
2017-01-03 0 242 473 717 NaN 974 ...
2017-01-04 1 243 474 718 NaN 975 ...
2017-01-05 2 244 475 719 NaN 976 ...
2017-01-06 3 245 476 720 NaN 977 ...
2) the second element: {<original index>: <row, col>}
|
build_index
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
def _get_indices(self, row: int, col: int) -> np.array:
"""
get series indices of self.data_arr from the row, col indices of self.idx_df
Parameters
----------
row : int
the row in self.idx_df
col : int
the col in self.idx_df
Returns
-------
np.array:
The indices of the data in self.data_arr
"""
indices = self.idx_arr[max(row - self.step_len + 1, 0) : row + 1, col]
if len(indices) < self.step_len:
indices = np.concatenate([np.full((self.step_len - len(indices),), np.nan), indices])
if self.fillna_type == "ffill":
indices = np_ffill(indices)
elif self.fillna_type == "ffill+bfill":
indices = np_ffill(np_ffill(indices)[::-1])[::-1]
else:
assert self.fillna_type == "none"
return indices
|
get series indices of self.data_arr from the row, col indices of self.idx_df
Parameters
----------
row : int
the row in self.idx_df
col : int
the col in self.idx_df
Returns
-------
np.array:
The indices of the data in self.data_arr
|
_get_indices
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
def _get_row_col(self, idx) -> Tuple[int]:
"""
get the col index and row index of a given sample index in self.idx_df
Parameters
----------
idx :
the input of `__getitem__`
Returns
-------
Tuple[int]:
the row and col index
"""
# Find the right row number `i` and col number `j` in idx_df
if isinstance(idx, (int, np.integer)):
real_idx = idx
if 0 <= real_idx < len(self.idx_map):
i, j = self.idx_map[real_idx] # TODO: The performance of this line is not good
else:
raise KeyError(f"{real_idx} is out of [0, {len(self.idx_map)})")
elif isinstance(idx, tuple):
# <TSDataSampler object>["datetime", "instruments"]
date, inst = idx
date = pd.Timestamp(date)
i = bisect.bisect_right(self.idx_df.index, date) - 1
# NOTE: This relies on the idx_df columns sorted in `__init__`
j = bisect.bisect_left(self.idx_df.columns, inst)
else:
raise NotImplementedError(f"This type of input is not supported")
return i, j
|
get the col index and row index of a given sample index in self.idx_df
Parameters
----------
idx :
the input of `__getitem__`
Returns
-------
Tuple[int]:
the row and col index
|
_get_row_col
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
def __getitem__(self, idx: Union[int, Tuple[object, str], List[int]]):
"""
# We have two methods to get the time-series of a sample
tsds is an instance of TSDataSampler
# 1) sample by int index directly
tsds[len(tsds) - 1]
# 2) sample by <datetime,instrument> index
tsds['2016-12-31', "SZ300315"]
# The return value will be similar to the data retrieved by following code
df.loc(axis=0)['2015-01-01':'2016-12-31', "SZ300315"].iloc[-30:]
Parameters
----------
idx : Union[int, Tuple[object, str]]
"""
# Multi-index type
mtit = (list, np.ndarray)
if isinstance(idx, mtit):
indices = [self._get_indices(*self._get_row_col(i)) for i in idx]
indices = np.concatenate(indices)
else:
indices = self._get_indices(*self._get_row_col(idx))
# 1) for better performance, use the last nan line for padding the lost date
# 2) In case of precision problems. We use np.float64. # TODO: I'm not sure if whether np.float64 will result in
# precision problems. It will not cause any problems in my tests at least
indices = np.nan_to_num(indices.astype(np.float64), nan=self.nan_idx).astype(int)
if (np.diff(indices) == 1).all(): # slicing instead of indexing for speeding up.
data = self.data_arr[indices[0] : indices[-1] + 1]
else:
data = self.data_arr[indices]
if isinstance(idx, mtit):
# if we get multiple indices, an additional dimension should be added.
# <sample_idx, step_idx, feature_idx>
data = data.reshape(-1, self.step_len, *data.shape[1:])
return data
|
# We have two methods to get the time-series of a sample
tsds is an instance of TSDataSampler
# 1) sample by int index directly
tsds[len(tsds) - 1]
# 2) sample by <datetime,instrument> index
tsds['2016-12-31', "SZ300315"]
# The return value will be similar to the data retrieved by following code
df.loc(axis=0)['2015-01-01':'2016-12-31', "SZ300315"].iloc[-30:]
Parameters
----------
idx : Union[int, Tuple[object, str]]
|
__getitem__
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
def _prepare_seg(self, slc: slice, **kwargs) -> TSDataSampler:
"""
splitting out _prepare_raw_seg leaves a hook for data preprocessing before creating the processed data
NOTE: TSDatasetH only supports slc segments on datetime!
"""
dtype = kwargs.pop("dtype", None)
if not isinstance(slc, slice):
slc = slice(*slc)
start, end = slc.start, slc.stop
flt_col = kwargs.pop("flt_col", None)
# TSDatasetH will retrieve more data for complete time-series
ext_slice = self._extend_slice(slc, self.cal, self.step_len)
data = super()._prepare_seg(ext_slice, **kwargs)
flt_kwargs = deepcopy(kwargs)
if flt_col is not None:
flt_kwargs["col_set"] = flt_col
flt_data = super()._prepare_seg(ext_slice, **flt_kwargs)
assert len(flt_data.columns) == 1
else:
flt_data = None
tsds = TSDataSampler(
data=data,
start=start,
end=end,
step_len=self.step_len,
dtype=dtype,
flt_data=flt_data,
)
return tsds
|
splitting out _prepare_raw_seg leaves a hook for data preprocessing before creating the processed data
NOTE: TSDatasetH only supports slc segments on datetime!
|
_prepare_seg
|
python
|
microsoft/qlib
|
qlib/data/dataset/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/__init__.py
|
MIT
|
def _freq_file(self) -> str:
"""the freq to read from file"""
if not hasattr(self, "_freq_file_cache"):
freq = Freq(self.freq)
if freq not in self.support_freq:
# NOTE: uri
# 1. If `uri` does not exist
# - Get the `min_uri` of the closest `freq` under the same "directory" as the `uri`
# - Read data from `min_uri` and resample to `freq`
freq = Freq.get_recent_freq(freq, self.support_freq)
if freq is None:
raise ValueError(f"can't find a freq from {self.support_freq} that can resample to {self.freq}!")
self._freq_file_cache = freq
return self._freq_file_cache
|
the freq to read from file
|
_freq_file
|
python
|
microsoft/qlib
|
qlib/data/storage/file_storage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/storage/file_storage.py
|
MIT
|
def __delitem__(self, i) -> None:
"""
Raises
------
ValueError
If the data(storage) does not exist, raise ValueError
"""
raise NotImplementedError(
"Subclass of CalendarStorage must implement `__delitem__(i: int)`/`__delitem__(s: slice)` method"
)
|
Raises
------
ValueError
If the data(storage) does not exist, raise ValueError
|
__delitem__
|
python
|
microsoft/qlib
|
qlib/data/storage/storage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/storage/storage.py
|
MIT
|
def __getitem__(self, i) -> CalVT:
"""
Raises
------
ValueError
If the data(storage) does not exist, raise ValueError
"""
raise NotImplementedError(
"Subclass of CalendarStorage must implement `__getitem__(i: int)`/`__getitem__(s: slice)` method"
)
|
Raises
------
ValueError
If the data(storage) does not exist, raise ValueError
|
__getitem__
|
python
|
microsoft/qlib
|
qlib/data/storage/storage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/storage/storage.py
|
MIT
|
def rebase(self, start_index: int = None, end_index: int = None):
"""Rebase the start_index and end_index of the FeatureStorage.
start_index and end_index are closed intervals: [start_index, end_index]
Examples
---------
.. code-block::
feature:
3 4
4 5
5 6
>>> self.rebase(start_index=4)
feature:
4 5
5 6
>>> self.rebase(start_index=3)
feature:
3 np.nan
4 5
5 6
>>> self.write([3], index=3)
feature:
3 3
4 5
5 6
>>> self.rebase(end_index=4)
feature:
3 3
4 5
>>> self.write([6, 7, 8], index=4)
feature:
3 3
4 6
5 7
6 8
>>> self.rebase(start_index=4, end_index=5)
feature:
4 6
5 7
"""
storage_si = self.start_index
storage_ei = self.end_index
if storage_si is None or storage_ei is None:
raise ValueError("storage.start_index or storage.end_index is None, storage may not exist")
start_index = storage_si if start_index is None else start_index
end_index = storage_ei if end_index is None else end_index
if start_index is None or end_index is None:
logger.warning("both start_index and end_index are None, or storage does not exist; rebase is ignored")
return
if start_index < 0 or end_index < 0:
logger.warning("start_index or end_index cannot be less than 0")
return
if start_index > end_index:
logger.warning(
f"start_index({start_index}) > end_index({end_index}), rebase is ignored; "
f"if you need to clear the FeatureStorage, please execute: FeatureStorage.clear"
)
return
if start_index <= storage_si:
self.write([np.nan] * (storage_si - start_index), start_index)
else:
self.rewrite(self[start_index:].values, start_index)
if end_index >= self.end_index:
self.write([np.nan] * (end_index - self.end_index))
else:
self.rewrite(self[: end_index + 1].values, start_index)
|
Rebase the start_index and end_index of the FeatureStorage.
start_index and end_index are closed intervals: [start_index, end_index]
Examples
---------
.. code-block::
feature:
3 4
4 5
5 6
>>> self.rebase(start_index=4)
feature:
4 5
5 6
>>> self.rebase(start_index=3)
feature:
3 np.nan
4 5
5 6
>>> self.write([3], index=3)
feature:
3 3
4 5
5 6
>>> self.rebase(end_index=4)
feature:
3 3
4 5
>>> self.write([6, 7, 8], index=4)
feature:
3 3
4 6
5 7
6 8
>>> self.rebase(start_index=4, end_index=5)
feature:
4 6
5 7
|
rebase
|
python
|
microsoft/qlib
|
qlib/data/storage/storage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/storage/storage.py
|
MIT
|
def __getitem__(self, s: slice) -> pd.Series:
"""x.__getitem__(slice(start: int, stop: int, step: int)) <==> x[start:stop:step]
Returns
-------
pd.Series(values, index=pd.RangeIndex(start, len(values)))
"""
|
x.__getitem__(slice(start: int, stop: int, step: int)) <==> x[start:stop:step]
Returns
-------
pd.Series(values, index=pd.RangeIndex(start, len(values)))
|
__getitem__
|
python
|
microsoft/qlib
|
qlib/data/storage/storage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/storage/storage.py
|
MIT
|
def __getitem__(self, i) -> Union[Tuple[int, float], pd.Series]:
"""x.__getitem__(y) <==> x[y]
Notes
-------
if data(storage) does not exist:
if isinstance(i, int):
return (None, None)
if isinstance(i, slice):
# return empty pd.Series
return pd.Series(dtype=np.float32)
"""
raise NotImplementedError(
"Subclass of FeatureStorage must implement `__getitem__(i: int)`/`__getitem__(s: slice)` method"
)
|
x.__getitem__(y) <==> x[y]
Notes
-------
if data(storage) does not exist:
if isinstance(i, int):
return (None, None)
if isinstance(i, slice):
# return empty pd.Series
return pd.Series(dtype=np.float32)
|
__getitem__
|
python
|
microsoft/qlib
|
qlib/data/storage/storage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/storage/storage.py
|
MIT
|
def begin_task_train(task_config: dict, experiment_name: str, recorder_name: str = None) -> Recorder:
"""
Begin task training to start a recorder and save the task config.
Args:
task_config (dict): the config of a task
experiment_name (str): the name of experiment
recorder_name (str): the given name will be the recorder name. None for using rid.
Returns:
Recorder: the model recorder
"""
with R.start(experiment_name=experiment_name, recorder_name=recorder_name):
_log_task_info(task_config)
return R.get_recorder()
|
Begin task training to start a recorder and save the task config.
Args:
task_config (dict): the config of a task
experiment_name (str): the name of experiment
recorder_name (str): the given name will be the recorder name. None for using rid.
Returns:
Recorder: the model recorder
|
begin_task_train
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
def end_task_train(rec: Recorder, experiment_name: str) -> Recorder:
"""
Finish task training with real model fitting and saving.
Args:
rec (Recorder): the recorder that will be resumed
experiment_name (str): the name of experiment
Returns:
Recorder: the model recorder
"""
with R.start(experiment_name=experiment_name, recorder_id=rec.info["id"], resume=True):
task_config = R.load_object("task")
_exe_task(task_config)
return rec
|
Finish task training with real model fitting and saving.
Args:
rec (Recorder): the recorder that will be resumed
experiment_name (str): the name of experiment
Returns:
Recorder: the model recorder
|
end_task_train
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
def task_train(task_config: dict, experiment_name: str, recorder_name: str = None) -> Recorder:
"""
Task based training, will be divided into two steps.
Parameters
----------
task_config : dict
The config of a task.
experiment_name: str
The name of experiment
recorder_name: str
The name of recorder
Returns
-------
Recorder: The instance of the recorder
"""
with R.start(experiment_name=experiment_name, recorder_name=recorder_name):
_log_task_info(task_config)
_exe_task(task_config)
return R.get_recorder()
|
Task based training, will be divided into two steps.
Parameters
----------
task_config : dict
The config of a task.
experiment_name: str
The name of experiment
recorder_name: str
The name of recorder
Returns
-------
Recorder: The instance of the recorder
|
task_train
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
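A sketch of a task config passed to `task_train` (hedged: the model/dataset entries are abbreviated, and `handler_config` / `segments` are placeholders standing for configs like those shown earlier; a real task usually also carries a "record" section):
.. code-block:: python
    from qlib.model.trainer import task_train

    task = {
        "model": {"class": "LGBModel", "module_path": "qlib.contrib.model.gbdt"},
        "dataset": {
            "class": "DatasetH",
            "module_path": "qlib.data.dataset",
            "kwargs": {"handler": handler_config, "segments": segments},
        },
    }
    rec = task_train(task, experiment_name="my_exp")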
def __init__(
self,
experiment_name: Optional[str] = None,
train_func: Callable = task_train,
call_in_subproc: bool = False,
default_rec_name: Optional[str] = None,
):
"""
Init TrainerR.
Args:
experiment_name (str, optional): the default name of experiment.
train_func (Callable, optional): default training method. Defaults to `task_train`.
call_in_subproc (bool): call the process in subprocess to force memory release
"""
super().__init__()
self.experiment_name = experiment_name
self.default_rec_name = default_rec_name
self.train_func = train_func
self._call_in_subproc = call_in_subproc
|
Init TrainerR.
Args:
experiment_name (str, optional): the default name of experiment.
train_func (Callable, optional): default training method. Defaults to `task_train`.
call_in_subproc (bool): call the process in subprocess to force memory release
|
__init__
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
def train(self, tasks: list, train_func: Callable = None, experiment_name: str = None, **kwargs) -> List[Recorder]:
"""
Given a list of `tasks`, return a list of trained Recorders. The order of the results matches the order of the tasks.
Args:
tasks (list): a list of definitions based on `task` dict
train_func (Callable): the training method which needs at least `tasks` and `experiment_name`. None for the default training method.
experiment_name (str): the experiment name. None for using the default name.
kwargs: the params for train_func.
Returns:
List[Recorder]: a list of Recorders
"""
if isinstance(tasks, dict):
tasks = [tasks]
if len(tasks) == 0:
return []
if train_func is None:
train_func = self.train_func
if experiment_name is None:
experiment_name = self.experiment_name
recs = []
for task in tqdm(tasks, desc="train tasks"):
if self._call_in_subproc:
get_module_logger("TrainerR").info("running models in sub process (for forcing release memroy).")
train_func = call_in_subproc(train_func, C)
rec = train_func(task, experiment_name, recorder_name=self.default_rec_name, **kwargs)
rec.set_tags(**{self.STATUS_KEY: self.STATUS_BEGIN})
recs.append(rec)
return recs
|
Given a list of `tasks`, return a list of trained Recorders. The order of the results matches the order of the tasks.
Args:
tasks (list): a list of definitions based on `task` dict
train_func (Callable): the training method which needs at least `tasks` and `experiment_name`. None for the default training method.
experiment_name (str): the experiment name. None for using the default name.
kwargs: the params for train_func.
Returns:
List[Recorder]: a list of Recorders
|
train
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
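A brief usage sketch for the trainer (assuming `task` is a task config like the one sketched above and qlib has been initialized):
.. code-block:: python
    from qlib.model.trainer import TrainerR

    trainer = TrainerR(experiment_name="my_exp")
    recorders = trainer.train([task])          # train each task and return its Recorder
    recorders = trainer.end_train(recorders)   # tag the recorders as finished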
def end_train(self, models: list, **kwargs) -> List[Recorder]:
"""
Set STATUS_END tag to the recorders.
Args:
models (list): a list of trained recorders.
Returns:
List[Recorder]: the same list as the param.
"""
if isinstance(models, Recorder):
models = [models]
for rec in models:
rec.set_tags(**{self.STATUS_KEY: self.STATUS_END})
return models
|
Set STATUS_END tag to the recorders.
Args:
models (list): a list of trained recorders.
Returns:
List[Recorder]: the same list as the param.
|
end_train
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
def __init__(
self, experiment_name: str = None, train_func=begin_task_train, end_train_func=end_task_train, **kwargs
):
"""
Init DelayTrainerR.
Args:
experiment_name (str): the default name of experiment.
train_func (Callable, optional): default train method. Defaults to `begin_task_train`.
end_train_func (Callable, optional): default end_train method. Defaults to `end_task_train`.
"""
super().__init__(experiment_name, train_func, **kwargs)
self.end_train_func = end_train_func
self.delay = True
|
Init DelayTrainerR.
Args:
experiment_name (str): the default name of experiment.
train_func (Callable, optional): default train method. Defaults to `begin_task_train`.
end_train_func (Callable, optional): default end_train method. Defaults to `end_task_train`.
|
__init__
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
def end_train(self, models, end_train_func=None, experiment_name: str = None, **kwargs) -> List[Recorder]:
"""
Given a list of Recorders, return a list of trained Recorders.
This class will finish real data loading and model fitting.
Args:
models (list): a list of Recorder, the tasks have been saved to them
end_train_func (Callable, optional): the end_train method which needs at least `recorders` and `experiment_name`. Defaults to None, which means using self.end_train_func.
experiment_name (str): the experiment name, None to use the default name.
kwargs: the params for end_train_func.
Returns:
List[Recorder]: a list of Recorders
"""
if isinstance(models, Recorder):
models = [models]
if end_train_func is None:
end_train_func = self.end_train_func
if experiment_name is None:
experiment_name = self.experiment_name
for rec in models:
if rec.list_tags()[self.STATUS_KEY] == self.STATUS_END:
continue
end_train_func(rec, experiment_name, **kwargs)
rec.set_tags(**{self.STATUS_KEY: self.STATUS_END})
return models
|
Given a list of Recorders, return a list of trained Recorders.
This class will finish real data loading and model fitting.
Args:
models (list): a list of Recorder, the tasks have been saved to them
end_train_func (Callable, optional): the end_train method which needs at least `recorders` and `experiment_name`. Defaults to None, which means using self.end_train_func.
experiment_name (str): the experiment name, None to use the default name.
kwargs: the params for end_train_func.
Returns:
List[Recorder]: a list of Recorders
|
end_train
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
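A minimal sketch of the delayed two-step flow (reusing the hypothetical `my_task` placeholder from the earlier TrainerR sketch):

from qlib.model.trainer import DelayTrainerR

trainer = DelayTrainerR(experiment_name="delay_trainer_demo")
recs = trainer.train([my_task])   # begin_task_train: only the task config is logged, nothing is fitted yet
recs = trainer.end_train(recs)    # end_task_train: the real data loading and model fitting happen here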
|
def __init__(
self,
experiment_name: str = None,
task_pool: str = None,
train_func=task_train,
skip_run_task: bool = False,
default_rec_name: Optional[str] = None,
):
"""
Init TrainerRM.
Args:
experiment_name (str): the default name of experiment.
task_pool (str): task pool name in TaskManager. None for use same name as experiment_name.
train_func (Callable, optional): default training method. Defaults to `task_train`.
skip_run_task (bool):
If True, do not call run_task here; the tasks will only be executed by the worker.
Otherwise, run_task is also executed locally.
"""
super().__init__()
self.experiment_name = experiment_name
self.task_pool = task_pool
self.train_func = train_func
self.skip_run_task = skip_run_task
self.default_rec_name = default_rec_name
|
Init TrainerRM.
Args:
experiment_name (str): the default name of experiment.
task_pool (str): task pool name in TaskManager. None for use same name as experiment_name.
train_func (Callable, optional): default training method. Defaults to `task_train`.
skip_run_task (bool):
If True, do not call run_task here; the tasks will only be executed by the worker.
Otherwise, run_task is also executed locally.
|
__init__
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
def train(
self,
tasks: list,
train_func: Callable = None,
experiment_name: str = None,
before_status: str = TaskManager.STATUS_WAITING,
after_status: str = TaskManager.STATUS_DONE,
default_rec_name: Optional[str] = None,
**kwargs,
) -> List[Recorder]:
"""
Given a list of `tasks`, return a list of trained Recorders. The output order matches the input order.
This method defaults to a single process, but TaskManager offers a great way to parallelize training.
Users can customize their train_func to realize multiple processes or even multiple machines.
Args:
tasks (list): a list of definitions based on `task` dict
train_func (Callable): the training method which needs at least `tasks` and `experiment_name`. None for the default training method.
experiment_name (str): the experiment name, None to use the default name.
before_status (str): the tasks in before_status will be fetched and trained. Can be STATUS_WAITING, STATUS_PART_DONE.
after_status (str): the tasks after trained will become after_status. Can be STATUS_WAITING, STATUS_PART_DONE.
kwargs: the params for train_func.
Returns:
List[Recorder]: a list of Recorders
"""
if isinstance(tasks, dict):
tasks = [tasks]
if len(tasks) == 0:
return []
if train_func is None:
train_func = self.train_func
if experiment_name is None:
experiment_name = self.experiment_name
if default_rec_name is None:
default_rec_name = self.default_rec_name
task_pool = self.task_pool
if task_pool is None:
task_pool = experiment_name
tm = TaskManager(task_pool=task_pool)
_id_list = tm.create_task(tasks) # all tasks will be saved to MongoDB
query = {"_id": {"$in": _id_list}}
if not self.skip_run_task:
run_task(
train_func,
task_pool,
query=query, # only train these tasks
experiment_name=experiment_name,
before_status=before_status,
after_status=after_status,
recorder_name=default_rec_name,
**kwargs,
)
if not self.is_delay():
tm.wait(query=query)
recs = []
for _id in _id_list:
rec = tm.re_query(_id)["res"]
rec.set_tags(**{self.STATUS_KEY: self.STATUS_BEGIN})
rec.set_tags(**{self.TM_ID: _id})
recs.append(rec)
return recs
|
Given a list of `tasks`, return a list of trained Recorders. The output order matches the input order.
This method defaults to a single process, but TaskManager offers a great way to parallelize training.
Users can customize their train_func to realize multiple processes or even multiple machines.
Args:
tasks (list): a list of definitions based on `task` dict
train_func (Callable): the training method which needs at least `tasks` and `experiment_name`. None for the default training method.
experiment_name (str): the experiment name, None to use the default name.
before_status (str): the tasks in before_status will be fetched and trained. Can be STATUS_WAITING, STATUS_PART_DONE.
after_status (str): the tasks after trained will become after_status. Can be STATUS_WAITING, STATUS_PART_DONE.
kwargs: the params for train_func.
Returns:
List[Recorder]: a list of Recorders
|
train
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
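A minimal usage sketch (assumptions: a running MongoDB instance, the mongo settings shown below, and the hypothetical `my_task` placeholder from the earlier sketch):

from qlib.config import C
from qlib.model.trainer import TrainerRM

# TaskManager stores tasks in MongoDB, so the mongo config must be set beforehand
C["mongo"] = {"task_url": "mongodb://localhost:27017/", "task_db_name": "rolling_db"}

trainer = TrainerRM(experiment_name="trainer_rm_demo", task_pool="my_task_pool")
recs = trainer.train([my_task])   # tasks are saved to MongoDB and executed through run_task
recs = trainer.end_train(recs)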
|
def end_train(self, recs: list, **kwargs) -> List[Recorder]:
"""
Set STATUS_END tag to the recorders.
Args:
recs (list): a list of trained recorders.
Returns:
List[Recorder]: the same list as the param.
"""
if isinstance(recs, Recorder):
recs = [recs]
for rec in recs:
rec.set_tags(**{self.STATUS_KEY: self.STATUS_END})
return recs
|
Set STATUS_END tag to the recorders.
Args:
recs (list): a list of trained recorders.
Returns:
List[Recorder]: the same list as the param.
|
end_train
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
def worker(
self,
train_func: Callable = None,
experiment_name: str = None,
):
"""
The multiprocessing method for `train`. It can share the same task_pool with `train` and can run in other processes or on other machines.
Args:
train_func (Callable): the training method which needs at least `tasks` and `experiment_name`. None for the default training method.
experiment_name (str): the experiment name, None to use the default name.
"""
if train_func is None:
train_func = self.train_func
if experiment_name is None:
experiment_name = self.experiment_name
task_pool = self.task_pool
if task_pool is None:
task_pool = experiment_name
run_task(train_func, task_pool=task_pool, experiment_name=experiment_name)
|
The multiprocessing method for `train`. It can share the same task_pool with `train` and can run in other processes or on other machines.
Args:
train_func (Callable): the training method which needs at least `tasks` and `experiment_name`. None for the default training method.
experiment_name (str): the experiment name, None to use the default name.
|
worker
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
def __init__(
self,
experiment_name: str = None,
task_pool: str = None,
train_func=begin_task_train,
end_train_func=end_task_train,
skip_run_task: bool = False,
**kwargs,
):
"""
Init DelayTrainerRM.
Args:
experiment_name (str): the default name of experiment.
task_pool (str): task pool name in TaskManager. None for use same name as experiment_name.
train_func (Callable, optional): default train method. Defaults to `begin_task_train`.
end_train_func (Callable, optional): default end_train method. Defaults to `end_task_train`.
skip_run_task (bool):
If True, do not call run_task here; the tasks will only be executed by the worker.
Otherwise, run_task is also executed locally.
E.g. start the trainer on a CPU VM, then wait for the tasks to be finished on GPU VMs.
"""
super().__init__(experiment_name, task_pool, train_func, **kwargs)
self.end_train_func = end_train_func
self.delay = True
self.skip_run_task = skip_run_task
|
Init DelayTrainerRM.
Args:
experiment_name (str): the default name of experiment.
task_pool (str): task pool name in TaskManager. None for use same name as experiment_name.
train_func (Callable, optional): default train method. Defaults to `begin_task_train`.
end_train_func (Callable, optional): default end_train method. Defaults to `end_task_train`.
skip_run_task (bool):
If True, do not call run_task here; the tasks will only be executed by the worker.
Otherwise, run_task is also executed locally.
E.g. start the trainer on a CPU VM, then wait for the tasks to be finished on GPU VMs.
|
__init__
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
def train(self, tasks: list, train_func=None, experiment_name: str = None, **kwargs) -> List[Recorder]:
"""
Same as `train` of TrainerRM, after_status will be STATUS_PART_DONE.
Args:
tasks (list): a list of definitions based on the `task` dict
train_func (Callable): the train method which needs at least `tasks` and `experiment_name`. Defaults to None, which means using self.train_func.
experiment_name (str): the experiment name, None to use the default name.
Returns:
List[Recorder]: a list of Recorders
"""
if isinstance(tasks, dict):
tasks = [tasks]
if len(tasks) == 0:
return []
_skip_run_task = self.skip_run_task
self.skip_run_task = False # The task preparation can't be skipped
res = super().train(
tasks,
train_func=train_func,
experiment_name=experiment_name,
after_status=TaskManager.STATUS_PART_DONE,
**kwargs,
)
self.skip_run_task = _skip_run_task
return res
|
Same as `train` of TrainerRM, after_status will be STATUS_PART_DONE.
Args:
tasks (list): a list of definitions based on the `task` dict
train_func (Callable): the train method which needs at least `tasks` and `experiment_name`. Defaults to None, which means using self.train_func.
experiment_name (str): the experiment name, None to use the default name.
Returns:
List[Recorder]: a list of Recorders
|
train
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
def end_train(self, recs, end_train_func=None, experiment_name: str = None, **kwargs) -> List[Recorder]:
"""
Given a list of Recorders, return a list of trained Recorders.
This class will finish real data loading and model fitting.
Args:
recs (list): a list of Recorder, the tasks have been saved to them.
end_train_func (Callable, optional): the end_train method which needs at least `recorders` and `experiment_name`. Defaults to None, which means using self.end_train_func.
experiment_name (str): the experiment name, None to use the default name.
kwargs: the params for end_train_func.
Returns:
List[Recorder]: a list of Recorders
"""
if isinstance(recs, Recorder):
recs = [recs]
if end_train_func is None:
end_train_func = self.end_train_func
if experiment_name is None:
experiment_name = self.experiment_name
task_pool = self.task_pool
if task_pool is None:
task_pool = experiment_name
_id_list = []
for rec in recs:
_id_list.append(rec.list_tags()[self.TM_ID])
query = {"_id": {"$in": _id_list}}
if not self.skip_run_task:
run_task(
end_train_func,
task_pool,
query=query, # only train these tasks
experiment_name=experiment_name,
before_status=TaskManager.STATUS_PART_DONE,
**kwargs,
)
TaskManager(task_pool=task_pool).wait(query=query)
for rec in recs:
rec.set_tags(**{self.STATUS_KEY: self.STATUS_END})
return recs
|
Given a list of Recorders, return a list of trained Recorders.
This class will finish real data loading and model fitting.
Args:
recs (list): a list of Recorder, the tasks have been saved to them.
end_train_func (Callable, optional): the end_train method which needs at least `recorders` and `experiment_name`. Defaults to None, which means using self.end_train_func.
experiment_name (str): the experiment name, None to use the default name.
kwargs: the params for end_train_func.
Returns:
List[Recorder]: a list of Recorders
|
end_train
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
def worker(self, end_train_func=None, experiment_name: str = None):
"""
The multiprocessing method for `end_train`. It can share the same task_pool with `end_train` and can run in other processes or on other machines.
Args:
end_train_func (Callable, optional): the end_train method which needs at least `recorders` and `experiment_name`. Defaults to None, which means using self.end_train_func.
experiment_name (str): the experiment name, None to use the default name.
"""
if end_train_func is None:
end_train_func = self.end_train_func
if experiment_name is None:
experiment_name = self.experiment_name
task_pool = self.task_pool
if task_pool is None:
task_pool = experiment_name
run_task(
end_train_func,
task_pool=task_pool,
experiment_name=experiment_name,
before_status=TaskManager.STATUS_PART_DONE,
)
|
The multiprocessing method for `end_train`. It can share the same task_pool with `end_train` and can run in other processes or on other machines.
Args:
end_train_func (Callable, optional): the end_train method which needs at least `recorders` and `experiment_name`. Defaults to None, which means using self.end_train_func.
experiment_name (str): the experiment name, None to use the default name.
|
worker
|
python
|
microsoft/qlib
|
qlib/model/trainer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/trainer.py
|
MIT
|
def __call__(self, ensemble_dict: dict) -> pd.DataFrame:
"""using sample:
from qlib.model.ens.ensemble import AverageEnsemble
pred_res['new_key_name'] = AverageEnsemble()(predict_dict)
Parameters
----------
ensemble_dict : dict
Dictionary you want to ensemble
Returns
-------
pd.DataFrame
The ensembling result.
"""
# need to flatten the nested dict
ensemble_dict = flatten_dict(ensemble_dict, sep=FLATTEN_TUPLE)
get_module_logger("AverageEnsemble").info(f"keys in group: {list(ensemble_dict.keys())}")
values = list(ensemble_dict.values())
# NOTE: this may change the style of the underlying data!!!!
# from pd.DataFrame to pd.Series
results = pd.concat(values, axis=1)
results = results.groupby("datetime", group_keys=False).apply(lambda df: (df - df.mean()) / df.std())
results = results.mean(axis=1)
results = results.sort_index()
return results
|
usage example:
from qlib.model.ens.ensemble import AverageEnsemble
pred_res['new_key_name'] = AverageEnsemble()(predict_dict)
Parameters
----------
ensemble_dict : dict
Dictionary you want to ensemble
Returns
-------
pd.DataFrame
The ensembling result.
|
__call__
|
python
|
microsoft/qlib
|
qlib/model/ens/ensemble.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/ens/ensemble.py
|
MIT
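A small self-contained sketch of what AverageEnsemble does (the instrument codes and values below are made up): each prediction is z-scored per datetime and the scores are then averaged across models.

import numpy as np
import pandas as pd
from qlib.model.ens.ensemble import AverageEnsemble

idx = pd.MultiIndex.from_product(
    [pd.to_datetime(["2023-01-03", "2023-01-04"]), ["SH600000", "SZ000001"]],
    names=["datetime", "instrument"],
)
pred_a = pd.Series(np.random.randn(len(idx)), index=idx)
pred_b = pd.Series(np.random.randn(len(idx)), index=idx)

avg_pred = AverageEnsemble()({"model_a": pred_a, "model_b": pred_b})
print(avg_pred)  # one averaged score per (datetime, instrument)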
|
def __init__(self, group_func=None, ens: Ensemble = None):
"""
Init Group.
Args:
group_func (Callable, optional): Given a dict and return the group key and one of the group elements.
For example: {(A,B,C1): object, (A,B,C2): object} -> {(A,B): {C1: object, C2: object}}
Defaults to None.
ens (Ensemble, optional): If not None, do ensemble for grouped value after grouping.
"""
self._group_func = group_func
self._ens_func = ens
|
Init Group.
Args:
group_func (Callable, optional): Given a dict and return the group key and one of the group elements.
For example: {(A,B,C1): object, (A,B,C2): object} -> {(A,B): {C1: object, C2: object}}
Defaults to None.
ens (Ensemble, optional): If not None, do ensemble for grouped value after grouping.
|
__init__
|
python
|
microsoft/qlib
|
qlib/model/ens/group.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/ens/group.py
|
MIT
|
def group(self, *args, **kwargs) -> dict:
"""
Group a set of objects and change them to a dict.
For example: {(A,B,C1): object, (A,B,C2): object} -> {(A,B): {C1: object, C2: object}}
Returns:
dict: grouped dict
"""
if isinstance(getattr(self, "_group_func", None), Callable):
return self._group_func(*args, **kwargs)
else:
raise NotImplementedError(f"Please specify valid `group_func`.")
|
Group a set of objects and change them to a dict.
For example: {(A,B,C1): object, (A,B,C2): object} -> {(A,B): {C1: object, C2: object}}
Returns:
dict: grouped dict
|
group
|
python
|
microsoft/qlib
|
qlib/model/ens/group.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/ens/group.py
|
MIT
|
def reduce(self, *args, **kwargs) -> dict:
"""
Reduce grouped dict.
For example: {(A,B): {C1: object, C2: object}} -> {(A,B): object}
Returns:
dict: reduced dict
"""
if isinstance(getattr(self, "_ens_func", None), Callable):
return self._ens_func(*args, **kwargs)
else:
raise NotImplementedError(f"Please specify valid `_ens_func`.")
|
Reduce grouped dict.
For example: {(A,B): {C1: object, C2: object}} -> {(A,B): object}
Returns:
dict: reduced dict
|
reduce
|
python
|
microsoft/qlib
|
qlib/model/ens/group.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/ens/group.py
|
MIT
|
def __call__(self, ungrouped_dict: dict, n_jobs: int = 1, verbose: int = 0, *args, **kwargs) -> dict:
"""
Group the ungrouped_dict into different groups.
Args:
ungrouped_dict (dict): the ungrouped dict waiting for grouping like {name: things}
n_jobs (int): how many processes you need.
verbose (int): the print mode for Parallel.
Returns:
dict: grouped_dict like {G1: object, G2: object}
"""
# NOTE: The multiprocessing will raise error if you use `Serializable`
# Because the `Serializable` will affect the behaviors of pickle
grouped_dict = self.group(ungrouped_dict, *args, **kwargs)
key_l = []
job_l = []
for key, value in grouped_dict.items():
key_l.append(key)
job_l.append(delayed(Group.reduce)(self, value))
return dict(zip(key_l, Parallel(n_jobs=n_jobs, verbose=verbose)(job_l)))
|
Group the ungrouped_dict into different groups.
Args:
ungrouped_dict (dict): the ungrouped dict waiting for grouping like {name: things}
n_jobs (int): how many processes you need.
verbose (int): the print mode for Parallel.
Returns:
dict: grouped_dict like {G1: object, G2: object}
|
__call__
|
python
|
microsoft/qlib
|
qlib/model/ens/group.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/ens/group.py
|
MIT
|
def group(self, rolling_dict: dict) -> dict:
"""Given an rolling dict likes {(A,B,R): things}, return the grouped dict likes {(A,B): {R:things}}
NOTE: There is an assumption which is the rolling key is at the end of the key tuple, because the rolling results always need to be ensemble firstly.
Args:
rolling_dict (dict): an rolling dict. If the key is not a tuple, then do nothing.
Returns:
dict: grouped dict
"""
grouped_dict = {}
for key, values in rolling_dict.items():
if isinstance(key, tuple):
grouped_dict.setdefault(key[:-1], {})[key[-1]] = values
else:
raise TypeError(f"Expected `tuple` type, but got a value `{key}`")
return grouped_dict
|
Given a rolling dict like {(A,B,R): things}, return the grouped dict like {(A,B): {R: things}}
NOTE: it is assumed that the rolling key is at the end of the key tuple, because the rolling results always need to be ensembled first.
Args:
rolling_dict (dict): a rolling dict. If the key is not a tuple, then do nothing.
Returns:
dict: grouped dict
|
group
|
python
|
microsoft/qlib
|
qlib/model/ens/group.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/ens/group.py
|
MIT
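A small sketch of RollingGroup on made-up rolling predictions: the rolling id sits at the end of each key tuple, and the default ensemble then merges the segments of each group into one series. The helper below exists only for this sketch.

import numpy as np
import pandas as pd
from qlib.model.ens.group import RollingGroup

def fake_pred(dates):  # helper for this sketch only
    idx = pd.MultiIndex.from_product(
        [pd.to_datetime(dates), ["SH600000", "SZ000001"]], names=["datetime", "instrument"]
    )
    return pd.Series(np.random.randn(len(idx)), index=idx)

rolling_dict = {
    ("LGBModel", "rolling_0"): fake_pred(["2023-01-03", "2023-01-04"]),
    ("LGBModel", "rolling_1"): fake_pred(["2023-01-05", "2023-01-06"]),
}

merged = RollingGroup()(rolling_dict)
# -> {("LGBModel",): <both rolling segments merged into one prediction series>}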
|
def get_feature_importance(self) -> pd.Series:
"""get feature importance
Returns
-------
The index is the feature name.
The greater the value, the higher importance.
"""
|
get feature importance
Returns
-------
The index is the feature name.
The greater the value, the higher importance.
|
get_feature_importance
|
python
|
microsoft/qlib
|
qlib/model/interpret/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/interpret/base.py
|
MIT
|
def get_feature_importance(self, *args, **kwargs) -> pd.Series:
"""get feature importance
Notes
-----
parameters reference:
https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.Booster.html?highlight=feature_importance#lightgbm.Booster.feature_importance
"""
return pd.Series(
self.model.feature_importance(*args, **kwargs), index=self.model.feature_name()
).sort_values( # pylint: disable=E1101
ascending=False
)
|
get feature importance
Notes
-----
parameters reference:
https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.Booster.html?highlight=feature_importance#lightgbm.Booster.feature_importance
|
get_feature_importance
|
python
|
microsoft/qlib
|
qlib/model/interpret/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/interpret/base.py
|
MIT
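A short usage sketch, assuming a qlib LGBModel (which mixes in this interface) has already been fitted on a prepared dataset; `dataset` is assumed to exist:

from qlib.contrib.model.gbdt import LGBModel

model = LGBModel()
model.fit(dataset)  # `dataset` is assumed to be an already-prepared qlib DatasetH
fi = model.get_feature_importance(importance_type="gain")
print(fi.head(10))  # top-10 features by gain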
|
def __init__(self, segments: Union[Dict[Text, Tuple], float], *args, **kwargs):
"""
The meta-dataset maintains a list of meta-tasks when it is initialized.
The segments indicate the way to divide the data.
The duty of the `__init__` function of MetaTaskDataset
- initialize the tasks
"""
super().__init__(*args, **kwargs)
self.segments = segments
|
The meta-dataset maintains a list of meta-tasks when it is initialized.
The segments indicate the way to divide the data.
The duty of the `__init__` function of MetaTaskDataset
- initialize the tasks
|
__init__
|
python
|
microsoft/qlib
|
qlib/model/meta/dataset.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/meta/dataset.py
|
MIT
|
def prepare_tasks(self, segments: Union[List[Text], Text], *args, **kwargs) -> List[MetaTask]:
"""
Prepare the data in each meta-task so that it is ready for training.
The following code example shows how to retrieve a list of meta-tasks from the `meta_dataset`:
.. code-block:: Python
# get the train segment and the test segment, both of them are lists
train_meta_tasks, test_meta_tasks = meta_dataset.prepare_tasks(["train", "test"])
Parameters
----------
segments: Union[List[Text], Tuple[Text], Text]
the info to select data
Returns
-------
list:
A list of the prepared data of each meta-task for training the meta-model. For multiple segments [seg1, seg2, ... , segN], the returned list will be [[tasks in seg1], [tasks in seg2], ... , [tasks in segN]].
Each task is a meta task
"""
if isinstance(segments, (list, tuple)):
return [self._prepare_seg(seg) for seg in segments]
elif isinstance(segments, str):
return self._prepare_seg(segments)
else:
raise NotImplementedError(f"This type of input is not supported")
|
Prepare the data in each meta-task so that it is ready for training.
The following code example shows how to retrieve a list of meta-tasks from the `meta_dataset`:
.. code-block:: Python
# get the train segment and the test segment, both of them are lists
train_meta_tasks, test_meta_tasks = meta_dataset.prepare_tasks(["train", "test"])
Parameters
----------
segments: Union[List[Text], Tuple[Text], Text]
the info to select data
Returns
-------
list:
A list of the prepared data of each meta-task for training the meta-model. For multiple segments [seg1, seg2, ... , segN], the returned list will be [[tasks in seg1], [tasks in seg2], ... , [tasks in segN]].
Each task is a meta task
|
prepare_tasks
|
python
|
microsoft/qlib
|
qlib/model/meta/dataset.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/meta/dataset.py
|
MIT
|
def _prepare_seg(self, segment: Text):
"""
prepare a single segment of data for training data
Parameters
----------
segment : Text
the name of the segment
"""
|
prepare a single segment of data for training data
Parameters
----------
segment : Text
the name of the segment
|
_prepare_seg
|
python
|
microsoft/qlib
|
qlib/model/meta/dataset.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/meta/dataset.py
|
MIT
|
def fit(self, *args, **kwargs):
"""
The training process of the meta-model.
"""
|
The training process of the meta-model.
|
fit
|
python
|
microsoft/qlib
|
qlib/model/meta/model.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/meta/model.py
|
MIT
|
def inference(self, *args, **kwargs) -> object:
"""
The inference process of the meta-model.
Returns
-------
object:
Some information to guide the model learning
"""
|
The inference process of the meta-model.
Returns
-------
object:
Some information to guide the model learning
|
inference
|
python
|
microsoft/qlib
|
qlib/model/meta/model.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/meta/model.py
|
MIT
|
def __init__(self, task: dict, meta_info: object, mode: str = PROC_MODE_FULL):
"""
The `__init__` func is responsible for
- store the task
- store the original input data for the meta-model
- process the input data into meta data
Parameters
----------
task : dict
the task to be enhanced by meta model
meta_info : object
the input for meta model
"""
self.task = task
self.meta_info = meta_info # the original meta input information, it will be processed later
self.mode = mode
|
The `__init__` func is responsible for
- store the task
- store the original input data for the meta-model
- process the input data into meta data
Parameters
----------
task : dict
the task to be enhanced by meta model
meta_info : object
the input for meta model
|
__init__
|
python
|
microsoft/qlib
|
qlib/model/meta/task.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/meta/task.py
|
MIT
|
def __init__(self, nan_option: str = "ignore", assume_centered: bool = False, scale_return: bool = True):
"""
Args:
nan_option (str): nan handling option (`ignore`/`mask`/`fill`).
assume_centered (bool): whether the data is assumed to be centered.
scale_return (bool): whether to scale returns to percentages.
"""
# nan
assert nan_option in [
self.MASK_NAN,
self.FILL_NAN,
self.IGNORE_NAN,
], f"`nan_option={nan_option}` is not supported"
self.nan_option = nan_option
self.assume_centered = assume_centered
self.scale_return = scale_return
|
Args:
nan_option (str): nan handling option (`ignore`/`mask`/`fill`).
assume_centered (bool): whether the data is assumed to be centered.
scale_return (bool): whether to scale returns to percentages.
|
__init__
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/base.py
|
MIT
|
def predict(
self,
X: Union[pd.Series, pd.DataFrame, np.ndarray],
return_corr: bool = False,
is_price: bool = True,
return_decomposed_components=False,
) -> Union[pd.DataFrame, np.ndarray, tuple]:
"""
Args:
X (pd.Series, pd.DataFrame or np.ndarray): data from which to estimate the covariance,
with variables as columns and observations as rows.
return_corr (bool): whether to return the correlation matrix.
is_price (bool): whether `X` contains prices (if not, assume stock returns).
return_decomposed_components (bool): whether to return the decomposed components of the covariance matrix.
Returns:
pd.DataFrame or np.ndarray: estimated covariance (or correlation).
"""
assert (
not return_corr or not return_decomposed_components
), "Can only return either correlation matrix or decomposed components."
# transform input into 2D array
if not isinstance(X, (pd.Series, pd.DataFrame)):
columns = None
else:
if isinstance(X.index, pd.MultiIndex):
if isinstance(X, pd.DataFrame):
X = X.iloc[:, 0].unstack(level="instrument") # always use the first column
else:
X = X.unstack(level="instrument")
else:
# X is 2D DataFrame
pass
columns = X.columns # will be used to restore dataframe
X = X.values
# calculate pct_change
if is_price:
X = X[1:] / X[:-1] - 1 # NOTE: resulting `n - 1` rows
# scale return
if self.scale_return:
X *= 100
# handle nan and centered
X = self._preprocess(X)
# return decomposed components if needed
if return_decomposed_components:
assert (
"return_decomposed_components" in inspect.getfullargspec(self._predict).args
), "This risk model does not support return decomposed components of the covariance matrix "
F, cov_b, var_u = self._predict(X, return_decomposed_components=True) # pylint: disable=E1123
return F, cov_b, var_u
# estimate covariance
S = self._predict(X)
# return correlation if needed
if return_corr:
vola = np.sqrt(np.diag(S))
corr = S / np.outer(vola, vola)
if columns is None:
return corr
return pd.DataFrame(corr, index=columns, columns=columns)
# return covariance
if columns is None:
return S
return pd.DataFrame(S, index=columns, columns=columns)
|
Args:
X (pd.Series, pd.DataFrame or np.ndarray): data from which to estimate the covariance,
with variables as columns and observations as rows.
return_corr (bool): whether to return the correlation matrix.
is_price (bool): whether `X` contains prices (if not, assume stock returns).
return_decomposed_components (bool): whether to return the decomposed components of the covariance matrix.
Returns:
pd.DataFrame or np.ndarray: estimated covariance (or correlation).
|
predict
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/base.py
|
MIT
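A small sketch with synthetic prices (if the base class should not be instantiated directly in your setup, any concrete subclass such as ShrinkCovEstimator is used the same way):

import numpy as np
import pandas as pd
from qlib.model.riskmodel.base import RiskModel

rng = np.random.default_rng(0)
prices = pd.DataFrame(
    100 * np.exp(np.cumsum(rng.normal(0, 0.01, size=(250, 3)), axis=0)),
    columns=["SH600000", "SZ000001", "SH600519"],
)

model = RiskModel()                             # default: empirical covariance
cov = model.predict(prices)                     # prices -> returns -> covariance DataFrame
corr = model.predict(prices, return_corr=True)  # correlation matrix instead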
|
def _predict(self, X: np.ndarray) -> np.ndarray:
"""covariance estimation implementation
This method should be overridden by child classes.
By default, this method implements the empirical covariance estimation.
Args:
X (np.ndarray): data matrix containing multiple variables (columns) and observations (rows).
Returns:
np.ndarray: covariance matrix.
"""
xTx = np.asarray(X.T.dot(X))
N = len(X)
if isinstance(X, np.ma.MaskedArray):
M = 1 - X.mask
N = M.T.dot(M) # each pair has distinct number of samples
return xTx / N
|
covariance estimation implementation
This method should be overridden by child classes.
By default, this method implements the empirical covariance estimation.
Args:
X (np.ndarray): data matrix containing multiple variables (columns) and observations (rows).
Returns:
np.ndarray: covariance matrix.
|
_predict
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/base.py
|
MIT
|
def _preprocess(self, X: np.ndarray) -> Union[np.ndarray, np.ma.MaskedArray]:
"""handle nan and centerize data
Note:
if `nan_option='mask'` then the returned array will be `np.ma.MaskedArray`.
"""
# handle nan
if self.nan_option == self.FILL_NAN:
X = np.nan_to_num(X)
elif self.nan_option == self.MASK_NAN:
X = np.ma.masked_invalid(X)
# centralize
if not self.assume_centered:
X = X - np.nanmean(X, axis=0)
return X
|
handle nan and center the data
Note:
if `nan_option='mask'` then the returned array will be `np.ma.MaskedArray`.
|
_preprocess
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/base.py
|
MIT
|
def __init__(self, num_factors: int = 0, thresh: float = 1.0, thresh_method: str = "soft", **kwargs):
"""
Args:
num_factors (int): number of factors (if set to zero, no factor model will be used).
thresh (float): the positive constant for thresholding.
thresh_method (str): thresholding method, which can be
- 'soft': soft thresholding.
- 'hard': hard thresholding.
- 'scad': scad thresholding.
kwargs: see `RiskModel` for more information.
"""
super().__init__(**kwargs)
assert num_factors >= 0, "`num_factors` requires a non-negative integer"
self.num_factors = num_factors
assert thresh >= 0, "`thresh` requires a non-negative float number"
self.thresh = thresh
assert thresh_method in [
self.THRESH_HARD,
self.THRESH_SOFT,
self.THRESH_SCAD,
], "`thresh_method` should be `soft`/`hard`/`scad`"
self.thresh_method = thresh_method
|
Args:
num_factors (int): number of factors (if set to zero, no factor model will be used).
thresh (float): the positive constant for thresholding.
thresh_method (str): thresholding method, which can be
- 'soft': soft thresholding.
- 'hard': hard thresholding.
- 'scad': scad thresholding.
kwargs: see `RiskModel` for more information.
|
__init__
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/poet.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/poet.py
|
MIT
|
def __init__(self, alpha: Union[str, float] = 0.0, target: Union[str, np.ndarray] = "const_var", **kwargs):
"""
Args:
alpha (str or float): shrinking parameter or estimator (`lw`/`oas`)
target (str or np.ndarray): shrinking target (`const_var`/`const_corr`/`single_factor`)
kwargs: see `RiskModel` for more information
"""
super().__init__(**kwargs)
# alpha
if isinstance(alpha, str):
assert alpha in [self.SHR_LW, self.SHR_OAS], f"shrinking method `{alpha}` is not supported"
elif isinstance(alpha, (float, np.floating)):
assert 0 <= alpha <= 1, "alpha should be between [0, 1]"
else:
raise TypeError("invalid argument type for `alpha`")
self.alpha = alpha
# target
if isinstance(target, str):
assert target in [
self.TGT_CONST_VAR,
self.TGT_CONST_CORR,
self.TGT_SINGLE_FACTOR,
], f"shrinking target `{target} is not supported"
elif isinstance(target, np.ndarray):
pass
else:
raise TypeError("invalid argument type for `target`")
if alpha == self.SHR_OAS and target != self.TGT_CONST_VAR:
raise NotImplementedError("currently `oas` can only support `const_var` as target")
self.target = target
|
Args:
alpha (str or float): shrinking parameter or estimator (`lw`/`oas`)
target (str or np.ndarray): shrinking target (`const_var`/`const_corr`/`single_factor`)
kwargs: see `RiskModel` for more information
|
__init__
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/shrink.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/shrink.py
|
MIT
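A short sketch reusing the synthetic `prices` DataFrame from the RiskModel example above:

from qlib.model.riskmodel.shrink import ShrinkCovEstimator

lw = ShrinkCovEstimator(alpha="lw", target="const_corr")   # Ledoit-Wolf intensity, constant-correlation target
cov_lw = lw.predict(prices)

fixed = ShrinkCovEstimator(alpha=0.2, target="const_var")  # fixed shrinkage intensity in [0, 1]
cov_fixed = fixed.predict(prices)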
|
def _get_shrink_target_const_var(self, X: np.ndarray, S: np.ndarray) -> np.ndarray:
"""get shrinking target with constant variance
This target assumes zero pair-wise correlation and constant variance.
The constant variance is estimated by averaging the variances of all variables.
"""
n = len(S)
F = np.eye(n)
np.fill_diagonal(F, np.mean(np.diag(S)))
return F
|
get shrinking target with constant variance
This target assumes zero pair-wise correlation and constant variance.
The constant variance is estimated by averaging the variances of all variables.
|
_get_shrink_target_const_var
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/shrink.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/shrink.py
|
MIT
|
def _get_shrink_target_const_corr(self, X: np.ndarray, S: np.ndarray) -> np.ndarray:
"""get shrinking target with constant correlation
This target assumes constant pair-wise correlation but keeps the sample variances.
The constant correlation is estimated by averaging all pairwise correlations.
"""
n = len(S)
var = np.diag(S)
sqrt_var = np.sqrt(var)
covar = np.outer(sqrt_var, sqrt_var)
r_bar = (np.sum(S / covar) - n) / (n * (n - 1))
F = r_bar * covar
np.fill_diagonal(F, var)
return F
|
get shrinking target with constant correlation
This target assumes constant pair-wise correlation but keeps the sample variances.
The constant correlation is estimated by averaging all pairwise correlations.
|
_get_shrink_target_const_corr
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/shrink.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/shrink.py
|
MIT
|
def _get_shrink_target_single_factor(self, X: np.ndarray, S: np.ndarray) -> np.ndarray:
"""get shrinking target with single factor model"""
X_mkt = np.nanmean(X, axis=1)
cov_mkt = np.asarray(X.T.dot(X_mkt) / len(X))
var_mkt = np.asarray(X_mkt.dot(X_mkt) / len(X))
F = np.outer(cov_mkt, cov_mkt) / var_mkt
np.fill_diagonal(F, np.diag(S))
return F
|
get shrinking target with single factor model
|
_get_shrink_target_single_factor
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/shrink.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/shrink.py
|
MIT
|
def _get_shrink_param(self, X: np.ndarray, S: np.ndarray, F: np.ndarray) -> float:
"""get shrinking parameter `alpha`
Note:
The Ledoit-Wolf shrinking parameter estimator consists of three different methods.
"""
if self.alpha == self.SHR_OAS:
return self._get_shrink_param_oas(X, S, F)
elif self.alpha == self.SHR_LW:
if self.target == self.TGT_CONST_VAR:
return self._get_shrink_param_lw_const_var(X, S, F)
if self.target == self.TGT_CONST_CORR:
return self._get_shrink_param_lw_const_corr(X, S, F)
if self.target == self.TGT_SINGLE_FACTOR:
return self._get_shrink_param_lw_single_factor(X, S, F)
return self.alpha
|
get shrinking parameter `alpha`
Note:
The Ledoit-Wolf shrinking parameter estimator consists of three different methods.
|
_get_shrink_param
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/shrink.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/shrink.py
|
MIT
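For context, a minimal numpy sketch of the standard Ledoit-Wolf composition that the estimated `alpha` is used for; the composition step itself is outside this excerpt, and the values below are illustrative only.

import numpy as np

S = np.array([[0.04, 0.01], [0.01, 0.09]])    # sample covariance
F = np.diag(np.full(2, np.mean(np.diag(S))))  # e.g. the constant-variance target
alpha = 0.3                                   # e.g. the value returned by _get_shrink_param
S_shrunk = alpha * F + (1 - alpha) * S        # shrunk covariance estimate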
|
def _get_shrink_param_oas(self, X: np.ndarray, S: np.ndarray, F: np.ndarray) -> float:
"""Oracle Approximating Shrinkage Estimator
This method uses the following formula to estimate the `alpha`
parameter for the shrink covariance estimator:
A = (1 - 2 / p) * trace(S^2) + trace^2(S)
B = (n + 1 - 2 / p) * (trace(S^2) - trace^2(S) / p)
alpha = A / B
where `n` and `p` are the numbers of observations and variables, respectively.
"""
trS2 = np.sum(S**2)
tr2S = np.trace(S) ** 2
n, p = X.shape
A = (1 - 2 / p) * (trS2 + tr2S)
B = (n + 1 - 2 / p) * (trS2 + tr2S / p)
alpha = A / B
return alpha
|
Oracle Approximating Shrinkage Estimator
This method uses the following formula to estimate the `alpha`
parameter for the shrink covariance estimator:
A = (1 - 2 / p) * trace(S^2) + trace^2(S)
B = (n + 1 - 2 / p) * (trace(S^2) - trace^2(S) / p)
alpha = A / B
where `n` and `p` are the numbers of observations and variables, respectively.
|
_get_shrink_param_oas
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/shrink.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/shrink.py
|
MIT
|
def _get_shrink_param_lw_const_var(self, X: np.ndarray, S: np.ndarray, F: np.ndarray) -> float:
"""Ledoit-Wolf Shrinkage Estimator (Constant Variance)
This method shrinks the covariance matrix towards the constant-variance target.
"""
t, n = X.shape
y = X**2
phi = np.sum(y.T.dot(y) / t - S**2)
gamma = np.linalg.norm(S - F, "fro") ** 2
kappa = phi / gamma
alpha = max(0, min(1, kappa / t))
return alpha
|
Ledoit-Wolf Shrinkage Estimator (Constant Variance)
This method shrinks the covariance matrix towards the constant-variance target.
|
_get_shrink_param_lw_const_var
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/shrink.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/shrink.py
|
MIT
|
def _get_shrink_param_lw_const_corr(self, X: np.ndarray, S: np.ndarray, F: np.ndarray) -> float:
"""Ledoit-Wolf Shrinkage Estimator (Constant Correlation)
This method shrinks the covariance matrix towards the constant-correlation target.
"""
t, n = X.shape
var = np.diag(S)
sqrt_var = np.sqrt(var)
r_bar = (np.sum(S / np.outer(sqrt_var, sqrt_var)) - n) / (n * (n - 1))
y = X**2
phi_mat = y.T.dot(y) / t - S**2
phi = np.sum(phi_mat)
theta_mat = (X**3).T.dot(X) / t - var[:, None] * S
np.fill_diagonal(theta_mat, 0)
rho = np.sum(np.diag(phi_mat)) + r_bar * np.sum(np.outer(1 / sqrt_var, sqrt_var) * theta_mat)
gamma = np.linalg.norm(S - F, "fro") ** 2
kappa = (phi - rho) / gamma
alpha = max(0, min(1, kappa / t))
return alpha
|
Ledoit-Wolf Shrinkage Estimator (Constant Correlation)
This method shrinks the covariance matrix towards the constant-correlation target.
|
_get_shrink_param_lw_const_corr
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/shrink.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/shrink.py
|
MIT
|
def _get_shrink_param_lw_single_factor(self, X: np.ndarray, S: np.ndarray, F: np.ndarray) -> float:
"""Ledoit-Wolf Shrinkage Estimator (Single Factor Model)
This method shrinks the covariance matrix towards the single factor model target.
"""
t, n = X.shape
X_mkt = np.nanmean(X, axis=1)
cov_mkt = np.asarray(X.T.dot(X_mkt) / len(X))
var_mkt = np.asarray(X_mkt.dot(X_mkt) / len(X))
y = X**2
phi = np.sum(y.T.dot(y)) / t - np.sum(S**2)
rdiag = np.sum(y**2) / t - np.sum(np.diag(S) ** 2)
z = X * X_mkt[:, None]
v1 = y.T.dot(z) / t - cov_mkt[:, None] * S
roff1 = np.sum(v1 * cov_mkt[:, None].T) / var_mkt - np.sum(np.diag(v1) * cov_mkt) / var_mkt
v3 = z.T.dot(z) / t - var_mkt * S
roff3 = np.sum(v3 * np.outer(cov_mkt, cov_mkt)) / var_mkt**2 - np.sum(np.diag(v3) * cov_mkt**2) / var_mkt**2
roff = 2 * roff1 - roff3
rho = rdiag + roff
gamma = np.linalg.norm(S - F, "fro") ** 2
kappa = (phi - rho) / gamma
alpha = max(0, min(1, kappa / t))
return alpha
|
Ledoit-Wolf Shrinkage Estimator (Single Factor Model)
This method shrinks the covariance matrix towards the single factor model target.
|
_get_shrink_param_lw_single_factor
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/shrink.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/shrink.py
|
MIT
|
def __init__(self, factor_model: str = "pca", num_factors: int = 10, **kwargs):
"""
Args:
factor_model (str): the latent factor models used to estimate the structured covariance (`pca`/`fa`).
num_factors (int): number of components to keep.
kwargs: see `RiskModel` for more information
"""
if "nan_option" in kwargs:
assert kwargs["nan_option"] in [self.DEFAULT_NAN_OPTION], "nan_option={} is not supported".format(
kwargs["nan_option"]
)
else:
kwargs["nan_option"] = self.DEFAULT_NAN_OPTION
super().__init__(**kwargs)
assert factor_model in [
self.FACTOR_MODEL_PCA,
self.FACTOR_MODEL_FA,
], "factor_model={} is not supported".format(factor_model)
self.solver = PCA if factor_model == self.FACTOR_MODEL_PCA else FactorAnalysis
self.num_factors = num_factors
|
Args:
factor_model (str): the latent factor models used to estimate the structured covariance (`pca`/`fa`).
num_factors (int): number of components to keep.
kwargs: see `RiskModel` for more information
|
__init__
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/structured.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/structured.py
|
MIT
|
def _predict(self, X: np.ndarray, return_decomposed_components=False) -> Union[np.ndarray, tuple]:
"""
covariance estimation implementation
Args:
X (np.ndarray): data matrix containing multiple variables (columns) and observations (rows).
return_decomposed_components (bool): whether return decomposed components of the covariance matrix.
Returns:
tuple or np.ndarray: decomposed covariance matrix or covariance matrix.
"""
model = self.solver(self.num_factors, random_state=0).fit(X)
F = model.components_.T # variables x factors
B = model.transform(X) # observations x factors
U = X - B @ F.T
cov_b = np.cov(B.T) # factors x factors
var_u = np.var(U, axis=0) # diagonal
if return_decomposed_components:
return F, cov_b, var_u
cov_x = F @ cov_b @ F.T + np.diag(var_u)
return cov_x
|
covariance estimation implementation
Args:
X (np.ndarray): data matrix containing multiple variables (columns) and observations (rows).
return_decomposed_components (bool): whether return decomposed components of the covariance matrix.
Returns:
tuple or np.ndarray: decomposed covariance matrix or covariance matrix.
|
_predict
|
python
|
microsoft/qlib
|
qlib/model/riskmodel/structured.py
|
https://github.com/microsoft/qlib/blob/master/qlib/model/riskmodel/structured.py
|
MIT
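A small sketch with synthetic returns showing both the full covariance and its factor decomposition; the reconstruction identity mirrors the code above.

import numpy as np
from qlib.model.riskmodel.structured import StructuredCovEstimator

rng = np.random.default_rng(0)
returns = rng.normal(0, 0.01, size=(500, 20))   # observations x variables

est = StructuredCovEstimator(factor_model="pca", num_factors=5)
cov = est.predict(returns, is_price=False)      # (20, 20) covariance matrix

F, cov_b, var_u = est.predict(returns, is_price=False, return_decomposed_components=True)
approx = F @ cov_b @ F.T + np.diag(var_u)       # cov is assembled from these components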
|
def _gym_space_contains(space: gym.Space, x: Any) -> None:
"""Strengthened version of gym.Space.contains.
It gives more diagnostic information on why validation fails.
It throws an exception rather than returning true or false.
"""
if isinstance(space, spaces.Dict):
if not isinstance(x, dict) or len(x) != len(space):
raise GymSpaceValidationError("Sample must be a dict with same length as space.", space, x)
for k, subspace in space.spaces.items():
if k not in x:
raise GymSpaceValidationError(f"Key {k} not found in sample.", space, x)
try:
_gym_space_contains(subspace, x[k])
except GymSpaceValidationError as e:
raise GymSpaceValidationError(f"Subspace of key {k} validation error.", space, x) from e
elif isinstance(space, spaces.Tuple):
if isinstance(x, (list, np.ndarray)):
x = tuple(x) # Promote list and ndarray to tuple for contains check
if not isinstance(x, tuple) or len(x) != len(space):
raise GymSpaceValidationError("Sample must be a tuple with same length as space.", space, x)
for i, (subspace, part) in enumerate(zip(space, x)):
try:
_gym_space_contains(subspace, part)
except GymSpaceValidationError as e:
raise GymSpaceValidationError(f"Subspace of index {i} validation error.", space, x) from e
else:
if not space.contains(x):
raise GymSpaceValidationError("Validation error reported by gym.", space, x)
|
Strengthened version of gym.Space.contains.
It gives more diagnostic information on why validation fails.
It throws an exception rather than returning true or false.
|
_gym_space_contains
|
python
|
microsoft/qlib
|
qlib/rl/interpreter.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/interpreter.py
|
MIT
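A tiny usage sketch (assuming the gym version qlib was installed against exposes `spaces` as below; the space and sample are made up):

import numpy as np
from gym import spaces
from qlib.rl.interpreter import _gym_space_contains

space = spaces.Dict({"position": spaces.Box(-1.0, 1.0, shape=(2,)), "step": spaces.Discrete(10)})
sample = {"position": np.array([0.3, -0.5], dtype=np.float32), "step": 4}
_gym_space_contains(space, sample)  # passes silently; raises GymSpaceValidationError on bad samples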
|
def _generate_report(
decisions: List[BaseTradeDecision],
report_indicators: List[INDICATOR_METRIC],
) -> Dict[str, Tuple[pd.DataFrame, pd.DataFrame]]:
"""Generate backtest reports
Parameters
----------
decisions:
List of trade decisions.
report_indicators:
List of indicator reports.
Returns
-------
"""
indicator_dict: Dict[str, List[pd.DataFrame]] = defaultdict(list)
indicator_his: Dict[str, List[dict]] = defaultdict(list)
for report_indicator in report_indicators:
for key, (indicator_df, indicator_obj) in report_indicator.items():
indicator_dict[key].append(indicator_df)
indicator_his[key].append(indicator_obj.order_indicator_his)
report = {}
decision_details = pd.concat([getattr(d, "details") for d in decisions if hasattr(d, "details")])
for key in indicator_dict:
cur_dict = pd.concat(indicator_dict[key])
cur_his = pd.concat([_convert_indicator_to_dataframe(his) for his in indicator_his[key]])
cur_details = decision_details[decision_details.freq == key].set_index(["instrument", "datetime"])
if len(cur_details) > 0:
cur_details.pop("freq")
cur_his = cur_his.join(cur_details, how="outer")
report[key] = (cur_dict, cur_his)
return report
|
Generate backtest reports
Parameters
----------
decisions:
List of trade decisions.
report_indicators:
List of indicator reports.
Returns
-------
|
_generate_report
|
python
|
microsoft/qlib
|
qlib/rl/contrib/backtest.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/contrib/backtest.py
|
MIT
|
def init_qlib(qlib_config: dict) -> None:
"""Initialize necessary resource to launch the workflow, including data direction, feature columns, etc..
Parameters
----------
qlib_config:
Qlib configuration.
Example::
{
"provider_uri_day": DATA_ROOT_DIR / "qlib_1d",
"provider_uri_1min": DATA_ROOT_DIR / "qlib_1min",
"feature_root_dir": DATA_ROOT_DIR / "qlib_handler_stock",
"feature_columns_today": [
"$open", "$high", "$low", "$close", "$vwap", "$bid", "$ask", "$volume",
"$bidV", "$bidV1", "$bidV3", "$bidV5", "$askV", "$askV1", "$askV3", "$askV5",
],
"feature_columns_yesterday": [
"$open_1", "$high_1", "$low_1", "$close_1", "$vwap_1", "$bid_1", "$ask_1", "$volume_1",
"$bidV_1", "$bidV1_1", "$bidV3_1", "$bidV5_1", "$askV_1", "$askV1_1", "$askV3_1", "$askV5_1",
],
}
"""
def _convert_to_path(path: str | Path) -> Path:
return path if isinstance(path, Path) else Path(path)
provider_uri_map = {}
for granularity in ["1min", "5min", "day"]:
if f"provider_uri_{granularity}" in qlib_config:
provider_uri_map[f"{granularity}"] = _convert_to_path(qlib_config[f"provider_uri_{granularity}"]).as_posix()
qlib.init(
region=REG_CN,
auto_mount=False,
custom_ops=[DayLast, FFillNan, BFillNan, Date, Select, IsNull, IsInf, Cut, DayCumsum],
expression_cache=None,
calendar_provider={
"class": "LocalCalendarProvider",
"module_path": "qlib.data.data",
"kwargs": {
"backend": {
"class": "FileCalendarStorage",
"module_path": "qlib.data.storage.file_storage",
"kwargs": {"provider_uri_map": provider_uri_map},
},
},
},
feature_provider={
"class": "LocalFeatureProvider",
"module_path": "qlib.data.data",
"kwargs": {
"backend": {
"class": "FileFeatureStorage",
"module_path": "qlib.data.storage.file_storage",
"kwargs": {"provider_uri_map": provider_uri_map},
},
},
},
provider_uri=provider_uri_map,
kernels=1,
redis_port=-1,
clear_mem_cache=False, # init_qlib will be called for multiple times. Keep the cache for improving performance
)
|
Initialize the necessary resources to launch the workflow, including data directories, feature columns, etc.
Parameters
----------
qlib_config:
Qlib configuration.
Example::
{
"provider_uri_day": DATA_ROOT_DIR / "qlib_1d",
"provider_uri_1min": DATA_ROOT_DIR / "qlib_1min",
"feature_root_dir": DATA_ROOT_DIR / "qlib_handler_stock",
"feature_columns_today": [
"$open", "$high", "$low", "$close", "$vwap", "$bid", "$ask", "$volume",
"$bidV", "$bidV1", "$bidV3", "$bidV5", "$askV", "$askV1", "$askV3", "$askV5",
],
"feature_columns_yesterday": [
"$open_1", "$high_1", "$low_1", "$close_1", "$vwap_1", "$bid_1", "$ask_1", "$volume_1",
"$bidV_1", "$bidV1_1", "$bidV3_1", "$bidV5_1", "$askV_1", "$askV1_1", "$askV3_1", "$askV5_1",
],
}
|
init_qlib
|
python
|
microsoft/qlib
|
qlib/rl/data/integration.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/data/integration.py
|
MIT
|
def get_deal_price(self) -> pd.Series:
"""Return a pandas series that can be indexed with time.
See :attr:`DealPriceType` for details."""
if self.deal_price_type in ("bid_or_ask", "bid_or_ask_fill"):
if self.order_dir is None:
raise ValueError("Order direction cannot be none when deal_price_type is not close.")
if self.order_dir == OrderDir.SELL:
col = "$bid0"
else: # BUY
col = "$ask0"
elif self.deal_price_type == "close":
col = "$close0"
else:
raise ValueError(f"Unsupported deal_price_type: {self.deal_price_type}")
price = self.data[col]
if self.deal_price_type == "bid_or_ask_fill":
if self.order_dir == OrderDir.SELL:
fill_col = "$ask0"
else:
fill_col = "$bid0"
price = price.replace(0, np.nan).fillna(self.data[fill_col])
return price
|
Return a pandas series that can be indexed with time.
See :attr:`DealPriceType` for details.
|
get_deal_price
|
python
|
microsoft/qlib
|
qlib/rl/data/pickle_styled.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/data/pickle_styled.py
|
MIT
|
def load_orders(
order_path: Path,
start_time: pd.Timestamp = None,
end_time: pd.Timestamp = None,
) -> Sequence[Order]:
"""Load orders, and set start time and end time for the orders."""
start_time = start_time or pd.Timestamp("0:00:00")
end_time = end_time or pd.Timestamp("23:59:59")
if order_path.is_file():
order_df = pd.read_pickle(order_path)
else:
order_df = []
for file in order_path.iterdir():
order_data = pd.read_pickle(file)
order_df.append(order_data)
order_df = pd.concat(order_df)
order_df = order_df.reset_index()
# Legacy-style orders have "date" instead of "datetime"
if "date" in order_df.columns:
order_df = order_df.rename(columns={"date": "datetime"})
# Sometimes "date" are str rather than Timestamp
order_df["datetime"] = pd.to_datetime(order_df["datetime"])
orders: List[Order] = []
for _, row in order_df.iterrows():
# filter out orders with amount == 0
if row["amount"] <= 0:
continue
orders.append(
Order(
row["instrument"],
row["amount"],
OrderDir(int(row["order_type"])),
row["datetime"].replace(hour=start_time.hour, minute=start_time.minute, second=start_time.second),
row["datetime"].replace(hour=end_time.hour, minute=end_time.minute, second=end_time.second),
),
)
return orders
|
Load orders, and set start time and end time for the orders.
|
load_orders
|
python
|
microsoft/qlib
|
qlib/rl/data/pickle_styled.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/data/pickle_styled.py
|
MIT
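A hypothetical example of the order file layout this function expects; the column names follow the code above, while the pickle path and instrument codes are made up.

import pandas as pd
from pathlib import Path
from qlib.rl.data.pickle_styled import load_orders

orders_df = pd.DataFrame(
    {
        "instrument": ["SH600000", "SZ000001"],
        "datetime": pd.to_datetime(["2023-01-03", "2023-01-03"]),
        "amount": [1000.0, 500.0],
        "order_type": [0, 1],  # OrderDir values (SELL / BUY)
    }
).set_index(["instrument", "datetime"])
orders_df.to_pickle("orders_demo.pkl")

orders = load_orders(Path("orders_demo.pkl"), start_time=pd.Timestamp("9:45:00"), end_time=pd.Timestamp("14:44:59"))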
|
def forward(self, batch: Batch) -> torch.Tensor:
"""
Input should be a dict (at least) containing:
- data_processed: [N, T, C]
- cur_step: [N] (int)
- cur_time: [N] (int)
- position_history: [N, S] (S is number of steps)
- target: [N]
- num_step: [N] (int)
- acquiring: [N] (0 or 1)
"""
inp = cast(FullHistoryObs, batch)
device = inp["data_processed"].device
sources, _ = self._source_features(inp, device)
assert len(sources) == self.num_sources
out = torch.cat(sources, -1)
return self.fc(out)
|
Input should be a dict (at least) containing:
- data_processed: [N, T, C]
- cur_step: [N] (int)
- cur_time: [N] (int)
- position_history: [N, S] (S is number of steps)
- target: [N]
- num_step: [N] (int)
- acquiring: [N] (0 or 1)
|
forward
|
python
|
microsoft/qlib
|
qlib/rl/order_execution/network.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/order_execution/network.py
|
MIT
|
def _iter_strategy(self, action: Optional[float] = None) -> SAOEStrategy:
"""Iterate the _collect_data_loop until we get the next yield SAOEStrategy."""
assert self._collect_data_loop is not None
obj = next(self._collect_data_loop) if action is None else self._collect_data_loop.send(action)
while not isinstance(obj, SAOEStrategy):
if isinstance(obj, BaseTradeDecision):
self.decisions.append(obj)
obj = next(self._collect_data_loop) if action is None else self._collect_data_loop.send(action)
assert isinstance(obj, SAOEStrategy)
return obj
|
Iterate the _collect_data_loop until we get the next yield SAOEStrategy.
|
_iter_strategy
|
python
|
microsoft/qlib
|
qlib/rl/order_execution/simulator_qlib.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/order_execution/simulator_qlib.py
|
MIT
|
def step(self, action: Optional[float]) -> None:
"""Execute one step or SAOE.
Parameters
----------
action (float):
The amount you wish to deal. The simulator does not guarantee that the full amount will be successfully dealt.
"""
assert not self.done(), "Simulator has already done!"
try:
self._last_yielded_saoe_strategy = self._iter_strategy(action=action)
except StopIteration:
pass
assert self._executor is not None
|
Execute one step of SAOE.
Parameters
----------
action (float):
The amount you wish to deal. The simulator does not guarantee that the full amount will be successfully dealt.
|
step
|
python
|
microsoft/qlib
|
qlib/rl/order_execution/simulator_qlib.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/order_execution/simulator_qlib.py
|
MIT
|
def step(self, amount: float) -> None:
"""Execute one step or SAOE.
Parameters
----------
amount
The amount you wish to deal. The simulator does not guarantee that the full amount will be successfully dealt.
"""
assert not self.done()
self.market_price = self.market_vol = None # avoid misuse
exec_vol = self._split_exec_vol(amount)
assert self.market_price is not None
assert self.market_vol is not None
ticks_position = self.position - np.cumsum(exec_vol)
self.position -= exec_vol.sum()
if abs(self.position) < 1e-6:
self.position = 0.0
if self.position < -EPS or (exec_vol < -EPS).any():
raise ValueError(f"Execution volume is invalid: {exec_vol} (position = {self.position})")
# Get time index available for this step
time_index = self._get_ticks_slice(self.cur_time, self._next_time())
self.history_exec = self._dataframe_append(
self.history_exec,
SAOEMetrics(
# It should have the same keys with SAOEMetrics,
# but the values do not necessarily have the annotated type.
# Some values could be vectorized (e.g., exec_vol).
stock_id=self.order.stock_id,
datetime=time_index,
direction=self.order.direction,
market_volume=self.market_vol,
market_price=self.market_price,
amount=exec_vol,
inner_amount=exec_vol,
deal_amount=exec_vol,
trade_price=self.market_price,
trade_value=self.market_price * exec_vol,
position=ticks_position,
ffr=exec_vol / self.order.amount,
pa=price_advantage(self.market_price, self.twap_price, self.order.direction),
),
)
self.history_steps = self._dataframe_append(
self.history_steps,
[self._metrics_collect(self.cur_time, self.market_vol, self.market_price, amount, exec_vol)],
)
if self.done():
if self.env is not None:
self.env.logger.add_any("history_steps", self.history_steps, loglevel=LogLevel.DEBUG)
self.env.logger.add_any("history_exec", self.history_exec, loglevel=LogLevel.DEBUG)
self.metrics = self._metrics_collect(
self.ticks_index[0], # start time
self.history_exec["market_volume"],
self.history_exec["market_price"],
self.history_steps["amount"].sum(),
self.history_exec["deal_amount"],
)
# NOTE (yuge): It looks to me that it's the "correct" decision to
# put all the logs here, because only components like simulators themselves
# have the knowledge about what could appear in the logs, and what's the format.
# But I admit it's not necessarily the most convenient way.
# I'll rethink about it when we have the second environment
# Maybe some APIs like self.logger.enable_auto_log() ?
if self.env is not None:
for key, value in self.metrics.items():
if isinstance(value, float):
self.env.logger.add_scalar(key, value)
else:
self.env.logger.add_any(key, value)
self.cur_time = self._next_time()
self.cur_step += 1
|
Execute one step of SAOE.
Parameters
----------
amount
    The amount you wish to deal. The simulator doesn't guarantee that the entire amount will be successfully dealt.
|
step
|
python
|
microsoft/qlib
|
qlib/rl/order_execution/simulator_simple.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/order_execution/simulator_simple.py
|
MIT
|
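A small numeric sketch (made-up values, not qlib data) of the position bookkeeping inside `step()` above (simulator_simple.py): the per-tick remaining position is the starting position minus the cumulative executed volume.

import numpy as np

position = 300.0
exec_vol = np.array([50.0, 20.0, 50.0])
ticks_position = position - np.cumsum(exec_vol)  # array([250., 230., 180.])
position -= exec_vol.sum()                       # 180.0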
def _next_time(self) -> pd.Timestamp:
"""The "current time" (``cur_time``) for next step."""
# Look for next time on time index
current_loc = self.ticks_index.get_loc(self.cur_time)
next_loc = current_loc + self.ticks_per_step
# Calibrate the next location to multiple of ticks_per_step.
# This is to make sure that:
# as long as ticks_per_step is a multiple of something, each step won't cross morning and afternoon.
next_loc = next_loc - next_loc % self.ticks_per_step
if next_loc < len(self.ticks_index) and self.ticks_index[next_loc] < self.order.end_time:
return self.ticks_index[next_loc]
else:
return self.order.end_time
|
The "current time" (``cur_time``) for next step.
|
_next_time
|
python
|
microsoft/qlib
|
qlib/rl/order_execution/simulator_simple.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/order_execution/simulator_simple.py
|
MIT
|
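The calibration in `_next_time()` above can be illustrated with standalone arithmetic (the numbers below are assumptions for illustration): the naive next location is rounded down to the nearest multiple of `ticks_per_step`, so a step can never straddle a boundary that falls on such a multiple.

ticks_per_step = 30
current_loc = 45                                  # suppose cur_time sits at tick 45
next_loc = current_loc + ticks_per_step           # 75
next_loc = next_loc - next_loc % ticks_per_step   # rounded down to 60
assert next_loc == 60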
def _split_exec_vol(self, exec_vol_sum: float) -> np.ndarray:
"""
Split the volume in each step into minutes, considering possible constraints.
This follows TWAP strategy.
"""
next_time = self._next_time()
# get the backtest data for next interval
self.market_vol = self.backtest_data.get_volume().loc[self.cur_time : next_time - EPS_T].to_numpy()
self.market_price = self.backtest_data.get_deal_price().loc[self.cur_time : next_time - EPS_T].to_numpy()
assert self.market_vol is not None and self.market_price is not None
# split the volume equally into each minute
exec_vol = np.repeat(exec_vol_sum / len(self.market_price), len(self.market_price))
# apply the volume threshold
market_vol_limit = self.vol_threshold * self.market_vol if self.vol_threshold is not None else np.inf
exec_vol = np.minimum(exec_vol, market_vol_limit) # type: ignore
# Complete all the order amount at the last moment.
if next_time >= self.order.end_time:
exec_vol[-1] += self.position - exec_vol.sum()
exec_vol = np.minimum(exec_vol, market_vol_limit) # type: ignore
return exec_vol
|
Split the volume in each step into minutes, considering possible constraints.
This follows TWAP strategy.
|
_split_exec_vol
|
python
|
microsoft/qlib
|
qlib/rl/order_execution/simulator_simple.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/order_execution/simulator_simple.py
|
MIT
|
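A minimal sketch of the TWAP split plus volume cap performed by `_split_exec_vol()` above, on made-up numbers (all values are assumptions):

import numpy as np

exec_vol_sum = 300.0
market_vol = np.array([100.0, 40.0, 100.0])    # per-minute market volume
vol_threshold = 0.5                            # trade at most 50% of market volume
exec_vol = np.repeat(exec_vol_sum / len(market_vol), len(market_vol))  # [100., 100., 100.]
market_vol_limit = vol_threshold * market_vol                          # [ 50.,  20.,  50.]
exec_vol = np.minimum(exec_vol, market_vol_limit)                      # [ 50.,  20.,  50.]
# Any remaining amount would be pushed into the last minute once the order's end time is reached.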
def fill_missing_data(
original_data: np.ndarray,
fill_method: Callable = np.nanmedian,
) -> np.ndarray:
"""Fill missing data.
Parameters
----------
    original_data
        Original data, possibly containing missing (NaN) values.
fill_method
Method used to fill the missing data.
Returns
-------
The filled data.
"""
return np.nan_to_num(original_data, nan=fill_method(original_data))
|
Fill missing data.
Parameters
----------
original_data
    Original data, possibly containing missing (NaN) values.
fill_method
Method used to fill the missing data.
Returns
-------
The filled data.
|
fill_missing_data
|
python
|
microsoft/qlib
|
qlib/rl/order_execution/strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/order_execution/strategy.py
|
MIT
|
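A usage sketch for `fill_missing_data` with illustrative values: NaNs are replaced by the nanmedian of the remaining entries.

import numpy as np

data = np.array([1.0, np.nan, 3.0, 5.0])
filled = np.nan_to_num(data, nan=np.nanmedian(data))  # nanmedian is 3.0
# filled == array([1., 3., 3., 5.])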
def generate_metrics_after_done(self) -> None:
"""Generate metrics once the upper level execution is done"""
self.metrics = self._collect_single_order_metric(
self.order,
self.backtest_data.ticks_index[0], # start time
self.history_exec["market_volume"],
self.history_exec["market_price"],
self.history_steps["amount"].sum(),
self.history_exec["deal_amount"],
)
|
Generate metrics once the upper level execution is done
|
generate_metrics_after_done
|
python
|
microsoft/qlib
|
qlib/rl/order_execution/strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/order_execution/strategy.py
|
MIT
|
def generate_trade_decision(
self,
execute_result: list | None = None,
) -> Union[BaseTradeDecision, Generator[Any, Any, BaseTradeDecision]]:
"""
For SAOEStrategy, we need to update the `self._last_step_range` every time a decision is generated.
    This operation should be invisible to developers, so we implement it in `generate_trade_decision()`.
    The concrete logic to generate decisions should be implemented in `_generate_trade_decision()`.
    In other words, all subclasses of `SAOEStrategy` should override `_generate_trade_decision()` instead of
`generate_trade_decision()`.
"""
self._last_step_range = self.get_data_cal_avail_range(rtype="step")
decision = self._generate_trade_decision(execute_result)
if isinstance(decision, GeneratorType):
decision = yield from decision
return decision
|
For SAOEStrategy, we need to update the `self._last_step_range` every time a decision is generated.
This operation should be invisible to developers, so we implement it in `generate_trade_decision()`.
The concrete logic to generate decisions should be implemented in `_generate_trade_decision()`.
In other words, all subclasses of `SAOEStrategy` should override `_generate_trade_decision()` instead of
`generate_trade_decision()`.
|
generate_trade_decision
|
python
|
microsoft/qlib
|
qlib/rl/order_execution/strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/rl/order_execution/strategy.py
|
MIT
|
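The wrapping in `generate_trade_decision()` above relies on Python's generator delegation; a generic sketch of the pattern (not qlib code) shows how the inner generator's return value is recovered through `yield from`:

from types import GeneratorType

def _inner():
    feedback = yield "intermediate"          # receives whatever the caller sends
    return f"decision({feedback})"

def outer():
    decision = _inner()
    if isinstance(decision, GeneratorType):  # delegate if the inner impl is a generator
        decision = yield from decision
    return decision

gen = outer()
print(next(gen))              # -> intermediate
try:
    gen.send("ok")
except StopIteration as exc:
    print(exc.value)          # -> decision(ok)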