code (string, 66–870k chars) | docstring (string, 19–26.7k chars) | func_name (string, 1–138 chars) | language (1 class) | repo (string, 7–68 chars) | path (string, 5–324 chars) | url (string, 46–389 chars) | license (7 classes)
---|---|---|---|---|---|---|---|
def parse_position(position: dict = None) -> pd.DataFrame:
"""Parse position dict to position DataFrame
:param position: position data
:return: position DataFrame;
.. code-block:: python
position_df = parse_position(positions)
print(position_df.head())
# status: 0-hold, -1-sell, 1-buy
amount cash count price status weight
instrument datetime
SZ000547 2017-01-04 44.154290 211405.285654 1 205.189575 1 0.031255
SZ300202 2017-01-04 60.638845 211405.285654 1 154.356506 1 0.032290
SH600158 2017-01-04 46.531681 211405.285654 1 153.895142 1 0.024704
SH600545 2017-01-04 197.173093 211405.285654 1 48.607037 1 0.033063
SZ000930 2017-01-04 103.938300 211405.285654 1 80.759453 1 0.028958
"""
position_weight_df = get_stock_weight_df(position)
# If the day does not exist, use the last weight
position_weight_df.fillna(method="ffill", inplace=True)
previous_data = {"date": None, "code_list": []}
result_df = pd.DataFrame()
for _trading_date, _value in position.items():
_value = _value.position
# pd_date type: pd.Timestamp
_cash = _value.pop("cash")
for _item in ["now_account_value"]:
if _item in _value:
_value.pop(_item)
_trading_day_df = pd.DataFrame.from_dict(_value, orient="index")
_trading_day_df["weight"] = position_weight_df.loc[_trading_date]
_trading_day_df["cash"] = _cash
_trading_day_df["date"] = _trading_date
# status: 0-hold, -1-sell, 1-buy
_trading_day_df["status"] = 0
# T not exist, T-1 exist, T sell
_cur_day_sell = set(previous_data["code_list"]) - set(_trading_day_df.index)
# T exist, T-1 not exist, T buy
_cur_day_buy = set(_trading_day_df.index) - set(previous_data["code_list"])
# Trading day buy
_trading_day_df.loc[_trading_day_df.index.isin(_cur_day_buy), "status"] = 1
# Trading day sell
if not result_df.empty:
_trading_day_sell_df = result_df.loc[
(result_df["date"] == previous_data["date"]) & (result_df.index.isin(_cur_day_sell))
].copy()
if not _trading_day_sell_df.empty:
_trading_day_sell_df["status"] = -1
_trading_day_sell_df["date"] = _trading_date
_trading_day_df = pd.concat([_trading_day_df, _trading_day_sell_df], sort=False)
result_df = pd.concat([result_df, _trading_day_df], sort=True)
previous_data = dict(
date=_trading_date,
code_list=_trading_day_df[_trading_day_df["status"] != -1].index,
)
result_df.reset_index(inplace=True)
result_df.rename(columns={"date": "datetime", "index": "instrument"}, inplace=True)
return result_df.set_index(["instrument", "datetime"])
|
Parse position dict to position DataFrame
:param position: position data
:return: position DataFrame;
.. code-block:: python
position_df = parse_position(positions)
print(position_df.head())
# status: 0-hold, -1-sell, 1-buy
amount cash count price status weight
instrument datetime
SZ000547 2017-01-04 44.154290 211405.285654 1 205.189575 1 0.031255
SZ300202 2017-01-04 60.638845 211405.285654 1 154.356506 1 0.032290
SH600158 2017-01-04 46.531681 211405.285654 1 153.895142 1 0.024704
SH600545 2017-01-04 197.173093 211405.285654 1 48.607037 1 0.033063
SZ000930 2017-01-04 103.938300 211405.285654 1 80.759453 1 0.028958
|
parse_position
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/parse_position.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/parse_position.py
|
MIT
|
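A minimal mock of the `position` argument accepted by `parse_position`, inferred from the parsing loop above; the wrapper object with a `.position` attribute and the per-stock field names are assumptions based on the example output, not the exact qlib classes.
from types import SimpleNamespace

# Hypothetical structure only: each trading date maps to an object whose `.position`
# dict holds per-stock fields plus the special "cash" / "now_account_value" keys.
positions = {
    "2017-01-04": SimpleNamespace(
        position={
            "SZ000547": {"amount": 44.15, "price": 205.19, "count": 1},
            "cash": 211405.29,
            "now_account_value": 220000.0,
        }
    ),
}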
def _add_label_to_position(position_df: pd.DataFrame, label_data: pd.DataFrame) -> pd.DataFrame:
"""Concat position with custom label
:param position_df: position DataFrame
:param label_data:
:return: concat result
"""
_start_time = position_df.index.get_level_values(level="datetime").min()
_end_time = position_df.index.get_level_values(level="datetime").max()
label_data = label_data.loc(axis=0)[:, pd.to_datetime(_start_time) :]
_result_df = pd.concat([position_df, label_data], axis=1, sort=True).reindex(label_data.index)
_result_df = _result_df.loc[_result_df.index.get_level_values(1) <= _end_time]
return _result_df
|
Concat position with custom label
:param position_df: position DataFrame
:param label_data:
:return: concat result
|
_add_label_to_position
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/parse_position.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/parse_position.py
|
MIT
|
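A small, self-contained pandas illustration of the `.loc(axis=0)[:, start:]` slicing used above to trim `label_data` to the position's date range; the instrument codes and label values are made up.
import pandas as pd

idx = pd.MultiIndex.from_product(
    [["SH600000", "SZ000001"], pd.to_datetime(["2017-01-03", "2017-01-04", "2017-01-05"])],
    names=["instrument", "datetime"],
)
label_data = pd.DataFrame({"label": range(6)}, index=idx)
# keep every instrument, but only datetimes >= 2017-01-04
trimmed = label_data.loc(axis=0)[:, pd.to_datetime("2017-01-04"):]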
def _add_bench_to_position(position_df: pd.DataFrame = None, bench: pd.Series = None) -> pd.DataFrame:
"""Concat position with bench
:param position_df: position DataFrame
:param bench: report normal data
:return: concat result
"""
_temp_df = position_df.reset_index(level="instrument")
# FIXME: after a stock is bought or sold, the next trading day's benchmark return is used.
_temp_df["bench"] = bench.shift(-1)
res_df = _temp_df.set_index(["instrument", _temp_df.index])
return res_df
|
Concat position with bench
:param position_df: position DataFrame
:param bench: report normal data
:return: concat result
|
_add_bench_to_position
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/parse_position.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/parse_position.py
|
MIT
|
def _calculate_label_rank(df: pd.DataFrame) -> pd.DataFrame:
"""calculate label rank
:param df:
:return:
"""
_label_name = "label"
def _calculate_day_value(g_df: pd.DataFrame):
g_df = g_df.copy()
g_df["rank_ratio"] = g_df[_label_name].rank(ascending=False) / len(g_df) * 100
# Sell: -1, Hold: 0, Buy: 1
for i in [-1, 0, 1]:
g_df.loc[g_df["status"] == i, "rank_label_mean"] = g_df[g_df["status"] == i]["rank_ratio"].mean()
g_df["excess_return"] = g_df[_label_name] - g_df[_label_name].mean()
return g_df
return df.groupby(level="datetime", group_keys=False).apply(_calculate_day_value)
|
calculate label rank
:param df:
:return:
|
_calculate_label_rank
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/parse_position.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/parse_position.py
|
MIT
|
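A one-day toy illustration (made-up numbers) of the per-group computation above: `rank_ratio` is the descending cross-sectional percentile of `label`, and `excess_return` is the label demeaned within the day.
import pandas as pd

g_df = pd.DataFrame({"label": [0.03, 0.01, -0.02], "status": [1, 0, -1]})
g_df["rank_ratio"] = g_df["label"].rank(ascending=False) / len(g_df) * 100
g_df["excess_return"] = g_df["label"] - g_df["label"].mean()
# rank_ratio: 33.3, 66.7, 100.0 ; excess_return: ~0.0233, ~0.0033, ~-0.0267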
def get_position_data(
position: dict,
label_data: pd.DataFrame,
report_normal: pd.DataFrame = None,
calculate_label_rank=False,
start_date=None,
end_date=None,
) -> pd.DataFrame:
"""Concat position data with pred/report_normal
:param position: position data
:param report_normal: report normal, must contain a 'bench' column
:param label_data:
:param calculate_label_rank:
:param start_date: start date
:param end_date: end date
:return: concat result,
columns: ['amount', 'cash', 'count', 'price', 'status', 'weight', 'label',
'rank_ratio', 'rank_label_mean', 'excess_return', 'score', 'bench']
index: ['instrument', 'date']
"""
_position_df = parse_position(position)
# Add custom_label, rank_ratio, rank_mean, and excess_return field
_position_df = _add_label_to_position(_position_df, label_data)
if calculate_label_rank:
_position_df = _calculate_label_rank(_position_df)
if report_normal is not None:
# Add bench field
_position_df = _add_bench_to_position(_position_df, report_normal["bench"])
_date_list = _position_df.index.get_level_values(level="datetime")
start_date = _date_list.min() if start_date is None else start_date
end_date = _date_list.max() if end_date is None else end_date
_position_df = _position_df.loc[(start_date <= _date_list) & (_date_list <= end_date)]
return _position_df
|
Concat position data with pred/report_normal
:param position: position data
:param report_normal: report normal, must contain a 'bench' column
:param label_data:
:param calculate_label_rank:
:param start_date: start date
:param end_date: end date
:return: concat result,
columns: ['amount', 'cash', 'count', 'price', 'status', 'weight', 'label',
'rank_ratio', 'rank_label_mean', 'excess_return', 'score', 'bench']
index: ['instrument', 'date']
|
get_position_data
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/parse_position.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/parse_position.py
|
MIT
|
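A hedged usage sketch of `get_position_data` (defined above): it assumes qlib has been initialized and that `positions` comes from a backtest record; the label expression only mirrors the one shown for `score_ic_graph` later in this file and is not prescribed by the function itself.
from qlib.data import D

# hypothetical inputs: `positions` is a backtest position dict, label is a 2-day-ahead return
features_df = D.features(D.instruments("csi300"), ["Ref($close, -2)/Ref($close, -1)-1"])
features_df.columns = ["label"]
position_df = get_position_data(positions, label_data=features_df, calculate_label_rank=True)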
def _get_figure_with_position(
position: dict, label_data: pd.DataFrame, start_date=None, end_date=None
) -> Iterable[go.Figure]:
"""Get average analysis figures
:param position: position
:param label_data:
:param start_date:
:param end_date:
:return:
"""
_position_df = get_position_data(
position,
label_data,
calculate_label_rank=True,
start_date=start_date,
end_date=end_date,
)
res_dict = dict()
_pos_gp = _position_df.groupby(level=1, group_keys=False)
for _item in _pos_gp:
_date = _item[0]
_day_df = _item[1]
_day_value = res_dict.setdefault(_date, {})
for _i, _name in {0: "Hold", 1: "Buy", -1: "Sell"}.items():
_temp_df = _day_df[_day_df["status"] == _i]
if _temp_df.empty:
_day_value[_name] = 0
else:
_day_value[_name] = _temp_df["rank_label_mean"].values[0]
_res_df = pd.DataFrame.from_dict(res_dict, orient="index")
# FIXME: support HIGH-FREQ
_res_df.index = _res_df.index.strftime("%Y-%m-%d")
for _col in _res_df.columns:
yield ScatterGraph(
_res_df.loc[:, [_col]],
layout=dict(
title=_col,
xaxis=dict(type="category", tickangle=45),
yaxis=dict(title="lable-rank-ratio: %"),
),
graph_kwargs=dict(mode="lines+markers"),
).figure
|
Get average analysis figures
:param position: position
:param label_data:
:param start_date:
:param end_date:
:return:
|
_get_figure_with_position
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/rank_label.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/rank_label.py
|
MIT
|
def _get_risk_analysis_data_with_report(
report_normal_df: pd.DataFrame,
# report_long_short_df: pd.DataFrame,
date: pd.Timestamp,
) -> pd.DataFrame:
"""Get risk analysis data with report
:param report_normal_df: report data
:param report_long_short_df: report data
:param date: date string
:return:
"""
analysis = dict()
# if not report_long_short_df.empty:
# analysis["pred_long"] = risk_analysis(report_long_short_df["long"])
# analysis["pred_short"] = risk_analysis(report_long_short_df["short"])
# analysis["pred_long_short"] = risk_analysis(report_long_short_df["long_short"])
if not report_normal_df.empty:
analysis["excess_return_without_cost"] = risk_analysis(report_normal_df["return"] - report_normal_df["bench"])
analysis["excess_return_with_cost"] = risk_analysis(
report_normal_df["return"] - report_normal_df["bench"] - report_normal_df["cost"]
)
analysis_df = pd.concat(analysis) # type: pd.DataFrame
analysis_df["date"] = date
return analysis_df
|
Get risk analysis data with report
:param report_normal_df: report data
:param report_long_short_df: report data
:param date: date string
:return:
|
_get_risk_analysis_data_with_report
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/risk_analysis.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/risk_analysis.py
|
MIT
|
def _get_all_risk_analysis(risk_df: pd.DataFrame) -> pd.DataFrame:
"""risk_df to standard
:param risk_df: risk data
:return:
"""
if risk_df is None:
return pd.DataFrame()
risk_df = risk_df.unstack()
risk_df.columns = risk_df.columns.droplevel(0)
return risk_df.drop("mean", axis=1)
|
Convert risk_df to a standard wide format
:param risk_df: risk data
:return:
|
_get_all_risk_analysis
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/risk_analysis.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/risk_analysis.py
|
MIT
|
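A pandas-only sketch of the reshaping above; the (analysis name, statistic) MultiIndex and the single 'risk' value column are assumptions about what `risk_analysis` produces, used here only to show the `unstack`/`droplevel`/`drop` steps.
import pandas as pd

idx = pd.MultiIndex.from_product(
    [["excess_return_without_cost", "excess_return_with_cost"],
     ["mean", "std", "annualized_return", "information_ratio", "max_drawdown"]]
)
risk_df = pd.DataFrame({"risk": range(10)}, index=idx)
wide = risk_df.unstack()                  # columns become ('risk', <statistic>)
wide.columns = wide.columns.droplevel(0)  # keep only the statistic names
wide = wide.drop("mean", axis=1)          # one row per analysis, 'mean' removed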
def _get_monthly_risk_analysis_with_report(report_normal_df: pd.DataFrame) -> pd.DataFrame:
"""Get monthly analysis data
:param report_normal_df:
# :param report_long_short_df:
:return:
"""
# Group by month
report_normal_gp = report_normal_df.groupby(
[report_normal_df.index.year, report_normal_df.index.month], group_keys=False
)
# report_long_short_gp = report_long_short_df.groupby(
# [report_long_short_df.index.year, report_long_short_df.index.month], group_keys=False
# )
gp_month = sorted(set(report_normal_gp.size().index))
_monthly_df = pd.DataFrame()
for gp_m in gp_month:
_m_report_normal = report_normal_gp.get_group(gp_m)
# _m_report_long_short = report_long_short_gp.get_group(gp_m)
if len(_m_report_normal) < 3:
# Skip months with fewer than 3 trading days
# FIXME: if a month has fewer than 3 trading days, a gap will appear in the graph
continue
month_days = pd.Timestamp(year=gp_m[0], month=gp_m[1], day=1).days_in_month
_temp_df = _get_risk_analysis_data_with_report(
_m_report_normal,
# _m_report_long_short,
pd.Timestamp(year=gp_m[0], month=gp_m[1], day=month_days),
)
_monthly_df = pd.concat([_monthly_df, _temp_df], sort=False)
return _monthly_df
|
Get monthly analysis data
:param report_normal_df:
# :param report_long_short_df:
:return:
|
_get_monthly_risk_analysis_with_report
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/risk_analysis.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/risk_analysis.py
|
MIT
|
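A minimal illustration (synthetic business days) of the (year, month) grouping used above, including the month-end date each group is labelled with.
import pandas as pd

report = pd.DataFrame({"return": 0.001}, index=pd.date_range("2017-01-25", "2017-02-05", freq="B"))
gp = report.groupby([report.index.year, report.index.month], group_keys=False)
for (year, month), month_df in gp:
    if len(month_df) < 3:  # same skip rule as above
        continue
    last_day = pd.Timestamp(year=year, month=month, day=1).days_in_month
    label = pd.Timestamp(year=year, month=month, day=last_day)  # e.g. 2017-01-31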
def _get_risk_analysis_figure(analysis_df: pd.DataFrame) -> Iterable[py.Figure]:
"""Get analysis graph figure
:param analysis_df:
:return:
"""
if analysis_df is None:
return []
_figure = SubplotsGraph(
_get_all_risk_analysis(analysis_df),
kind_map=dict(kind="BarGraph", kwargs={}),
subplots_kwargs={"rows": 1, "cols": 4},
).figure
return (_figure,)
|
Get analysis graph figure
:param analysis_df:
:return:
|
_get_risk_analysis_figure
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/risk_analysis.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/risk_analysis.py
|
MIT
|
def _get_monthly_risk_analysis_figure(report_normal_df: pd.DataFrame) -> Iterable[py.Figure]:
"""Get analysis monthly graph figure
:param report_normal_df:
:param report_long_short_df:
:return:
"""
# if report_normal_df is None and report_long_short_df is None:
# return []
if report_normal_df is None:
return []
# if report_normal_df is None:
# report_normal_df = pd.DataFrame(index=report_long_short_df.index)
# if report_long_short_df is None:
# report_long_short_df = pd.DataFrame(index=report_normal_df.index)
_monthly_df = _get_monthly_risk_analysis_with_report(
report_normal_df=report_normal_df,
# report_long_short_df=report_long_short_df,
)
for _feature in ["annualized_return", "max_drawdown", "information_ratio", "std"]:
_temp_df = _get_monthly_analysis_with_feature(_monthly_df, _feature)
yield ScatterGraph(
_temp_df,
layout=dict(title=_feature, xaxis=dict(type="category", tickangle=45)),
graph_kwargs={"mode": "lines+markers"},
).figure
|
Get analysis monthly graph figure
:param report_normal_df:
:param report_long_short_df:
:return:
|
_get_monthly_risk_analysis_figure
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/risk_analysis.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/risk_analysis.py
|
MIT
|
def score_ic_graph(pred_label: pd.DataFrame, show_notebook: bool = True, **kwargs) -> [list, tuple]:
"""score IC
Example:
.. code-block:: python
from qlib.data import D
from qlib.contrib.report import analysis_position
pred_df_dates = pred_df.index.get_level_values(level='datetime')
features_df = D.features(D.instruments('csi500'), ['Ref($close, -2)/Ref($close, -1)-1'], pred_df_dates.min(), pred_df_dates.max())
features_df.columns = ['label']
pred_label = pd.concat([features_df, pred_df], axis=1, sort=True).reindex(features_df.index)
analysis_position.score_ic_graph(pred_label)
:param pred_label: index is **pd.MultiIndex**, index name is **[instrument, datetime]**; columns names is **[score, label]**.
.. code-block:: python
instrument datetime score label
SH600004 2017-12-11 -0.013502 -0.013502
2017-12-12 -0.072367 -0.072367
2017-12-13 -0.068605 -0.068605
2017-12-14 0.012440 0.012440
2017-12-15 -0.102778 -0.102778
:param show_notebook: whether to display graphics in notebook, the default is **True**.
:return: if show_notebook is True, display in notebook; else return **plotly.graph_objs.Figure** list.
"""
_ic_df = _get_score_ic(pred_label)
_figure = ScatterGraph(
_ic_df,
layout=dict(
title="Score IC",
xaxis=dict(tickangle=45, rangebreaks=kwargs.get("rangebreaks", guess_plotly_rangebreaks(_ic_df.index))),
),
graph_kwargs={"mode": "lines+markers"},
).figure
if show_notebook:
ScatterGraph.show_graph_in_notebook([_figure])
else:
return (_figure,)
|
score IC
Example:
.. code-block:: python
from qlib.data import D
from qlib.contrib.report import analysis_position
pred_df_dates = pred_df.index.get_level_values(level='datetime')
features_df = D.features(D.instruments('csi500'), ['Ref($close, -2)/Ref($close, -1)-1'], pred_df_dates.min(), pred_df_dates.max())
features_df.columns = ['label']
pred_label = pd.concat([features_df, pred_df], axis=1, sort=True).reindex(features_df.index)
analysis_position.score_ic_graph(pred_label)
:param pred_label: index is **pd.MultiIndex**, index name is **[instrument, datetime]**; columns names is **[score, label]**.
.. code-block:: python
instrument datetime score label
SH600004 2017-12-11 -0.013502 -0.013502
2017-12-12 -0.072367 -0.072367
2017-12-13 -0.068605 -0.068605
2017-12-14 0.012440 0.012440
2017-12-15 -0.102778 -0.102778
:param show_notebook: whether to display graphics in notebook, the default is **True**.
:return: if show_notebook is True, display in notebook; else return **plotly.graph_objs.Figure** list.
|
score_ic_graph
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/score_ic.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/score_ic.py
|
MIT
|
def __init__(self, dataset: pd.DataFrame):
"""
Parameters
----------
dataset : pd.DataFrame
The dataset often has multiple columns. Each column corresponds to one sub figure.
There will be a datetime column in the index levels.
Aggregation will be used for more summarized metrics over time.
Here is an example of data:
.. code-block::
return
datetime instrument
2007-02-06 equity_tpx 0.010087
equity_spx 0.000786
"""
self._dataset = dataset
with TimeInspector.logt("calc_stat_values"):
self.calc_stat_values()
|
Parameters
----------
dataset : pd.DataFrame
The dataset often has multiple columns. Each column corresponds to one sub figure.
There will be a datetime column in the index levels.
Aggregation will be used for more summarized metrics over time.
Here is an example of data:
.. code-block::
return
datetime instrument
2007-02-06 equity_tpx 0.010087
equity_spx 0.000786
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/report/data/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/data/base.py
|
MIT
|
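The example layout from the docstring above, written out as a tiny frame so the index structure is concrete (the values are the ones shown in the docstring).
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [(pd.Timestamp("2007-02-06"), "equity_tpx"), (pd.Timestamp("2007-02-06"), "equity_spx")],
    names=["datetime", "instrument"],
)
dataset = pd.DataFrame({"return": [0.010087, 0.000786]}, index=idx)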
def __init__(
self,
conf_path: Union[str, Path],
exp_name: Optional[str] = None,
horizon: Optional[int] = 20,
step: int = 20,
h_path: Optional[str] = None,
train_start: Optional[str] = None,
test_end: Optional[str] = None,
task_ext_conf: Optional[dict] = None,
rolling_exp: Optional[str] = None,
) -> None:
"""
Parameters
----------
conf_path : str
Path to the config for rolling.
exp_name : Optional[str]
The exp name of the outputs (Output is a record which contains the concatenated predictions of rolling records).
horizon: Optional[int] = 20,
The horizon of the prediction target.
This is used to override the prediction horizon of the file.
h_path : Optional[str]
It is other data source that is dumped as a handler. It will override the data handler section in the config.
If it is not given, it will create a customized cache for the handler when `enable_handler_cache=True`
test_end : Optional[str]
the test end for the data. It is typically used together with the handler
You can do the same thing with task_ext_conf in a more complicated way
train_start : Optional[str]
the train start for the data. It is typically used together with the handler.
You can do the same thing with task_ext_conf in a more complicated way
task_ext_conf : Optional[dict]
some option to update the task config.
rolling_exp : Optional[str]
The name for the experiments for rolling.
It will contain many records in one experiment; each record corresponds to a specific rolling window.
Please note that this is different from the final experiment.
"""
self.logger = get_module_logger("Rolling")
self.conf_path = Path(conf_path)
self.exp_name = exp_name
self._rid = None # the final combined recorder id in `exp_name`
self.step = step
assert horizon is not None, "Current version does not support extracting horizon from the underlying dataset"
self.horizon = horizon
if rolling_exp is None:
datetime_suffix = pd.Timestamp.now().strftime("%Y%m%d%H%M%S")
self.rolling_exp = f"rolling_models_{datetime_suffix}"
else:
self.rolling_exp = rolling_exp
self.logger.warning(
"Using user specifiied name for rolling models. So the experiment names duplicateds. "
"Please manually remove your experiment for rolling model with command like `rm -r mlruns`."
" Otherwise it will prevents the creating of experimen with same name"
)
self.train_start = train_start
self.test_end = test_end
self.task_ext_conf = task_ext_conf
self.h_path = h_path
# FIXME:
# - the qlib_init section will be ignored by me.
# - So we have to design a priority mechanism to solve this issue.
|
Parameters
----------
conf_path : str
Path to the config for rolling.
exp_name : Optional[str]
The exp name of the outputs (Output is a record which contains the concatenated predictions of rolling records).
horizon: Optional[int] = 20,
The horizon of the prediction target.
This is used to override the prediction horizon of the file.
h_path : Optional[str]
It is other data source that is dumped as a handler. It will override the data handler section in the config.
If it is not given, it will create a customized cache for the handler when `enable_handler_cache=True`
test_end : Optional[str]
the test end for the data. It is typically used together with the handler
You can do the same thing with task_ext_conf in a more complicated way
train_start : Optional[str]
the train start for the data. It is typically used together with the handler.
You can do the same thing with task_ext_conf in a more complicated way
task_ext_conf : Optional[dict]
some option to update the task config.
rolling_exp : Optional[str]
The name for the experiments for rolling.
It will contain many records in one experiment; each record corresponds to a specific rolling window.
Please note that this is different from the final experiment.
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/rolling/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/rolling/base.py
|
MIT
|
def _replace_handler_with_cache(self, task: dict):
"""
The data processing part of the original rolling workflow is slow, so this method
replaces the task's data handler with a cached (dumped) handler to speed it up.
"""
if self.h_path is not None:
h_path = Path(self.h_path)
task["dataset"]["kwargs"]["handler"] = f"file://{h_path}"
else:
task = replace_task_handler_with_cache(task, self.conf_path.parent)
return task
|
The data processing part of the original rolling workflow is slow, so this method
replaces the task's data handler with a cached (dumped) handler to speed it up.
|
_replace_handler_with_cache
|
python
|
microsoft/qlib
|
qlib/contrib/rolling/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/rolling/base.py
|
MIT
|
def basic_task(self, enable_handler_cache: Optional[bool] = True):
"""
The basic task may not be exactly the same as the config from `conf_path` in __init__ because
- some parameters may be overridden by parameters from __init__
- users may implement a subclass to change it for higher performance
"""
task: dict = self._raw_conf()["task"]
task = deepcopy(task)
# modify dataset horizon
# NOTE:
# It assumes that the label can be modified in the handler's kwargs,
# but this is not always valid; it only holds for the predefined datasets `Alpha158` & `Alpha360`
if self.horizon is None:
# TODO:
# - get horizon automatically from the expression!!!!
raise NotImplementedError(f"This type of input is not supported")
else:
if enable_handler_cache and self.h_path is not None:
self.logger.info("Fail to override the horizon due to data handler cache")
else:
self.logger.info("The prediction horizon is overrided")
if isinstance(task["dataset"]["kwargs"]["handler"], dict):
task["dataset"]["kwargs"]["handler"]["kwargs"]["label"] = [
"Ref($close, -{}) / Ref($close, -1) - 1".format(self.horizon + 1)
]
else:
self.logger.warning("Try to automatically configure the lablel but failed.")
if self.h_path is not None or enable_handler_cache:
# if we already have provided data source or we want to create one
task = self._replace_handler_with_cache(task)
task = self._update_start_end_time(task)
if self.task_ext_conf is not None:
task = update_config(task, self.task_ext_conf)
self.logger.info(task)
return task
|
The basic task may not be exactly the same as the config from `conf_path` in __init__ because
- some parameters may be overridden by parameters from __init__
- users may implement a subclass to change it for higher performance
|
basic_task
|
python
|
microsoft/qlib
|
qlib/contrib/rolling/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/rolling/base.py
|
MIT
|
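A small sketch of the label expression produced by the horizon override above; assuming the default horizon of 20, the handler label becomes a 21-step-ahead return.
horizon = 20
label = ["Ref($close, -{}) / Ref($close, -1) - 1".format(horizon + 1)]
# -> ['Ref($close, -21) / Ref($close, -1) - 1']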
def run_basic_task(self):
"""
Run the basic task without rolling.
This is for fast testing during model tuning.
"""
task = self.basic_task()
print(task)
trainer = TrainerR(experiment_name=self.exp_name)
trainer([task])
|
Run the basic task without rolling.
This is for fast testing during model tuning.
|
run_basic_task
|
python
|
microsoft/qlib
|
qlib/contrib/rolling/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/rolling/base.py
|
MIT
|
def get_task_list(self) -> List[dict]:
"""return a batch of tasks for rolling."""
task = self.basic_task()
task_l = task_generator(
task, RollingGen(step=self.step, trunc_days=self.horizon + 1)
) # the last two days should be truncated to avoid information leakage
for t in task_l:
# when rolling tasks, no further analysis is needed;
# analysis is postponed to the final ensemble.
t["record"] = ["qlib.workflow.record_temp.SignalRecord"]
return task_l
|
return a batch of tasks for rolling.
|
get_task_list
|
python
|
microsoft/qlib
|
qlib/contrib/rolling/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/rolling/base.py
|
MIT
|
def __init__(
self,
sim_task_model: UTIL_MODEL_TYPE = "gbdt",
meta_1st_train_end: Optional[str] = None,
alpha: float = 0.01,
loss_skip_thresh: int = 50,
fea_imp_n: Optional[int] = 30,
meta_data_proc: Optional[str] = "V01",
segments: Union[float, str] = 0.62,
hist_step_n: int = 30,
working_dir: Optional[Union[str, Path]] = None,
**kwargs,
):
"""
Parameters
----------
sim_task_model: Literal["linear", "gbdt"] = "gbdt",
The model for calculating similarity between data.
meta_1st_train_end: Optional[str]
the datetime of training end of the first meta_task
alpha: float
Setting the L2 regularization for ridge
The `alpha` is only passed to MetaModelDS (it is not passed to sim_task_model currently..)
loss_skip_thresh: int
The threshold for skipping the loss calculation for each day: if the number of items is less than it, the loss on that day is skipped.
meta_data_proc : Optional[str]
How we process the meta dataset for learning meta model.
segments : Union[float, str]
if segments is a float:
The ratio of training data in the meta task dataset
if segments is a string:
it will try its best to put its data in training and ensure that the date `segments` is in the test set
"""
# NOTE:
# the horizon must match the meaning in the base task template
self.meta_exp_name = "DDG-DA"
self.sim_task_model: UTIL_MODEL_TYPE = sim_task_model # The model to capture the distribution of data.
self.alpha = alpha
self.meta_1st_train_end = meta_1st_train_end
super().__init__(**kwargs)
self.working_dir = self.conf_path.parent if working_dir is None else Path(working_dir)
self.proxy_hd = self.working_dir / "handler_proxy.pkl"
self.fea_imp_n = fea_imp_n
self.meta_data_proc = meta_data_proc
self.loss_skip_thresh = loss_skip_thresh
self.segments = segments
self.hist_step_n = hist_step_n
|
Parameters
----------
sim_task_model: Literal["linear", "gbdt"] = "gbdt",
The model for calculating similarity between data.
meta_1st_train_end: Optional[str]
the datetime of training end of the first meta_task
alpha: float
Setting the L2 regularization for ridge
The `alpha` is only passed to MetaModelDS (it is not passed to sim_task_model currently..)
loss_skip_thresh: int
The threshold for skipping the loss calculation for each day: if the number of items is less than it, the loss on that day is skipped.
meta_data_proc : Optional[str]
How we process the meta dataset for learning meta model.
segments : Union[float, str]
if segments is a float:
The ratio of training data in the meta task dataset
if segments is a string:
it will try its best to put its data in training and ensure that the date `segments` is in the test set
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/rolling/ddgda.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/rolling/ddgda.py
|
MIT
|
def _adjust_task(self, task: dict, astype: UTIL_MODEL_TYPE):
"""
Based on the original task, we need to do some extra things.
For example:
- GBDT for calculating feature importance
- Linear or GBDT for calculating similarity
- a dataset (well processed) aligned to the Linear model for meta learning
So we may need to change the dataset and model for these special purposes while other settings remain the same.
"""
# NOTE: here is just for aligning with previous implementation
# It is not necessary for the current implementation
handler = task["dataset"].setdefault("kwargs", {}).setdefault("handler", {})
if astype == "gbdt":
task["model"] = LGBM_MODEL
if isinstance(handler, dict):
# We don't need preprocessing when using GBDT model
for k in ["infer_processors", "learn_processors"]:
if k in handler.setdefault("kwargs", {}):
handler["kwargs"].pop(k)
elif astype == "linear":
task["model"] = LINEAR_MODEL
if isinstance(handler, dict):
handler["kwargs"].update(PROC_ARGS)
else:
self.logger.warning("The handler can't be adjusted.")
else:
raise ValueError(f"astype not supported: {astype}")
return task
|
Based on the original task, we need to do some extra things.
For example:
- GBDT for calculating feature importance
- Linear or GBDT for calculating similarity
- a dataset (well processed) aligned to the Linear model for meta learning
So we may need to change the dataset and model for these special purposes while other settings remain the same.
|
_adjust_task
|
python
|
microsoft/qlib
|
qlib/contrib/rolling/ddgda.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/rolling/ddgda.py
|
MIT
|
def _dump_data_for_proxy_model(self):
"""
Dump data for training meta model.
The meta model will be trained upon the proxy forecasting model.
This dataset is for the proxy forecasting model.
"""
# NOTE: adjusting to `self.sim_task_model` just for aligning with previous implementation.
# In the previous version, the data for the proxy model was processed in sim_task_model's way.
task = self._adjust_task(self.basic_task(enable_handler_cache=False), self.sim_task_model)
task = replace_task_handler_with_cache(task, self.working_dir)
# if self.meta_data_proc is not None:
# else:
# # Otherwise, we don't need futher processing
# task = self.basic_task()
dataset = init_instance_by_config(task["dataset"])
prep_ds = dataset.prepare(slice(None), col_set=["feature", "label"], data_key=DataHandlerLP.DK_L)
feature_df = prep_ds["feature"]
label_df = prep_ds["label"]
if self.fea_imp_n is not None:
fi = self._get_feature_importance()
col_selected = fi.nlargest(self.fea_imp_n)
feature_selected = feature_df.loc[:, col_selected.index]
else:
feature_selected = feature_df
if self.meta_data_proc == "V01":
feature_selected = feature_selected.groupby("datetime", group_keys=False).apply(
lambda df: (df - df.mean()).div(df.std())
)
feature_selected = feature_selected.fillna(0.0)
df_all = {
"label": label_df.reindex(feature_selected.index),
"feature": feature_selected,
}
df_all = pd.concat(df_all, axis=1)
df_all.to_pickle(self.working_dir / "fea_label_df.pkl")
# dump data in handler format for aligning the interface
handler = DataHandlerLP(
data_loader={
"class": "qlib.data.dataset.loader.StaticDataLoader",
"kwargs": {"config": self.working_dir / "fea_label_df.pkl"},
}
)
handler.to_pickle(self.working_dir / self.proxy_hd, dump_all=True)
|
Dump data for training meta model.
The meta model will be trained upon the proxy forecasting model.
This dataset is for the proxy forecasting model.
|
_dump_data_for_proxy_model
|
python
|
microsoft/qlib
|
qlib/contrib/rolling/ddgda.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/rolling/ddgda.py
|
MIT
|
def _dump_meta_ipt(self):
"""
Dump data for training meta model.
This function will dump the input data for meta model
"""
# According to the experiments, the choice of the model type is very important for achieving good results
sim_task = self._adjust_task(self.basic_task(enable_handler_cache=False), astype=self.sim_task_model)
sim_task = replace_task_handler_with_cache(sim_task, self.working_dir)
if self.sim_task_model == "gbdt":
sim_task["model"].setdefault("kwargs", {}).update({"early_stopping_rounds": None, "num_boost_round": 150})
exp_name_sim = f"data_sim_s{self.step}"
internal_data = InternalData(sim_task, self.step, exp_name=exp_name_sim)
internal_data.setup(trainer=TrainerR)
with self._internal_data_path.open("wb") as f:
pickle.dump(internal_data, f)
|
Dump data for training meta model.
This function will dump the input data for meta model
|
_dump_meta_ipt
|
python
|
microsoft/qlib
|
qlib/contrib/rolling/ddgda.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/rolling/ddgda.py
|
MIT
|
def _train_meta_model(self, fill_method="max"):
"""
Train a meta model based on a simplified linear proxy model.
"""
# 1) leverage the simplified proxy forecasting model to train meta model.
# - Only the dataset part is important, in current version of meta model will integrate the
# NOTE:
# - The train_start for training meta model does not necessarily align with final rolling
# But please select a proper time to make sure the final rolling tasks are not leaked into the training data.
# - The test_start is automatically aligned to the day after train_end. Validation is ignored.
train_start = "2008-01-01" if self.train_start is None else self.train_start
train_end = "2010-12-31" if self.meta_1st_train_end is None else self.meta_1st_train_end
test_start = (pd.Timestamp(train_end) + pd.Timedelta(days=1)).strftime("%Y-%m-%d")
proxy_forecast_model_task = {
# "model": "qlib.contrib.model.linear.LinearModel",
"dataset": {
"class": "qlib.data.dataset.DatasetH",
"kwargs": {
"handler": f"file://{(self.working_dir / self.proxy_hd).absolute()}",
"segments": {
"train": (train_start, train_end),
"test": (test_start, self.basic_task()["dataset"]["kwargs"]["segments"]["test"][1]),
},
},
},
# "record": ["qlib.workflow.record_temp.SignalRecord"]
}
# the proxy_forecast_model_task will be used to create meta tasks.
# The test date of the first task will be 2011-01-01. Each test segment will be about 20 days.
# The tasks include all training tasks and test tasks.
# 2) preparing meta dataset
kwargs = dict(
task_tpl=proxy_forecast_model_task,
step=self.step,
segments=self.segments, # keep test period consistent with the dataset yaml
trunc_days=1 + self.horizon,
hist_step_n=self.hist_step_n,
fill_method=fill_method,
rolling_ext_days=0,
)
# NOTE:
# the input of meta model (internal data) are shared between proxy model and final forecasting model
# but their task test segments are not aligned! It worked in previous experiments,
# so the misalignment does not affect the effectiveness of the method.
with self._internal_data_path.open("rb") as f:
internal_data = pickle.load(f)
md = MetaDatasetDS(exp_name=internal_data, **kwargs)
# 3) train and logging meta model
with R.start(experiment_name=self.meta_exp_name):
R.log_params(**kwargs)
mm = MetaModelDS(
step=self.step,
hist_step_n=kwargs["hist_step_n"],
lr=0.001,
max_epoch=30,
seed=43,
alpha=self.alpha,
loss_skip_thresh=self.loss_skip_thresh,
)
mm.fit(md)
R.save_objects(model=mm)
|
Train a meta model based on a simplified linear proxy model.
|
_train_meta_model
|
python
|
microsoft/qlib
|
qlib/contrib/rolling/ddgda.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/rolling/ddgda.py
|
MIT
|
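A tiny check of the date arithmetic above: the meta-model test segment starts the day after the first training end.
import pandas as pd

train_end = "2010-12-31"
test_start = (pd.Timestamp(train_end) + pd.Timedelta(days=1)).strftime("%Y-%m-%d")
# -> '2011-01-01', matching the comment about the first test date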
def get_task_list(self):
"""
Leverage meta-model for inference:
- Given
- baseline tasks
- input for meta model(internal data)
- meta model (the knowledge it learnt on the proxy forecasting model is expected to transfer to the normal forecasting model)
"""
# 1) get meta model
exp = R.get_exp(experiment_name=self.meta_exp_name)
rec = exp.list_recorders(rtype=exp.RT_L)[0]
meta_model: MetaModelDS = rec.load_object("model")
# 2)
# we transfer the knowledge of the meta model to the final forecasting tasks.
# Create a MetaTaskDataset for the final forecasting tasks;
# its settings must be aligned with the MetaTaskDataset used when training the meta model.
# 2.1) get previous config
param = rec.list_params()
trunc_days = int(param["trunc_days"])
step = int(param["step"])
hist_step_n = int(param["hist_step_n"])
fill_method = param.get("fill_method", "max")
task_l = super().get_task_list()
# 2.2) create meta dataset for final dataset
kwargs = dict(
task_tpl=task_l,
step=step,
segments=0.0, # all the tasks are for testing
trunc_days=trunc_days,
hist_step_n=hist_step_n,
fill_method=fill_method,
task_mode=MetaTask.PROC_MODE_TRANSFER,
)
with self._internal_data_path.open("rb") as f:
internal_data = pickle.load(f)
mds = MetaDatasetDS(exp_name=internal_data, **kwargs)
# 3) meta model make inference and get new qlib task
new_tasks = meta_model.inference(mds)
with self._task_path.open("wb") as f:
pickle.dump(new_tasks, f)
return new_tasks
|
Leverage meta-model for inference:
- Given
- baseline tasks
- input for meta model(internal data)
- meta model (the knowledge it learnt on the proxy forecasting model is expected to transfer to the normal forecasting model)
|
get_task_list
|
python
|
microsoft/qlib
|
qlib/contrib/rolling/ddgda.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/rolling/ddgda.py
|
MIT
|
def __init__(
self,
model,
dataset,
topk,
order_generator_cls_or_obj=OrderGenWInteract,
max_sold_weight=1.0,
risk_degree=0.95,
buy_method="first_fill",
trade_exchange=None,
level_infra=None,
common_infra=None,
**kwargs,
):
"""
Parameters
----------
topk : int
top-N stocks to buy
risk_degree : float
position percentage of total value
buy_method : str
first_fill: assign the weight to the stocks that rank high first (1/topk max)
average_fill: assign the weight to the high-ranked stocks evenly.
"""
super(SoftTopkStrategy, self).__init__(
model, dataset, order_generator_cls_or_obj, trade_exchange, level_infra, common_infra, **kwargs
)
self.topk = topk
self.max_sold_weight = max_sold_weight
self.risk_degree = risk_degree
self.buy_method = buy_method
|
Parameters
----------
topk : int
top-N stocks to buy
risk_degree : float
position percentage of total value
buy_method : str
first_fill: assign the weight to the stocks that rank high first (1/topk max)
average_fill: assign the weight to the high-ranked stocks evenly.
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/cost_control.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/cost_control.py
|
MIT
|
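A hedged construction sketch; `model` and `dataset` are assumed to be a trained qlib model and a prepared dataset, and the parameter values are illustrative only.
# hypothetical inputs: a trained model and a prepared dataset
strategy = SoftTopkStrategy(
    model=model,
    dataset=dataset,
    topk=50,
    max_sold_weight=1.0,
    risk_degree=0.95,
    buy_method="first_fill",
)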
def generate_target_weight_position(self, score, current, trade_start_time, trade_end_time):
"""
Parameters
----------
score:
pred score for this trade date, pd.Series, index is stock_id, contain 'score' column
current:
current position, use Position() class
trade_date:
trade date
generate target position from score for this date and the current position
The cache is not considered in the position
"""
# TODO:
# If the current stock list has more than topk stocks (e.g. the weights were modified
# by risk control), the weights will not be handled correctly.
buy_signal_stocks = set(score.sort_values(ascending=False).iloc[: self.topk].index)
cur_stock_weight = current.get_stock_weight_dict(only_stock=True)
if len(cur_stock_weight) == 0:
final_stock_weight = {code: 1 / self.topk for code in buy_signal_stocks}
else:
final_stock_weight = copy.deepcopy(cur_stock_weight)
sold_stock_weight = 0.0
for stock_id in final_stock_weight:
if stock_id not in buy_signal_stocks:
sw = min(self.max_sold_weight, final_stock_weight[stock_id])
sold_stock_weight += sw
final_stock_weight[stock_id] -= sw
if self.buy_method == "first_fill":
for stock_id in buy_signal_stocks:
add_weight = min(
max(1 / self.topk - final_stock_weight.get(stock_id, 0), 0.0),
sold_stock_weight,
)
final_stock_weight[stock_id] = final_stock_weight.get(stock_id, 0.0) + add_weight
sold_stock_weight -= add_weight
elif self.buy_method == "average_fill":
for stock_id in buy_signal_stocks:
final_stock_weight[stock_id] = final_stock_weight.get(stock_id, 0.0) + sold_stock_weight / len(
buy_signal_stocks
)
else:
raise ValueError("Buy method not found")
return final_stock_weight
|
Parameters
----------
score:
pred score for this trade date, pd.Series, index is stock_id, contain 'score' column
current:
current position, use Position() class
trade_date:
trade date
generate target position from score for this date and the current position
The cache is not considered in the position
|
generate_target_weight_position
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/cost_control.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/cost_control.py
|
MIT
|
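A worked toy example (made-up weights) of the rebalancing rule above with topk=2 and max_sold_weight=1.0 (so the cap has no effect here): weight is first stripped from stocks outside the buy signal, then refilled stock by stock up to 1/topk under the "first_fill" method.
topk = 2
current = {"A": 0.5, "B": 0.3, "C": 0.2}
buy_signal = {"A", "D"}                                              # top-2 scored stocks
final = {s: (w if s in buy_signal else 0.0) for s, w in current.items()}
sold = sum(w for s, w in current.items() if s not in buy_signal)     # 0.5 released from B and C
for s in buy_signal:                                                 # "first_fill": refill up to 1/topk
    add = min(max(1 / topk - final.get(s, 0.0), 0.0), sold)
    final[s] = final.get(s, 0.0) + add
    sold -= add
# final -> {"A": 0.5, "B": 0.0, "C": 0.0, "D": 0.5}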
def generate_order_list_from_target_weight_position(
self,
current: Position,
trade_exchange: Exchange,
target_weight_position: dict,
risk_degree: float,
pred_start_time: pd.Timestamp,
pred_end_time: pd.Timestamp,
trade_start_time: pd.Timestamp,
trade_end_time: pd.Timestamp,
) -> list:
"""generate_order_list_from_target_weight_position
:param current: The current position
:type current: Position
:param trade_exchange:
:type trade_exchange: Exchange
:param target_weight_position: {stock_id : weight}
:type target_weight_position: dict
:param risk_degree:
:type risk_degree: float
:param pred_start_time:
:type pred_start_time: pd.Timestamp
:param pred_end_time:
:type pred_end_time: pd.Timestamp
:param trade_start_time:
:type trade_start_time: pd.Timestamp
:param trade_end_time:
:type trade_end_time: pd.Timestamp
:rtype: list
"""
raise NotImplementedError()
|
generate_order_list_from_target_weight_position
:param current: The current position
:type current: Position
:param trade_exchange:
:type trade_exchange: Exchange
:param target_weight_position: {stock_id : weight}
:type target_weight_position: dict
:param risk_degree:
:type risk_degree: float
:param pred_start_time:
:type pred_start_time: pd.Timestamp
:param pred_end_time:
:type pred_end_time: pd.Timestamp
:param trade_start_time:
:type trade_start_time: pd.Timestamp
:param trade_end_time:
:type trade_end_time: pd.Timestamp
:rtype: list
|
generate_order_list_from_target_weight_position
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/order_generator.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/order_generator.py
|
MIT
|
def generate_order_list_from_target_weight_position(
self,
current: Position,
trade_exchange: Exchange,
target_weight_position: dict,
risk_degree: float,
pred_start_time: pd.Timestamp,
pred_end_time: pd.Timestamp,
trade_start_time: pd.Timestamp,
trade_end_time: pd.Timestamp,
) -> list:
"""generate_order_list_from_target_weight_position
No adjustment for the non-tradable shares.
All the tradable value is assigned to the tradable stocks according to the weight.
If interact == True, the price at the trade date is used to generate the order list;
otherwise, only the price before the trade date is used.
:param current:
:type current: Position
:param trade_exchange:
:type trade_exchange: Exchange
:param target_weight_position:
:type target_weight_position: dict
:param risk_degree:
:type risk_degree: float
:param pred_start_time:
:type pred_start_time: pd.Timestamp
:param pred_end_time:
:type pred_end_time: pd.Timestamp
:param trade_start_time:
:type trade_start_time: pd.Timestamp
:param trade_end_time:
:type trade_end_time: pd.Timestamp
:rtype: list
"""
if target_weight_position is None:
return []
# calculate current_tradable_value
current_amount_dict = current.get_stock_amount_dict()
current_total_value = trade_exchange.calculate_amount_position_value(
amount_dict=current_amount_dict,
start_time=trade_start_time,
end_time=trade_end_time,
only_tradable=False,
)
current_tradable_value = trade_exchange.calculate_amount_position_value(
amount_dict=current_amount_dict,
start_time=trade_start_time,
end_time=trade_end_time,
only_tradable=True,
)
# add cash
current_tradable_value += current.get_cash()
reserved_cash = (1.0 - risk_degree) * (current_total_value + current.get_cash())
current_tradable_value -= reserved_cash
if current_tradable_value < 0:
# if selling all the tradable stock cannot meet the reserved
# value, then just sell all the stocks
target_amount_dict = copy.deepcopy(current_amount_dict)
for stock_id in list(target_amount_dict.keys()):
if trade_exchange.is_stock_tradable(stock_id, start_time=trade_start_time, end_time=trade_end_time):
del target_amount_dict[stock_id]
else:
# consider cost rate
current_tradable_value /= 1 + max(trade_exchange.close_cost, trade_exchange.open_cost)
# strategy 1 : generate amount_position by weight_position
# Use API in Exchange()
target_amount_dict = trade_exchange.generate_amount_position_from_weight_position(
weight_position=target_weight_position,
cash=current_tradable_value,
start_time=trade_start_time,
end_time=trade_end_time,
)
order_list = trade_exchange.generate_order_for_target_amount_position(
target_position=target_amount_dict,
current_position=current_amount_dict,
start_time=trade_start_time,
end_time=trade_end_time,
)
return order_list
|
generate_order_list_from_target_weight_position
No adjustment for the non-tradable shares.
All the tradable value is assigned to the tradable stocks according to the weight.
If interact == True, the price at the trade date is used to generate the order list;
otherwise, only the price before the trade date is used.
:param current:
:type current: Position
:param trade_exchange:
:type trade_exchange: Exchange
:param target_weight_position:
:type target_weight_position: dict
:param risk_degree:
:type risk_degree: float
:param pred_start_time:
:type pred_start_time: pd.Timestamp
:param pred_end_time:
:type pred_end_time: pd.Timestamp
:param trade_start_time:
:type trade_start_time: pd.Timestamp
:param trade_end_time:
:type trade_end_time: pd.Timestamp
:rtype: list
|
generate_order_list_from_target_weight_position
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/order_generator.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/order_generator.py
|
MIT
|
def generate_order_list_from_target_weight_position(
self,
current: Position,
trade_exchange: Exchange,
target_weight_position: dict,
risk_degree: float,
pred_start_time: pd.Timestamp,
pred_end_time: pd.Timestamp,
trade_start_time: pd.Timestamp,
trade_end_time: pd.Timestamp,
) -> list:
"""generate_order_list_from_target_weight_position
Generate the order list directly, without using information only available at the trade date
(e.g. whether the stock can be traded, the accurate trade price).
Generating an order list from a target weight position requires the price of each target stock
on the trade date, but we cannot get that value without interacting with the exchange,
so we use the $close price at pred_date or the price recorded in the current position.
:param current:
:type current: Position
:param trade_exchange:
:type trade_exchange: Exchange
:param target_weight_position:
:type target_weight_position: dict
:param risk_degree:
:type risk_degree: float
:param pred_start_time:
:type pred_start_time: pd.Timestamp
:param pred_end_time:
:type pred_end_time: pd.Timestamp
:param trade_start_time:
:type trade_start_time: pd.Timestamp
:param trade_end_time:
:type trade_end_time: pd.Timestamp
:rtype: list of generated orders
"""
if target_weight_position is None:
return []
risk_total_value = risk_degree * current.calculate_value()
current_stock = current.get_stock_list()
amount_dict = {}
for stock_id in target_weight_position:
# The current rule ignores stocks that are not held and cannot be traded at the predict date
if trade_exchange.is_stock_tradable(
stock_id=stock_id, start_time=trade_start_time, end_time=trade_end_time
) and trade_exchange.is_stock_tradable(
stock_id=stock_id, start_time=pred_start_time, end_time=pred_end_time
):
amount_dict[stock_id] = (
risk_total_value
* target_weight_position[stock_id]
/ trade_exchange.get_close(stock_id, start_time=pred_start_time, end_time=pred_end_time)
)
# TODO: Qlib use None to represent trading suspension.
# So last close price can't be the estimated trading price.
# Maybe a close price with forward fill will be a better solution.
elif stock_id in current_stock:
amount_dict[stock_id] = (
risk_total_value * target_weight_position[stock_id] / current.get_stock_price(stock_id)
)
else:
continue
order_list = trade_exchange.generate_order_for_target_amount_position(
target_position=amount_dict,
current_position=current.get_stock_amount_dict(),
start_time=trade_start_time,
end_time=trade_end_time,
)
return order_list
|
generate_order_list_from_target_weight_position
Generate the order list directly, without using information only available at the trade date
(e.g. whether the stock can be traded, the accurate trade price).
Generating an order list from a target weight position requires the price of each target stock
on the trade date, but we cannot get that value without interacting with the exchange,
so we use the $close price at pred_date or the price recorded in the current position.
:param current:
:type current: Position
:param trade_exchange:
:type trade_exchange: Exchange
:param target_weight_position:
:type target_weight_position: dict
:param risk_degree:
:type risk_degree: float
:param pred_start_time:
:type pred_start_time: pd.Timestamp
:param pred_end_time:
:type pred_end_time: pd.Timestamp
:param trade_start_time:
:type trade_start_time: pd.Timestamp
:param trade_end_time:
:type trade_end_time: pd.Timestamp
:rtype: list of generated orders
|
generate_order_list_from_target_weight_position
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/order_generator.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/order_generator.py
|
MIT
|
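Toy arithmetic (made-up numbers) for the target amount computed above: amount = risk_degree * total value * target weight / reference close price.
risk_total_value = 0.95 * 1_000_000       # risk_degree * current.calculate_value()
target_weight = 0.02
close_price = 12.5
amount = risk_total_value * target_weight / close_price   # -> 1520.0 shares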
def __init__(
self,
outer_trade_decision: BaseTradeDecision = None,
instruments: Union[List, str] = "csi300",
freq: str = "day",
trade_exchange: Exchange = None,
level_infra: LevelInfrastructure = None,
common_infra: CommonInfrastructure = None,
**kwargs,
):
"""
Parameters
----------
instruments : Union[List, str], optional
instruments of EMA signal, by default "csi300"
freq : str, optional
freq of EMA signal, by default "day"
Note: `freq` may be different from `time_per_step`
"""
if instruments is None:
warnings.warn("`instruments` is not set, will load all stocks")
self.instruments = "all"
elif isinstance(instruments, str):
self.instruments = D.instruments(instruments)
elif isinstance(instruments, List):
self.instruments = instruments
self.freq = freq
super(SBBStrategyEMA, self).__init__(
outer_trade_decision, level_infra, common_infra, trade_exchange=trade_exchange, **kwargs
)
|
Parameters
----------
instruments : Union[List, str], optional
instruments of EMA signal, by default "csi300"
freq : str, optional
freq of EMA signal, by default "day"
Note: `freq` may be different from `time_per_step`
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/rule_strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/rule_strategy.py
|
MIT
|
def __init__(
self,
lamb: float = 1e-6,
eta: float = 2.5e-6,
window_size: int = 20,
outer_trade_decision: BaseTradeDecision = None,
instruments: Union[List, str] = "csi300",
freq: str = "day",
trade_exchange: Exchange = None,
level_infra: LevelInfrastructure = None,
common_infra: CommonInfrastructure = None,
**kwargs,
):
"""
Parameters
----------
instruments : Union[List, str], optional
instruments of Volatility, by default "csi300"
freq : str, optional
freq of Volatility, by default "day"
Note: `freq` may be different from `time_per_step`
"""
self.lamb = lamb
self.eta = eta
self.window_size = window_size
if instruments is None:
warnings.warn("`instruments` is not set, will load all stocks")
self.instruments = "all"
if isinstance(instruments, str):
self.instruments = D.instruments(instruments)
self.freq = freq
super(ACStrategy, self).__init__(
outer_trade_decision, level_infra, common_infra, trade_exchange=trade_exchange, **kwargs
)
|
Parameters
----------
instruments : Union[List, str], optional
instruments of Volatility, by default "csi300"
freq : str, optional
freq of Volatility, by default "day"
Note: `freq` may be different from `time_per_step`
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/rule_strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/rule_strategy.py
|
MIT
|
def __init__(
self,
trade_range: Union[Tuple[int, int], TradeRange], # The range is closed on both left and right.
sample_ratio: float = 1.0,
volume_ratio: float = 0.01,
market: str = "all",
direction: int = Order.BUY,
*args,
**kwargs,
):
"""
Parameters
----------
trade_range : Tuple
please refer to the `trade_range` parameter of BaseStrategy
sample_ratio : float
the ratio of all orders that are sampled
volume_ratio : float
the ratio of the total volume of a specific day
market : str
stock pool for sampling
"""
super().__init__(*args, **kwargs)
self.sample_ratio = sample_ratio
self.volume_ratio = volume_ratio
self.market = market
self.direction = direction
exch: Exchange = self.common_infra.get("trade_exchange")
# TODO: this can't be online
self.volume = D.features(
D.instruments(market), ["Mean(Ref($volume, 1), 10)"], start_time=exch.start_time, end_time=exch.end_time
)
self.volume_df = self.volume.iloc[:, 0].unstack()
self.trade_range = trade_range
|
Parameters
----------
trade_range : Tuple
please refer to the `trade_range` parameter of BaseStrategy
sample_ratio : float
the ratio of all orders that are sampled
volume_ratio : float
the ratio of the total volume of a specific day
market : str
stock pool for sampling
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/rule_strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/rule_strategy.py
|
MIT
|
def generate_trade_decision(self, execute_result=None) -> TradeDecisionWO:
"""
Parameters
----------
execute_result :
execute_result will be ignored in FileOrderStrategy
"""
oh: OrderHelper = self.common_infra.get("trade_exchange").get_order_helper()
start, _ = self.trade_calendar.get_step_time()
# CONVERSION: the bar is indexed by the time
try:
df = self.order_df.loc(axis=0)[start]
except KeyError:
return TradeDecisionWO([], self)
else:
order_list = []
for idx, row in df.iterrows():
order_list.append(
oh.create(
code=idx,
amount=row["amount"],
direction=Order.parse_dir(row["direction"]),
)
)
return TradeDecisionWO(order_list, self, self.trade_range)
|
Parameters
----------
execute_result :
execute_result will be ignored in FileOrderStrategy
|
generate_trade_decision
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/rule_strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/rule_strategy.py
|
MIT
|
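A hedged sketch of the order frame consumed above; the (datetime, instrument) index and the 'amount'/'direction' columns are inferred from the `.loc[start]` and `iterrows()` usage, not from the FileOrderStrategy file format itself.
import pandas as pd

order_df = pd.DataFrame(
    {"amount": [1000, 500], "direction": ["buy", "sell"]},
    index=pd.MultiIndex.from_tuples(
        [(pd.Timestamp("2020-01-02"), "SH600000"), (pd.Timestamp("2020-01-02"), "SZ000001")],
        names=["datetime", "instrument"],
    ),
)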
def __init__(
self,
*,
signal: Union[Signal, Tuple[BaseModel, Dataset], List, Dict, Text, pd.Series, pd.DataFrame] = None,
model=None,
dataset=None,
risk_degree: float = 0.95,
trade_exchange=None,
level_infra=None,
common_infra=None,
**kwargs,
):
"""
Parameters
-----------
signal :
the information to describe a signal. Please refer to the docs of `qlib.backtest.signal.create_signal_from`
the decision of the strategy will be based on the given signal
risk_degree : float
position percentage of total value.
trade_exchange : Exchange
exchange that provides market info, used to deal order and generate report
- If `trade_exchange` is None, self.trade_exchange will be set with common_infra
- It allows different trade_exchanges to be used in different executions.
- For example:
- In daily execution, both daily exchange and minutely are usable, but the daily exchange is recommended because it runs faster.
- In minutely execution, the daily exchange is not usable, only the minutely exchange is recommended.
"""
super().__init__(level_infra=level_infra, common_infra=common_infra, trade_exchange=trade_exchange, **kwargs)
self.risk_degree = risk_degree
# This is trying to be compatible with previous version of qlib task config
if model is not None and dataset is not None:
warnings.warn("`model` `dataset` is deprecated; use `signal`.", DeprecationWarning)
signal = model, dataset
self.signal: Signal = create_signal_from(signal)
|
Parameters
-----------
signal :
the information to describe a signal. Please refer to the docs of `qlib.backtest.signal.create_signal_from`
the decision of the strategy will be based on the given signal
risk_degree : float
position percentage of total value.
trade_exchange : Exchange
exchange that provides market info, used to deal order and generate report
- If `trade_exchange` is None, self.trade_exchange will be set with common_infra
- It allows different trade_exchanges to be used in different executions.
- For example:
- In daily execution, both the daily exchange and the minutely exchange are usable, but the daily exchange is recommended because it runs faster.
- In minutely execution, the daily exchange is not usable, only the minutely exchange is recommended.
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/signal_strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/signal_strategy.py
|
MIT
|
def __init__(
self,
*,
order_generator_cls_or_obj=OrderGenWOInteract,
**kwargs,
):
"""
signal :
the information to describe a signal. Please refer to the docs of `qlib.backtest.signal.create_signal_from`
the decision of the strategy will be based on the given signal
trade_exchange : Exchange
exchange that provides market info, used to deal order and generate report
- If `trade_exchange` is None, self.trade_exchange will be set with common_infra
- It allows different trade_exchanges to be used in different executions.
- For example:
- In daily execution, both the daily exchange and the minutely exchange are usable, but the daily exchange is recommended because it runs faster.
- In minutely execution, the daily exchange is not usable, only the minutely exchange is recommended.
"""
super().__init__(**kwargs)
if isinstance(order_generator_cls_or_obj, type):
self.order_generator: OrderGenerator = order_generator_cls_or_obj()
else:
self.order_generator: OrderGenerator = order_generator_cls_or_obj
|
signal :
the information to describe a signal. Please refer to the docs of `qlib.backtest.signal.create_signal_from`
the decision of the strategy will be based on the given signal
trade_exchange : Exchange
exchange that provides market info, used to deal order and generate report
- If `trade_exchange` is None, self.trade_exchange will be set with common_infra
- It allows different trade_exchanges to be used in different executions.
- For example:
- In daily execution, both the daily exchange and the minutely exchange are usable, but the daily exchange is recommended because it runs faster.
- In minutely execution, the daily exchange is not usable, only the minutely exchange is recommended.
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/signal_strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/signal_strategy.py
|
MIT
|
def __call__(
self,
r: np.ndarray,
F: np.ndarray,
cov_b: np.ndarray,
var_u: np.ndarray,
w0: np.ndarray,
wb: np.ndarray,
mfh: Optional[np.ndarray] = None,
mfs: Optional[np.ndarray] = None,
) -> np.ndarray:
"""
Args:
r (np.ndarray): expected returns
F (np.ndarray): factor exposure
cov_b (np.ndarray): factor covariance
var_u (np.ndarray): residual variance
w0 (np.ndarray): current holding weights
wb (np.ndarray): benchmark weights
mfh (np.ndarray): mask force holding
mfs (np.ndarray): mask force selling
Returns:
np.ndarray: optimized portfolio allocation
"""
# scale return to match volatility
if self.scale_return:
r = r / r.std()
r *= np.sqrt(np.mean(np.diag(F @ cov_b @ F.T) + var_u))
# target weight
w = cp.Variable(len(r), nonneg=True)
w.value = wb # for warm start
# precompute exposure
d = w - wb # benchmark exposure
v = d @ F # factor exposure
# objective
ret = d @ r # excess return
risk = cp.quad_form(v, cov_b) + var_u @ (d**2) # tracking error
obj = cp.Maximize(ret - self.lamb * risk)
# weight bounds
lb = np.zeros_like(wb)
ub = np.ones_like(wb)
# bench bounds
if self.b_dev is not None:
lb = np.maximum(lb, wb - self.b_dev)
ub = np.minimum(ub, wb + self.b_dev)
# force holding
if mfh is not None:
lb[mfh] = w0[mfh]
ub[mfh] = w0[mfh]
# force selling
# NOTE: this will override mfh
if mfs is not None:
lb[mfs] = 0
ub[mfs] = 0
# constraints
# TODO: currently we assume full investment in the stocks,
# in the future we should support holding cash as an asset
cons = [cp.sum(w) == 1, w >= lb, w <= ub]
# factor deviation
if self.f_dev is not None:
cons.extend([v >= -self.f_dev, v <= self.f_dev]) # pylint: disable=E1130
# total turnover constraint
t_cons = []
if self.delta is not None:
if w0 is not None and w0.sum() > 0:
t_cons.extend([cp.norm(w - w0, 1) <= self.delta])
# optimize
# trial 1: use all constraints
success = False
try:
prob = cp.Problem(obj, cons + t_cons)
prob.solve(solver=cp.ECOS, warm_start=True, **self.solver_kwargs)
assert prob.status == "optimal"
success = True
except Exception as e:
logger.warning(f"trial 1 failed {e} (status: {prob.status})")
# trial 2: remove turnover constraint
if not success and len(t_cons):
logger.info("try removing turnover constraint as the last optimization failed")
try:
w.value = wb
prob = cp.Problem(obj, cons)
prob.solve(solver=cp.ECOS, warm_start=True, **self.solver_kwargs)
assert prob.status in ["optimal", "optimal_inaccurate"]
success = True
except Exception as e:
logger.warning(f"trial 2 failed {e} (status: {prob.status})")
# return current weight if not success
if not success:
logger.warning("optimization failed, will return current holding weight")
return w0
if prob.status == "optimal_inaccurate":
logger.warning(f"the optimization is inaccurate")
# remove small weight
w = np.asarray(w.value)
w[w < self.epsilon] = 0
w /= w.sum()
return w
|
Args:
r (np.ndarray): expected returns
F (np.ndarray): factor exposure
cov_b (np.ndarray): factor covariance
var_u (np.ndarray): residual variance
w0 (np.ndarray): current holding weights
wb (np.ndarray): benchmark weights
mfh (np.ndarray): mask force holding
mfs (np.ndarray): mask force selling
Returns:
np.ndarray: optimized portfolio allocation
|
__call__
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/optimizer/enhanced_indexing.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/optimizer/enhanced_indexing.py
|
MIT
|
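A stripped-down, standalone cvxpy sketch of the core problem solved above: maximize active return minus a risk-aversion multiple of tracking error, with fully-invested long-only weights and a per-name deviation band. The factor model, turnover constraint and force-hold/sell masks are omitted, and `lamb`/`b_dev` are illustrative values rather than the class's real defaults:

import cvxpy as cp
import numpy as np

rng = np.random.default_rng(0)
n = 20
r = rng.normal(0, 0.01, n)                  # toy expected returns
A = rng.normal(size=(n, 5))
S = A @ A.T / 5 + np.eye(n) * 1e-3          # toy positive-definite covariance
wb = np.full(n, 1.0 / n)                    # benchmark weights
lamb, b_dev = 10.0, 0.02                    # risk aversion, per-name deviation bound

w = cp.Variable(n, nonneg=True)
d = w - wb                                  # active (benchmark-relative) weights
obj = cp.Maximize(d @ r - lamb * cp.quad_form(d, S))
cons = [cp.sum(w) == 1, w >= wb - b_dev, w <= wb + b_dev]
prob = cp.Problem(obj, cons)
prob.solve()
print(prob.status, np.round(w.value, 4))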
def __call__(
self,
S: Union[np.ndarray, pd.DataFrame],
r: Optional[Union[np.ndarray, pd.Series]] = None,
w0: Optional[Union[np.ndarray, pd.Series]] = None,
) -> Union[np.ndarray, pd.Series]:
"""
Args:
S (np.ndarray or pd.DataFrame): covariance matrix
r (np.ndarray or pd.Series): expected return
w0 (np.ndarray or pd.Series): initial weights (for turnover control)
Returns:
np.ndarray or pd.Series: optimized portfolio allocation
"""
# transform dataframe into array
index = None
if isinstance(S, pd.DataFrame):
index = S.index
S = S.values
# transform return
if r is not None:
assert len(r) == len(S), "`r` has mismatched shape"
if isinstance(r, pd.Series):
assert r.index.equals(index), "`r` has mismatched index"
r = r.values
# transform initial weights
if w0 is not None:
assert len(w0) == len(S), "`w0` has mismatched shape"
if isinstance(w0, pd.Series):
assert w0.index.equals(index), "`w0` has mismatched index"
w0 = w0.values
# scale return to match volatility
if r is not None and self.scale_return:
r = r / r.std()
r *= np.sqrt(np.mean(np.diag(S)))
# optimize
w = self._optimize(S, r, w0)
# restore index if needed
if index is not None:
w = pd.Series(w, index=index)
return w
|
Args:
S (np.ndarray or pd.DataFrame): covariance matrix
r (np.ndarray or pd.Series): expected return
w0 (np.ndarray or pd.Series): initial weights (for turnover control)
Returns:
np.ndarray or pd.Series: optimized portfolio allocation
|
__call__
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/optimizer/optimizer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/optimizer/optimizer.py
|
MIT
|
def _optimize_mvo(
self, S: np.ndarray, r: Optional[np.ndarray] = None, w0: Optional[np.ndarray] = None
) -> np.ndarray:
"""optimize mean-variance portfolio
This method solves the following optimization problem
min_w - w' r + lamb * w' S w
s.t. w >= 0, sum(w) == 1
where `S` is the covariance matrix, `r` is the expected returns,
and `lamb` is the risk aversion parameter.
"""
return self._solve(len(S), self._get_objective_mvo(S, r), *self._get_constrains(w0))
|
optimize mean-variance portfolio
This method solves the following optimization problem
min_w - w' r + lamb * w' S w
s.t. w >= 0, sum(w) == 1
where `S` is the covariance matrix, `r` is the expected returns,
and `lamb` is the risk aversion parameter.
|
_optimize_mvo
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/optimizer/optimizer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/optimizer/optimizer.py
|
MIT
|
def _get_objective_gmv(self, S: np.ndarray) -> Callable:
"""global minimum variance optimization objective
Optimization objective
min_w w' S w
"""
def func(x):
return x @ S @ x
return func
|
global minimum variance optimization objective
Optimization objective
min_w w' S w
|
_get_objective_gmv
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/optimizer/optimizer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/optimizer/optimizer.py
|
MIT
|
def _get_objective_mvo(self, S: np.ndarray, r: np.ndarray = None) -> Callable:
"""mean-variance optimization objective
Optimization objective
min_w - w' r + lamb * w' S w
"""
def func(x):
risk = x @ S @ x
ret = x @ r
return -ret + self.lamb * risk
return func
|
mean-variance optimization objective
Optimization objective
min_w - w' r + lamb * w' S w
|
_get_objective_mvo
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/optimizer/optimizer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/optimizer/optimizer.py
|
MIT
|
def _get_objective_rp(self, S: np.ndarray) -> Callable:
"""risk-parity optimization objective
Optimization objective
min_w sum_i [w_i - (w' S w) / ((S w)_i * N)]**2
"""
def func(x):
N = len(x)
Sx = S @ x
xSx = x @ Sx
return np.sum((x - xSx / Sx / N) ** 2)
return func
|
risk-parity optimization objective
Optimization objective
min_w sum_i [w_i - (w' S w) / ((S w)_i * N)]**2
|
_get_objective_rp
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/optimizer/optimizer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/optimizer/optimizer.py
|
MIT
|
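A quick standalone check of the risk-parity objective above: minimizing it under long-only, fully-invested constraints should equalize the risk contributions w_i * (S w)_i. A scipy sketch on a toy diagonal covariance (the data is illustrative only):

import numpy as np
import scipy.optimize as so

S = np.diag([0.04, 0.09, 0.16])            # toy covariance: vols of 20%, 30%, 40%

def rp_objective(x):                       # same formula as the objective above
    Sx = S @ x
    xSx = x @ Sx
    return np.sum((x - xSx / Sx / len(x)) ** 2)

res = so.minimize(
    rp_objective,
    np.ones(3) / 3,
    bounds=so.Bounds(0.0, 1.0),
    constraints=[{"type": "eq", "fun": lambda x: np.sum(x) - 1}],
)
w = res.x
print(np.round(w, 4))                      # more weight on the low-volatility asset
print(np.round(w * (S @ w), 6))            # risk contributions come out (nearly) equal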
def _get_constrains(self, w0: Optional[np.ndarray] = None):
"""optimization constraints
Defines the following constraints:
- no shorting and leverage: 0 <= w <= 1
- full investment: sum(w) == 1
- turnover constraint: |w - w0| <= delta
"""
# no shorting and leverage
bounds = so.Bounds(0.0, 1.0)
# full investment constraint
cons = [{"type": "eq", "fun": lambda x: np.sum(x) - 1}] # == 0
# turnover constraint
if w0 is not None:
cons.append({"type": "ineq", "fun": lambda x: self.delta - np.sum(np.abs(x - w0))}) # >= 0
return bounds, cons
|
optimization constraints
Defines the following constraints:
- no shorting and leverage: 0 <= w <= 1
- full investment: sum(w) == 1
- turnover constraint: |w - w0| <= delta
|
_get_constrains
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/optimizer/optimizer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/optimizer/optimizer.py
|
MIT
|
def _solve(self, n: int, obj: Callable, bounds: so.Bounds, cons: List) -> np.ndarray:
"""solve optimization
Args:
n (int): number of parameters
obj (callable): optimization objective
bounds (Bounds): bounds of parameters
cons (list): optimization constraints
"""
# add l2 regularization
wrapped_obj = obj
if self.alpha > 0:
def opt_obj(x):
return obj(x) + self.alpha * np.sum(np.square(x))
wrapped_obj = opt_obj
# solve
x0 = np.ones(n) / n # init results
sol = so.minimize(wrapped_obj, x0, bounds=bounds, constraints=cons, tol=self.tol)
if not sol.success:
warnings.warn(f"optimization not success ({sol.status})")
return sol.x
|
solve optimization
Args:
n (int): number of parameters
obj (callable): optimization objective
bounds (Bounds): bounds of parameters
cons (list): optimization constraints
|
_solve
|
python
|
microsoft/qlib
|
qlib/contrib/strategy/optimizer/optimizer.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/strategy/optimizer/optimizer.py
|
MIT
|
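Putting the pieces above together (objective, bounds, full-investment and turnover constraints, plus the l2 term added in `_solve`), here is a self-contained scipy sketch of the same solve pattern on toy data; `lamb`, `delta` and `alpha` are illustrative values, not the optimizer's defaults:

import numpy as np
import scipy.optimize as so

rng = np.random.default_rng(0)
n = 5
A = rng.normal(size=(n, n))
S = A @ A.T / n                            # toy covariance
r = rng.normal(0.0, 0.02, n)               # toy expected returns
w0 = np.ones(n) / n                        # current holdings
lamb, delta, alpha = 1.0, 0.3, 1e-4        # risk aversion, turnover cap, l2 regularization

def obj(x):                                # mean-variance objective plus l2 penalty
    return -x @ r + lamb * (x @ S @ x) + alpha * np.sum(np.square(x))

bounds = so.Bounds(0.0, 1.0)               # no shorting, no leverage
cons = [
    {"type": "eq", "fun": lambda x: np.sum(x) - 1},                     # fully invested
    {"type": "ineq", "fun": lambda x: delta - np.sum(np.abs(x - w0))},  # turnover cap
]
sol = so.minimize(obj, w0, bounds=bounds, constraints=cons)
print(sol.success, np.round(sol.x, 4))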
def __init__(self, config, TUNER_CONFIG_MANAGER):
"""
:param config: The config dict for tuner experiment
:param TUNER_CONFIG_MANAGER: The tuner config manager
"""
self.name = config.get("name", "tuner_experiment")
# The dir of the config
self.global_dir = config.get("dir", os.path.dirname(TUNER_CONFIG_MANAGER.config_path))
# The dir of the result of tuner experiment
self.tuner_ex_dir = config.get("tuner_ex_dir", os.path.join(self.global_dir, self.name))
if not os.path.exists(self.tuner_ex_dir):
os.makedirs(self.tuner_ex_dir)
# The dir of the results of all estimator experiments
self.estimator_ex_dir = config.get("estimator_ex_dir", os.path.join(self.tuner_ex_dir, "estimator_experiment"))
if not os.path.exists(self.estimator_ex_dir):
os.makedirs(self.estimator_ex_dir)
# Get the tuner type
self.tuner_module_path = config.get("tuner_module_path", "qlib.contrib.tuner.tuner")
self.tuner_class = config.get("tuner_class", "QLibTuner")
# Save the tuner experiment for further view
tuner_ex_config_path = os.path.join(self.tuner_ex_dir, "tuner_config.yaml")
with open(tuner_ex_config_path, "w") as fp:
yaml.dump(TUNER_CONFIG_MANAGER.config, fp)
|
:param config: The config dict for tuner experiment
:param TUNER_CONFIG_MANAGER: The tuner config manager
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/tuner/config.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/tuner/config.py
|
MIT
|
def init_tuner(self, tuner_index, tuner_config):
"""
Implement this method to build the tuner by config
return: tuner
"""
# 1. Add experiment config in tuner_config
tuner_config["experiment"] = {
"name": "estimator_experiment_{}".format(tuner_index),
"id": tuner_index,
"dir": self.pipeline_ex_config.estimator_ex_dir,
"observer_type": "file_storage",
}
tuner_config["qlib_client"] = self.qlib_client_config
# 2. Add data config in tuner_config
tuner_config["data"] = self.data_config
# 3. Add backtest config in tuner_config
tuner_config["backtest"] = self.backtest_config
# 4. Update trainer in tuner_config
tuner_config["trainer"].update({"args": self.time_config})
# 5. Import Tuner class
tuner_module = get_module_by_module_path(self.pipeline_ex_config.tuner_module_path)
tuner_class = getattr(tuner_module, self.pipeline_ex_config.tuner_class)
# 6. Return the specific tuner
return tuner_class(tuner_config, self.optim_config)
|
Implement this method to build the tuner by config
return: tuner
|
init_tuner
|
python
|
microsoft/qlib
|
qlib/contrib/tuner/pipeline.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/tuner/pipeline.py
|
MIT
|
def load(self, instrument, start_index, end_index, *args):
"""load feature
This function is responsible for loading feature/expression based on the expression engine.
The concrete implementation will be separated into two parts:
1) caching data and handling errors.
- This part is shared by all the expressions and implemented in Expression
2) processing and calculating data based on the specific expression.
- This part is different in each expression and implemented in each expression
Expression Engine is shared by different data.
Different data will have different extra information for `args`.
Parameters
----------
instrument : str
instrument code.
start_index : str
feature start index [in calendar].
end_index : str
feature end index [in calendar].
*args may contain following information:
1) if it is used in basic expression engine data, it contains the following arguments
freq: str
feature frequency.
2) if it is used in PIT data, it contains the following arguments
cur_pit:
it is designed for the point-in-time data.
period: int
This is used for query specific period.
The period is represented with int in Qlib. (e.g. 202001 may represent the first quarter in 2020)
Returns
----------
pd.Series
feature series: The index of the series is the calendar index
"""
from .cache import H # pylint: disable=C0415
# cache
cache_key = str(self), instrument, start_index, end_index, *args
if cache_key in H["f"]:
return H["f"][cache_key]
if start_index is not None and end_index is not None and start_index > end_index:
raise ValueError("Invalid index range: {} {}".format(start_index, end_index))
try:
series = self._load_internal(instrument, start_index, end_index, *args)
except Exception as e:
get_module_logger("data").debug(
f"Loading data error: instrument={instrument}, expression={str(self)}, "
f"start_index={start_index}, end_index={end_index}, args={args}. "
f"error info: {str(e)}"
)
raise
series.name = str(self)
H["f"][cache_key] = series
return series
|
load feature
This function is responsible for loading feature/expression based on the expression engine.
The concrete implementation will be separated into two parts:
1) caching data and handling errors.
- This part is shared by all the expressions and implemented in Expression
2) processing and calculating data based on the specific expression.
- This part is different in each expression and implemented in each expression
Expression Engine is shared by different data.
Different data will have different extra information for `args`.
Parameters
----------
instrument : str
instrument code.
start_index : str
feature start index [in calendar].
end_index : str
feature end index [in calendar].
*args may contain following information:
1) if it is used in basic expression engine data, it contains the following arguments
freq: str
feature frequency.
2) if it is used in PIT data, it contains the following arguments
cur_pit:
it is designed for the point-in-time data.
period: int
This is used for query specific period.
The period is represented with int in Qlib. (e.g. 202001 may represent the first quarter in 2020)
Returns
----------
pd.Series
feature series: The index of the series is the calendar index
|
load
|
python
|
microsoft/qlib
|
qlib/data/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/base.py
|
MIT
|
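The caching in `load` keys an in-memory dict on the expression's string form plus the query arguments, so repeated queries return the same object. A tiny standalone sketch of that memoization pattern (`compute` is a hypothetical stand-in for `_load_internal`, not qlib's expression engine):

import pandas as pd

_cache = {}                                # plays the role of H["f"]

def compute(expr, instrument, start, end): # hypothetical stand-in for _load_internal
    return pd.Series(range(start, end + 1), name=expr)

def load(expr, instrument, start, end, *args):
    key = (expr, instrument, start, end, *args)
    if key in _cache:                      # hit: reuse the previously computed series
        return _cache[key]
    series = compute(expr, instrument, start, end)
    _cache[key] = series
    return series

s1 = load("Mean($close, 5)", "SH600000", 0, 9)
s2 = load("Mean($close, 5)", "SH600000", 0, 9)
print(s1 is s2)                            # True: the second call is served from the cache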
def get_longest_back_rolling(self):
"""Get the longest length of historical data the feature has accessed
This is designed for getting the needed range of the data to calculate
the features in specific range at first. However, situations like
Ref(Ref($close, -1), 1) cannot be handled correctly.
So this will only be used for detecting the length of historical data needed.
"""
# TODO: forward operator like Ref($close, -1) is not supported yet.
raise NotImplementedError("This function must be implemented in your newly defined feature")
|
Get the longest length of historical data the feature has accessed
This is designed for getting the needed range of the data to calculate
the features in specific range at first. However, situations like
Ref(Ref($close, -1), 1) cannot be handled correctly.
So this will only be used for detecting the length of historical data needed.
|
get_longest_back_rolling
|
python
|
microsoft/qlib
|
qlib/data/base.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/base.py
|
MIT
|
def get_cache(mem_cache, key):
"""get mem cache
:param mem_cache: MemCache attribute('c'/'i'/'f').
:param key: cache key.
:return: tuple of (cache value, expire flag); the value is None if the key does not exist.
"""
value = None
expire = False
if key in mem_cache:
value, latest_time = mem_cache[key]
expire = (time.time() - latest_time) > MemCacheExpire.CACHE_EXPIRE
return value, expire
|
get mem cache
:param mem_cache: MemCache attribute('c'/'i'/'f').
:param key: cache key.
:return: tuple of (cache value, expire flag); the value is None if the key does not exist.
|
get_cache
|
python
|
microsoft/qlib
|
qlib/data/cache.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/cache.py
|
MIT
|
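`get_cache` stores `(value, insertion_time)` pairs and reports expiry by comparing the entry's age against a TTL. A stdlib-only sketch of the same pattern with an illustrative 60-second TTL (the real expiry constant lives in `MemCacheExpire`):

import time

CACHE_EXPIRE = 60                          # illustrative TTL in seconds
mem_cache = {}

def set_cache(key, value):
    mem_cache[key] = (value, time.time())  # remember when the value was stored

def get_cache(key):
    value, expire = None, False
    if key in mem_cache:
        value, latest_time = mem_cache[key]
        expire = (time.time() - latest_time) > CACHE_EXPIRE
    return value, expire

set_cache("calendar_day", ["2020-01-02", "2020-01-03"])
print(get_cache("calendar_day"))           # (value, False) while the entry is still fresh
print(get_cache("missing_key"))            # (None, False) when the key is absent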
def expression(self, instrument, field, start_time, end_time, freq):
"""Get expression data.
.. note:: Same interface as `expression` method in expression provider
"""
try:
return self._expression(instrument, field, start_time, end_time, freq)
except NotImplementedError:
return self.provider.expression(instrument, field, start_time, end_time, freq)
|
Get expression data.
.. note:: Same interface as `expression` method in expression provider
|
expression
|
python
|
microsoft/qlib
|
qlib/data/cache.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/cache.py
|
MIT
|
def dataset(
self, instruments, fields, start_time=None, end_time=None, freq="day", disk_cache=1, inst_processors=[]
):
"""Get feature dataset.
.. note:: Same interface as `dataset` method in dataset provider
.. note:: The server uses redis_lock to make sure
read-write conflicts will not be triggered
but client readers are not considered.
"""
if disk_cache == 0:
# skip cache
return self.provider.dataset(
instruments, fields, start_time, end_time, freq, inst_processors=inst_processors
)
else:
# use and replace cache
try:
return self._dataset(
instruments, fields, start_time, end_time, freq, disk_cache, inst_processors=inst_processors
)
except NotImplementedError:
return self.provider.dataset(
instruments, fields, start_time, end_time, freq, inst_processors=inst_processors
)
|
Get feature dataset.
.. note:: Same interface as `dataset` method in dataset provider
.. note:: The server uses redis_lock to make sure
read-write conflicts will not be triggered
but client readers are not considered.
|
dataset
|
python
|
microsoft/qlib
|
qlib/data/cache.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/cache.py
|
MIT
|
def _dataset(
self, instruments, fields, start_time=None, end_time=None, freq="day", disk_cache=1, inst_processors=[]
):
"""Get feature dataset using cache.
Override this method to define how to get feature dataset corresponding to users' own cache mechanism.
"""
raise NotImplementedError("Implement this method if you want to use dataset feature cache")
|
Get feature dataset using cache.
Override this method to define how to get feature dataset corresponding to users' own cache mechanism.
|
_dataset
|
python
|
microsoft/qlib
|
qlib/data/cache.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/cache.py
|
MIT
|
def _dataset_uri(
self, instruments, fields, start_time=None, end_time=None, freq="day", disk_cache=1, inst_processors=[]
):
"""Get a uri of feature dataset using cache.
Specifically:
disk_cache=1 means using the dataset cache and returning the uri of the cache file.
disk_cache=0 means the client knows the path of the expression cache;
the server checks if the cache exists (if not, generates it), and the client loads data by itself.
Override this method to define how to get feature dataset uri corresponding to users' own cache mechanism.
"""
raise NotImplementedError(
"Implement this method if you want to use dataset feature cache as a cache file for client"
)
|
Get a uri of feature dataset using cache.
Specifically:
disk_cache=1 means using the dataset cache and returning the uri of the cache file.
disk_cache=0 means the client knows the path of the expression cache;
the server checks if the cache exists (if not, generates it), and the client loads data by itself.
Override this method to define how to get feature dataset uri corresponding to users' own cache mechanism.
|
_dataset_uri
|
python
|
microsoft/qlib
|
qlib/data/cache.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/cache.py
|
MIT
|
def cache_to_origin_data(data, fields):
"""cache data to origin data
:param data: pd.DataFrame, cache data.
:param fields: feature fields.
:return: pd.DataFrame.
"""
not_space_fields = remove_fields_space(fields)
data = data.loc[:, not_space_fields]
# set features fields
data.columns = [str(i) for i in fields]
return data
|
cache data to origin data
:param data: pd.DataFrame, cache data.
:param fields: feature fields.
:return: pd.DataFrame.
|
cache_to_origin_data
|
python
|
microsoft/qlib
|
qlib/data/cache.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/cache.py
|
MIT
|
def gen_expression_cache(self, expression_data, cache_path, instrument, field, freq, last_update):
"""use bin file to save like feature-data."""
# Make sure the cache runs right when the directory is deleted
# while running
meta = {
"info": {"instrument": instrument, "field": field, "freq": freq, "last_update": last_update},
"meta": {"last_visit": time.time(), "visits": 1},
}
self.logger.debug(f"generating expression cache: {meta}")
self.clear_cache(cache_path)
meta_path = cache_path.with_suffix(".meta")
with meta_path.open("wb") as f:
pickle.dump(meta, f, protocol=C.dump_protocol_version)
meta_path.chmod(stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
df = expression_data.to_frame()
r = np.hstack([df.index[0], expression_data]).astype("<f")
r.tofile(str(cache_path))
|
use bin file to save like feature-data.
|
gen_expression_cache
|
python
|
microsoft/qlib
|
qlib/data/cache.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/cache.py
|
MIT
|
def read_data_from_cache(cls, cache_path: Union[str, Path], start_time, end_time, fields):
"""read_cache_from
This function can read data from the disk cache dataset
:param cache_path:
:param start_time:
:param end_time:
:param fields: The fields order of the dataset cache is sorted. So rearrange the columns to make it consistent.
:return:
"""
im = DiskDatasetCache.IndexManager(cache_path)
index_data = im.get_index(start_time, end_time)
if index_data.shape[0] > 0:
start, stop = (
index_data["start"].iloc[0].item(),
index_data["end"].iloc[-1].item(),
)
else:
start = stop = 0
with pd.HDFStore(cache_path, mode="r") as store:
if "/{}".format(im.KEY) in store.keys():
df = store.select(key=im.KEY, start=start, stop=stop)
df = df.swaplevel("datetime", "instrument").sort_index()
# read cache and need to replace not-space fields to field
df = cls.cache_to_origin_data(df, fields)
else:
df = pd.DataFrame(columns=fields)
return df
|
read_cache_from
This function can read data from the disk cache dataset
:param cache_path:
:param start_time:
:param end_time:
:param fields: The fields order of the dataset cache is sorted. So rearrange the columns to make it consistent.
:return:
|
read_data_from_cache
|
python
|
microsoft/qlib
|
qlib/data/cache.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/cache.py
|
MIT
|
def gen_dataset_cache(self, cache_path: Union[str, Path], instruments, fields, freq, inst_processors=[]):
"""gen_dataset_cache
.. note:: This function does not consider the cache read write lock. Please
acquire the lock outside this function
The format of the cache contains 3 parts (followed by a typical filename).
- index : cache/d41366901e25de3ec47297f12e2ba11d.index
- The content of the file may be in following format(pandas.Series)
.. code-block:: python
start end
1999-11-10 00:00:00 0 1
1999-11-11 00:00:00 1 2
1999-11-12 00:00:00 2 3
...
.. note:: The start is closed. The end is open!!!!!
- Each line contains two elements <start_index, end_index> with a timestamp as its index.
- It indicates the `start_index` (included) and `end_index` (excluded) of the data for `timestamp`
- meta data: cache/d41366901e25de3ec47297f12e2ba11d.meta
- data : cache/d41366901e25de3ec47297f12e2ba11d
- This is a hdf file sorted by datetime
:param cache_path: The path to store the cache.
:param instruments: The instruments to store the cache.
:param fields: The fields to store the cache.
:param freq: The freq to store the cache.
:param inst_processors: Instrument processors.
:return type pd.DataFrame; The fields of the returned DataFrame are consistent with the parameters of the function.
"""
# get calendar
from .data import Cal # pylint: disable=C0415
cache_path = Path(cache_path)
_calendar = Cal.calendar(freq=freq)
self.logger.debug(f"Generating dataset cache {cache_path}")
# Make sure the cache runs right when the directory is deleted
# while running
self.clear_cache(cache_path)
features = self.provider.dataset(
instruments, fields, _calendar[0], _calendar[-1], freq, inst_processors=inst_processors
)
if features.empty:
return features
# swap index and sorted
features = features.swaplevel("instrument", "datetime").sort_index()
# write cache data
with pd.HDFStore(str(cache_path.with_suffix(".data"))) as store:
cache_to_orig_map = dict(zip(remove_fields_space(features.columns), features.columns))
orig_to_cache_map = dict(zip(features.columns, remove_fields_space(features.columns)))
cache_features = features[list(cache_to_orig_map.values())].rename(columns=orig_to_cache_map)
# cache columns
cache_columns = sorted(cache_features.columns)
cache_features = cache_features.loc[:, cache_columns]
cache_features = cache_features.loc[:, ~cache_features.columns.duplicated()]
store.append(DatasetCache.HDF_KEY, cache_features, append=False)
# write meta file
meta = {
"info": {
"instruments": instruments,
"fields": list(cache_features.columns),
"freq": freq,
"last_update": str(_calendar[-1]), # The last_update to store the cache
"inst_processors": inst_processors, # The last_update to store the cache
},
"meta": {"last_visit": time.time(), "visits": 1},
}
with cache_path.with_suffix(".meta").open("wb") as f:
pickle.dump(meta, f, protocol=C.dump_protocol_version)
cache_path.with_suffix(".meta").chmod(stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
# write index file
im = DiskDatasetCache.IndexManager(cache_path)
index_data = im.build_index_from_data(features)
im.update(index_data)
# rename the file after the cache has been generated
# this doesn't work well on windows, but our server won't use windows
# temporarily
cache_path.with_suffix(".data").rename(cache_path)
# the fields of the cached features are converted to the original fields
return features.swaplevel("datetime", "instrument")
|
gen_dataset_cache
.. note:: This function does not consider the cache read write lock. Please
acquire the lock outside this function
The format of the cache contains 3 parts (followed by a typical filename).
- index : cache/d41366901e25de3ec47297f12e2ba11d.index
- The content of the file may be in following format(pandas.Series)
.. code-block:: python
start end
1999-11-10 00:00:00 0 1
1999-11-11 00:00:00 1 2
1999-11-12 00:00:00 2 3
...
.. note:: The start is closed. The end is open!!!!!
- Each line contains two elements <start_index, end_index> with a timestamp as its index.
- It indicates the `start_index` (included) and `end_index` (excluded) of the data for `timestamp`
- meta data: cache/d41366901e25de3ec47297f12e2ba11d.meta
- data : cache/d41366901e25de3ec47297f12e2ba11d
- This is a hdf file sorted by datetime
:param cache_path: The path to store the cache.
:param instruments: The instruments to store the cache.
:param fields: The fields to store the cache.
:param freq: The freq to store the cache.
:param inst_processors: Instrument processors.
:return type pd.DataFrame; The fields of the returned DataFrame are consistent with the parameters of the function.
|
gen_dataset_cache
|
python
|
microsoft/qlib
|
qlib/data/cache.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/cache.py
|
MIT
|
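The `.index` part described above maps each timestamp to the half-open `[start, end)` row range of that day's rows in the datetime-sorted data. A pandas sketch of building such an index from a small (datetime, instrument) frame with made-up values:

import pandas as pd

# datetime-sorted data, as it would be stored in the cache's HDF file
idx = pd.MultiIndex.from_tuples(
    [("2020-01-02", "SH600000"), ("2020-01-02", "SZ000001"), ("2020-01-03", "SH600000")],
    names=["datetime", "instrument"],
)
features = pd.DataFrame({"$close": [10.0, 20.0, 11.0]}, index=idx)

# rows per timestamp -> cumulative row offsets
counts = features.groupby(level="datetime").size()
end = counts.cumsum()
start = end - counts
index_df = pd.DataFrame({"start": start, "end": end})
print(index_df)                            # start is included, end is excluded, as noted above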
def send_request(self, request_type, request_content, msg_queue, msg_proc_func=None):
"""Send a certain request to server.
Parameters
----------
request_type : str
type of proposed request, 'calendar'/'instrument'/'feature'.
request_content : dict
records the information of the request.
msg_proc_func : func
the function to process the message when receiving response, should have arg `*args`.
msg_queue: Queue
The queue to pass the message after callback.
"""
head_info = {"version": qlib.__version__}
def request_callback(*args):
"""callback_wrapper
:param *args: args[0] is the response content
"""
# args[0] is the response content
self.logger.debug("receive data and enter queue")
msg = dict(args[0])
if msg["detailed_info"] is not None:
if msg["status"] != 0:
self.logger.error(msg["detailed_info"])
else:
self.logger.info(msg["detailed_info"])
if msg["status"] != 0:
ex = ValueError(f"Bad response(status=={msg['status']}), detailed info: {msg['detailed_info']}")
msg_queue.put(ex)
else:
if msg_proc_func is not None:
try:
ret = msg_proc_func(msg["result"])
except Exception as e:
self.logger.exception("Error when processing message.")
ret = e
else:
ret = msg["result"]
msg_queue.put(ret)
self.disconnect()
self.logger.debug("disconnected")
self.logger.debug("try connecting")
self.connect_server()
self.logger.debug("connected")
# The pickle is for passing some parameters with special type(such as
# pd.Timestamp)
request_content = {"head": head_info, "body": pickle.dumps(request_content, protocol=C.dump_protocol_version)}
self.sio.on(request_type + "_response", request_callback)
self.logger.debug("try sending")
self.sio.emit(request_type + "_request", request_content)
self.sio.wait()
|
Send a certain request to server.
Parameters
----------
request_type : str
type of proposed request, 'calendar'/'instrument'/'feature'.
request_content : dict
records the information of the request.
msg_proc_func : func
the function to process the message when receiving response, should have arg `*args`.
msg_queue: Queue
The queue to pass the message after callback.
|
send_request
|
python
|
microsoft/qlib
|
qlib/data/client.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/client.py
|
MIT
|
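`send_request` hands the result (or an exception) back to the caller through `msg_queue`, so the caller can block on a single `get` regardless of whether the callback succeeded. A stdlib-only sketch of that pattern, detached from socketio:

import queue

def handle_response(msg, msg_queue, msg_proc_func=None):
    """Mimic the callback: put either the processed result or an error into the queue."""
    if msg["status"] != 0:
        msg_queue.put(ValueError(f"Bad response(status=={msg['status']})"))
        return
    result = msg["result"]
    if msg_proc_func is not None:
        try:
            result = msg_proc_func(result)
        except Exception as e:             # processing errors also travel through the queue
            result = e
    msg_queue.put(result)

q = queue.Queue()
handle_response({"status": 0, "result": [1, 2, 3]}, q, msg_proc_func=sum)
ret = q.get()
if isinstance(ret, Exception):
    raise ret
print(ret)                                 # 6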
def request_callback(*args):
"""callback_wrapper
:param *args: args[0] is the response content
"""
# args[0] is the response content
self.logger.debug("receive data and enter queue")
msg = dict(args[0])
if msg["detailed_info"] is not None:
if msg["status"] != 0:
self.logger.error(msg["detailed_info"])
else:
self.logger.info(msg["detailed_info"])
if msg["status"] != 0:
ex = ValueError(f"Bad response(status=={msg['status']}), detailed info: {msg['detailed_info']}")
msg_queue.put(ex)
else:
if msg_proc_func is not None:
try:
ret = msg_proc_func(msg["result"])
except Exception as e:
self.logger.exception("Error when processing message.")
ret = e
else:
ret = msg["result"]
msg_queue.put(ret)
self.disconnect()
self.logger.debug("disconnected")
|
callback_wrapper
:param *args: args[0] is the response content
|
request_callback
|
python
|
microsoft/qlib
|
qlib/data/client.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/client.py
|
MIT
|
def calendar(self, start_time=None, end_time=None, freq="day", future=False):
"""Get calendar of certain market in given time range.
Parameters
----------
start_time : str
start of the time range.
end_time : str
end of the time range.
freq : str
time frequency, available: year/quarter/month/week/day.
future : bool
whether including future trading day.
Returns
----------
list
calendar list
"""
_calendar, _calendar_index = self._get_calendar(freq, future)
if start_time == "None":
start_time = None
if end_time == "None":
end_time = None
# strip
if start_time:
start_time = pd.Timestamp(start_time)
if start_time > _calendar[-1]:
return np.array([])
else:
start_time = _calendar[0]
if end_time:
end_time = pd.Timestamp(end_time)
if end_time < _calendar[0]:
return np.array([])
else:
end_time = _calendar[-1]
_, _, si, ei = self.locate_index(start_time, end_time, freq, future)
return _calendar[si : ei + 1]
|
Get calendar of certain market in given time range.
Parameters
----------
start_time : str
start of the time range.
end_time : str
end of the time range.
freq : str
time frequency, available: year/quarter/month/week/day.
future : bool
whether including future trading day.
Returns
----------
list
calendar list
|
calendar
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def locate_index(
self, start_time: Union[pd.Timestamp, str], end_time: Union[pd.Timestamp, str], freq: str, future: bool = False
):
"""Locate the start time index and end time index in a calendar under certain frequency.
Parameters
----------
start_time : pd.Timestamp
start of the time range.
end_time : pd.Timestamp
end of the time range.
freq : str
time frequency, available: year/quarter/month/week/day.
future : bool
whether including future trading day.
Returns
-------
pd.Timestamp
the real start time.
pd.Timestamp
the real end time.
int
the index of start time.
int
the index of end time.
"""
start_time = pd.Timestamp(start_time)
end_time = pd.Timestamp(end_time)
calendar, calendar_index = self._get_calendar(freq=freq, future=future)
if start_time not in calendar_index:
try:
start_time = calendar[bisect.bisect_left(calendar, start_time)]
except IndexError as index_e:
raise IndexError(
"`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`"
) from index_e
start_index = calendar_index[start_time]
if end_time not in calendar_index:
end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]
end_index = calendar_index[end_time]
return start_time, end_time, start_index, end_index
|
Locate the start time index and end time index in a calendar under certain frequency.
Parameters
----------
start_time : pd.Timestamp
start of the time range.
end_time : pd.Timestamp
end of the time range.
freq : str
time frequency, available: year/quarter/month/week/day.
future : bool
whether including future trading day.
Returns
-------
pd.Timestamp
the real start time.
pd.Timestamp
the real end time.
int
the index of start time.
int
the index of end time.
|
locate_index
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
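The snapping in `locate_index` uses `bisect` to move `start_time` forward and `end_time` backward onto the nearest trading days. A standalone sketch with a toy three-day calendar:

import bisect
import pandas as pd

calendar = [pd.Timestamp(d) for d in ["2020-01-02", "2020-01-03", "2020-01-06"]]
calendar_index = {ts: i for i, ts in enumerate(calendar)}

start_time = pd.Timestamp("2020-01-01")    # not a trading day: snap forward
end_time = pd.Timestamp("2020-01-04")      # not a trading day: snap backward

if start_time not in calendar_index:
    start_time = calendar[bisect.bisect_left(calendar, start_time)]
if end_time not in calendar_index:
    end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]

print(start_time.date(), end_time.date())                    # 2020-01-02 2020-01-03
print(calendar_index[start_time], calendar_index[end_time])  # 0 1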
def _get_calendar(self, freq, future):
"""Load calendar using memcache.
Parameters
----------
freq : str
frequency of read calendar file.
future : bool
whether including future trading day.
Returns
-------
list
list of timestamps.
dict
dict composed by timestamp as key and index as value for fast search.
"""
flag = f"{freq}_future_{future}"
if flag not in H["c"]:
_calendar = np.array(self.load_calendar(freq, future))
_calendar_index = {x: i for i, x in enumerate(_calendar)} # for fast search
H["c"][flag] = _calendar, _calendar_index
return H["c"][flag]
|
Load calendar using memcache.
Parameters
----------
freq : str
frequency of read calendar file.
future : bool
whether including future trading day.
Returns
-------
list
list of timestamps.
dict
dict composed by timestamp as key and index as value for fast search.
|
_get_calendar
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def instruments(market: Union[List, str] = "all", filter_pipe: Union[List, None] = None):
"""Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : Union[List, str]
str:
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500.
list:
["ID1", "ID2"]. A list of stocks
filter_pipe : list
the list of dynamic filters.
Returns
----------
dict: if isinstance(market, str)
dict of stockpool config.
{`market` => base market name, `filter_pipe` => list of filters}
example :
.. code-block::
{'market': 'csi500',
'filter_pipe': [{'filter_type': 'ExpressionDFilter',
'rule_expression': '$open<40',
'filter_start_time': None,
'filter_end_time': None,
'keep': False},
{'filter_type': 'NameDFilter',
'name_rule_re': 'SH[0-9]{4}55',
'filter_start_time': None,
'filter_end_time': None}]}
list: if isinstance(market, list)
just return the original list directly.
NOTE: this will make the instruments compatible with more cases. The user code will be simpler.
"""
if isinstance(market, list):
return market
from .filter import SeriesDFilter # pylint: disable=C0415
if filter_pipe is None:
filter_pipe = []
config = {"market": market, "filter_pipe": []}
# the order of the filters will affect the result, so we need to keep
# the order
for filter_t in filter_pipe:
if isinstance(filter_t, dict):
_config = filter_t
elif isinstance(filter_t, SeriesDFilter):
_config = filter_t.to_config()
else:
raise TypeError(
f"Unsupported filter types: {type(filter_t)}! Filter only supports dict or isinstance(filter, SeriesDFilter)"
)
config["filter_pipe"].append(_config)
return config
|
Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : Union[List, str]
str:
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500.
list:
["ID1", "ID2"]. A list of stocks
filter_pipe : list
the list of dynamic filters.
Returns
----------
dict: if isinstance(market, str)
dict of stockpool config.
{`market` => base market name, `filter_pipe` => list of filters}
example :
.. code-block::
{'market': 'csi500',
'filter_pipe': [{'filter_type': 'ExpressionDFilter',
'rule_expression': '$open<40',
'filter_start_time': None,
'filter_end_time': None,
'keep': False},
{'filter_type': 'NameDFilter',
'name_rule_re': 'SH[0-9]{4}55',
'filter_start_time': None,
'filter_end_time': None}]}
list: if isinstance(market, list)
just return the original list directly.
NOTE: this will make the instruments compatible with more cases. The user code will be simpler.
|
instruments
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
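A short illustration of the two return shapes described above: a plain list of codes passes straight through, while a market name plus dict filters becomes a stock-pool config. This mirrors the function's branching with plain Python (no qlib imports), so the filter content shown is purely illustrative:

def build_instruments(market="all", filter_pipe=None):
    if isinstance(market, list):
        return market                      # explicit stock lists are returned as-is
    return {"market": market, "filter_pipe": list(filter_pipe or [])}

print(build_instruments(["SH600000", "SZ000001"]))
print(build_instruments(
    "csi300",
    filter_pipe=[{"filter_type": "ExpressionDFilter", "rule_expression": "$open<40"}],
))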
def period_feature(
self,
instrument,
field,
start_index: int,
end_index: int,
cur_time: pd.Timestamp,
period: Optional[int] = None,
) -> pd.Series:
"""
get the historical periods data series between `start_index` and `end_index`
Parameters
----------
start_index: int
start_index is a relative index to the latest period to cur_time
end_index: int
end_index is a relative index to the latest period to cur_time
in most cases, the start_index and end_index will be non-positive values
For example, start_index == -3 end_index == 0 and current period index is cur_idx,
then the data between [start_index + cur_idx, end_index + cur_idx] will be retrieved.
period: int
This is used for query specific period.
The period is represented with int in Qlib. (e.g. 202001 may represent the first quarter in 2020)
NOTE: `period` will override `start_index` and `end_index`
Returns
-------
pd.Series
The index will be integers to indicate the periods of the data
A typical example will be
TODO
Raises
------
FileNotFoundError
This exception will be raised if the queried data do not exist.
"""
raise NotImplementedError(f"Please implement the `period_feature` method")
|
get the historical periods data series between `start_index` and `end_index`
Parameters
----------
start_index: int
start_index is a relative index to the latest period to cur_time
end_index: int
end_index is a relative index to the latest period to cur_time
in most cases, the start_index and end_index will be non-positive values
For example, start_index == -3 end_index == 0 and current period index is cur_idx,
then the data between [start_index + cur_idx, end_index + cur_idx] will be retrieved.
period: int
This is used for query specific period.
The period is represented with int in Qlib. (e.g. 202001 may represent the first quarter in 2020)
NOTE: `period` will override `start_index` and `end_index`
Returns
-------
pd.Series
The index will be integers to indicate the periods of the data
A typical example will be
TODO
Raises
------
FileNotFoundError
This exception will be raised if the queried data do not exist.
|
period_feature
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def _uri(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=1,
inst_processors=[],
**kwargs,
):
"""Get task uri, used when generating rabbitmq task in qlib_server
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config.
fields : list
list of feature instances.
start_time : str
start of the time range.
end_time : str
end of the time range.
freq : str
time frequency.
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache.
"""
# TODO: qlib-server support inst_processors
return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache, inst_processors)
|
Get task uri, used when generating rabbitmq task in qlib_server
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config.
fields : list
list of feature instances.
start_time : str
start of the time range.
end_time : str
end of the time range.
freq : str
time frequency.
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache.
|
_uri
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def get_instruments_d(instruments, freq):
"""
Parse different types of input instruments to output instruments_d
A wrong format of input instruments will lead to an exception.
"""
if isinstance(instruments, dict):
if "market" in instruments:
# dict of stockpool config
instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)
else:
# dict of instruments and timestamp
instruments_d = instruments
elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
# list or tuple of a group of instruments
instruments_d = list(instruments)
else:
raise ValueError("Unsupported input type for param `instrument`")
return instruments_d
|
Parse different types of input instruments to output instruments_d
A wrong format of input instruments will lead to an exception.
|
get_instruments_d
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def get_column_names(fields):
"""
Get column names from input fields
"""
if len(fields) == 0:
raise ValueError("fields cannot be empty")
column_names = [str(f) for f in fields]
return column_names
|
Get column names from input fields
|
get_column_names
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def dataset_processor(instruments_d, column_names, start_time, end_time, freq, inst_processors=[]):
"""
Load and process the data, return the data set.
- default using multi-kernel method.
"""
normalize_column_names = normalize_cache_fields(column_names)
# One process for one task, so that the memory will be freed quicker.
workers = max(min(C.get_kernels(freq), len(instruments_d)), 1)
# create iterator
if isinstance(instruments_d, dict):
it = instruments_d.items()
else:
it = zip(instruments_d, [None] * len(instruments_d))
inst_l = []
task_l = []
for inst, spans in it:
inst_l.append(inst)
task_l.append(
delayed(DatasetProvider.inst_calculator)(
inst, start_time, end_time, freq, normalize_column_names, spans, C, inst_processors
)
)
data = dict(
zip(
inst_l,
ParallelExt(n_jobs=workers, backend=C.joblib_backend, maxtasksperchild=C.maxtasksperchild)(task_l),
)
)
new_data = dict()
for inst in sorted(data.keys()):
if len(data[inst]) > 0:
# NOTE: Python version >= 3.6; in versions after python3.6, dict will always guarantee the insertion order
new_data[inst] = data[inst]
if len(new_data) > 0:
data = pd.concat(new_data, names=["instrument"], sort=False)
data = DiskDatasetCache.cache_to_origin_data(data, column_names)
else:
data = pd.DataFrame(
index=pd.MultiIndex.from_arrays([[], []], names=("instrument", "datetime")),
columns=column_names,
dtype=np.float32,
)
return data
|
Load and process the data, return the data set.
- default using multi-kernel method.
|
dataset_processor
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
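The per-instrument fan-out above is plain joblib underneath: one delayed task per instrument, with the results zipped back onto the instrument codes and concatenated. A minimal joblib sketch of that pattern (`calc_one` is a hypothetical stand-in for `inst_calculator`):

import pandas as pd
from joblib import Parallel, delayed

def calc_one(inst):                        # hypothetical stand-in for inst_calculator
    return pd.DataFrame(
        {"$close": [1.0, 2.0]},
        index=pd.date_range("2020-01-02", periods=2, name="datetime"),
    )

instruments = ["SH600000", "SZ000001", "SZ000002"]
results = Parallel(n_jobs=2)(delayed(calc_one)(inst) for inst in instruments)
data = dict(zip(instruments, results))

# concatenate the per-instrument frames into one (instrument, datetime) indexed frame
df = pd.concat(data, names=["instrument"], sort=False)
print(df)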
def inst_calculator(inst, start_time, end_time, freq, column_names, spans=None, g_config=None, inst_processors=[]):
"""
Calculate the expressions for **one** instrument, return a df result.
If the expression has been calculated before, load from cache.
return value: A data frame with index 'datetime' and other data columns.
"""
# FIXME: Windows OS or MacOS using spawn: https://docs.python.org/3.8/library/multiprocessing.html?highlight=spawn#contexts-and-start-methods
# NOTE: This place is compatible with windows, windows multi-process is spawn
C.register_from_C(g_config)
obj = dict()
for field in column_names:
# The client does not have expression provider, the data will be loaded from cache using static method.
obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)
data = pd.DataFrame(obj)
if not data.empty and not np.issubdtype(data.index.dtype, np.dtype("M")):
# If the underlying provider does not return the data in datetime format, we'll convert it into datetime format
_calendar = Cal.calendar(freq=freq)
data.index = _calendar[data.index.values.astype(int)]
data.index.names = ["datetime"]
if not data.empty and spans is not None:
mask = np.zeros(len(data), dtype=bool)
for begin, end in spans:
mask |= (data.index >= begin) & (data.index <= end)
data = data[mask]
for _processor in inst_processors:
if _processor:
_processor_obj = init_instance_by_config(_processor, accept_types=InstProcessor)
data = _processor_obj(data, instrument=inst)
return data
|
Calculate the expressions for **one** instrument, return a df result.
If the expression has been calculated before, load from cache.
return value: A data frame with index 'datetime' and other data columns.
|
inst_calculator
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def load_calendar(self, freq, future):
"""Load original calendar timestamp from file.
Parameters
----------
freq : str
frequency of read calendar file.
future: bool
Returns
----------
list
list of timestamps
"""
try:
backend_obj = self.backend_obj(freq=freq, future=future).data
except ValueError:
if future:
get_module_logger("data").warning(
f"load calendar error: freq={freq}, future={future}; return current calendar!"
)
get_module_logger("data").warning(
"You can get future calendar by referring to the following document: https://github.com/microsoft/qlib/blob/main/scripts/data_collector/contrib/README.md"
)
backend_obj = self.backend_obj(freq=freq, future=False).data
else:
raise
return [pd.Timestamp(x) for x in backend_obj]
|
Load original calendar timestamp from file.
Parameters
----------
freq : str
frequency of read calendar file.
future: bool
Returns
----------
list
list of timestamps
|
load_calendar
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def __init__(self, align_time: bool = True):
"""
Parameters
----------
align_time : bool
Whether to align the time to the calendar.
The frequency is flexible in some datasets and can't be aligned.
For data with a fixed frequency and a shared calendar, aligning the data to the calendar provides the following benefit:
- Queries are aligned to the same parameters, so the cache can be shared.
"""
super().__init__()
self.align_time = align_time
|
Parameters
----------
align_time : bool
Whether to align the time to the calendar.
The frequency is flexible in some datasets and can't be aligned.
For data with a fixed frequency and a shared calendar, aligning the data to the calendar provides the following benefit:
- Queries are aligned to the same parameters, so the cache can be shared.
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq="day"):
"""
This method is used to prepare the expression cache for the client.
Then the client will load the data from expression cache by itself.
"""
instruments_d = DatasetProvider.get_instruments_d(instruments, freq)
column_names = DatasetProvider.get_column_names(fields)
cal = Cal.calendar(start_time, end_time, freq)
if len(cal) == 0:
return
start_time = cal[0]
end_time = cal[-1]
workers = max(min(C.kernels, len(instruments_d)), 1)
ParallelExt(n_jobs=workers, backend=C.joblib_backend, maxtasksperchild=C.maxtasksperchild)(
delayed(LocalDatasetProvider.cache_walker)(inst, start_time, end_time, freq, column_names)
for inst in instruments_d
)
|
This method is used to prepare the expression cache for the client.
Then the client will load the data from expression cache by itself.
|
multi_cache_walker
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def cache_walker(inst, start_time, end_time, freq, column_names):
"""
If the expressions of one instrument haven't been calculated before,
calculate it and write it into expression cache.
"""
for field in column_names:
ExpressionD.expression(inst, field, start_time, end_time, freq)
|
If the expressions of one instrument haven't been calculated before,
calculate it and write it into expression cache.
|
cache_walker
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def features(
self,
instruments,
fields,
start_time=None,
end_time=None,
freq="day",
disk_cache=None,
inst_processors=[],
):
"""
Parameters
----------
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
This function will try to use cache method which has a keyword `disk_cache`,
and will use provider method if a type error is raised because the DatasetD instance
is a provider class.
"""
disk_cache = C.default_disk_cache if disk_cache is None else disk_cache
fields = list(fields) # In case of tuple.
try:
return DatasetD.dataset(
instruments, fields, start_time, end_time, freq, disk_cache, inst_processors=inst_processors
)
except TypeError:
return DatasetD.dataset(instruments, fields, start_time, end_time, freq, inst_processors=inst_processors)
|
Parameters
----------
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
This function will try to use cache method which has a keyword `disk_cache`,
and will use provider method if a type error is raised because the DatasetD instance
is a provider class.
|
features
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def _uri(self, type, **kwargs):
"""_uri
The server hopes to get the uri of the request. The uri will be decided
by the data provider. For example, different cache layers have different uris.
:param type: The type of resource for the uri
:param **kwargs:
"""
if type == "calendar":
return Cal._uri(**kwargs)
elif type == "instrument":
return Inst._uri(**kwargs)
elif type == "feature":
return DatasetD._uri(**kwargs)
|
_uri
The server hopes to get the uri of the request. The uri will be decided
by the data provider. For example, different cache layers have different uris.
:param type: The type of resource for the uri
:param **kwargs:
|
_uri
|
python
|
microsoft/qlib
|
qlib/data/data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/data.py
|
MIT
|
def __init__(self, fstart_time=None, fend_time=None, keep=False):
"""Init function for filter base class.
Filter a set of instruments based on a certain rule within a certain period assigned by fstart_time and fend_time.
Parameters
----------
fstart_time: str
the time for the filter rule to start filtering the instruments.
fend_time: str
the time for the filter rule to stop filtering the instruments.
keep: bool
whether to keep the instruments whose features don't exist in the filter time span.
"""
super(SeriesDFilter, self).__init__()
self.filter_start_time = pd.Timestamp(fstart_time) if fstart_time else None
self.filter_end_time = pd.Timestamp(fend_time) if fend_time else None
self.keep = keep
|
Init function for filter base class.
Filter a set of instruments based on a certain rule within a certain period assigned by fstart_time and fend_time.
Parameters
----------
fstart_time: str
the time for the filter rule to start filtering the instruments.
fend_time: str
the time for the filter rule to stop filtering the instruments.
keep: bool
whether to keep the instruments whose features don't exist in the filter time span.
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/filter.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/filter.py
|
MIT
|
def _getTimeBound(self, instruments):
"""Get time bound for all instruments.
Parameters
----------
instruments: dict
the dict of instruments in the form {instrument_name => list of timestamp tuple}.
Returns
----------
pd.Timestamp, pd.Timestamp
the lower time bound and upper time bound of all the instruments.
"""
trange = Cal.calendar(freq=self.filter_freq)
ubound, lbound = trange[0], trange[-1]
for _, timestamp in instruments.items():
if timestamp:
lbound = timestamp[0][0] if timestamp[0][0] < lbound else lbound
ubound = timestamp[-1][-1] if timestamp[-1][-1] > ubound else ubound
return lbound, ubound
|
Get time bound for all instruments.
Parameters
----------
instruments: dict
the dict of instruments in the form {instrument_name => list of timestamp tuple}.
Returns
----------
pd.Timestamp, pd.Timestamp
the lower time bound and upper time bound of all the instruments.
|
_getTimeBound
|
python
|
microsoft/qlib
|
qlib/data/filter.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/filter.py
|
MIT
|
def _toSeries(self, time_range, target_timestamp):
"""Convert the target timestamp to a pandas series of bool value within a time range.
Make the time inside the target_timestamp range TRUE, others FALSE.
Parameters
----------
time_range : D.calendar
the time range of the instruments.
target_timestamp : list
the list of tuple (timestamp, timestamp).
Returns
----------
pd.Series
the series of bool value for an instrument.
"""
# Construct a whole dict of {date => bool}
timestamp_series = {timestamp: False for timestamp in time_range}
# Convert to pd.Series
timestamp_series = pd.Series(timestamp_series)
# Fill the date within target_timestamp with TRUE
for start, end in target_timestamp:
timestamp_series[Cal.calendar(start_time=start, end_time=end, freq=self.filter_freq)] = True
return timestamp_series
|
Convert the target timestamp to a pandas series of bool value within a time range.
Make the time inside the target_timestamp range TRUE, others FALSE.
Parameters
----------
time_range : D.calendar
the time range of the instruments.
target_timestamp : list
the list of tuple (timestamp, timestamp).
Returns
----------
pd.Series
the series of bool value for an instrument.
|
_toSeries
|
python
|
microsoft/qlib
|
qlib/data/filter.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/filter.py
|
MIT
|
def _filterSeries(self, timestamp_series, filter_series):
"""Filter the timestamp series with filter series by using element-wise AND operation of the two series.
Parameters
----------
timestamp_series : pd.Series
the series of bool value indicating existing time.
filter_series : pd.Series
the series of bool value indicating filter feature.
Returns
----------
pd.Series
the series of bool value indicating whether the date satisfies the filter condition and exists in target timestamp.
"""
fstart, fend = list(filter_series.keys())[0], list(filter_series.keys())[-1]
filter_series = filter_series.astype("bool") # Make sure the filter_series is boolean
timestamp_series[fstart:fend] = timestamp_series[fstart:fend] & filter_series
return timestamp_series
|
Filter the timestamp series with filter series by using element-wise AND operation of the two series.
Parameters
----------
timestamp_series : pd.Series
the series of bool value indicating existing time.
filter_series : pd.Series
the series of bool value indicating filter feature.
Returns
----------
pd.Series
the series of bool value indicating whether the date satisfies the filter condition and exists in target timestamp.
|
_filterSeries
|
python
|
microsoft/qlib
|
qlib/data/filter.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/filter.py
|
MIT
|
def _toTimestamp(self, timestamp_series):
"""Convert the timestamp series to a list of tuple (timestamp, timestamp) indicating a continuous range of TRUE.
Parameters
----------
timestamp_series: pd.Series
the series of bool value after being filtered.
Returns
----------
list
the list of tuple (timestamp, timestamp).
"""
# sort the timestamp_series according to the timestamps
timestamp_series = timestamp_series.sort_index()
timestamp = []
_lbool = None
_ltime = None
_cur_start = None
for _ts, _bool in timestamp_series.items():
# there is likely to be NaN when the filter series doesn't have the
# bool value, so we just change the NaN into False
if pd.isna(_bool):
_bool = False
if _lbool is None:
_cur_start = _ts
_lbool = _bool
_ltime = _ts
continue
if (_lbool, _bool) == (True, False):
if _cur_start:
timestamp.append((_cur_start, _ltime))
elif (_lbool, _bool) == (False, True):
_cur_start = _ts
_lbool = _bool
_ltime = _ts
if _lbool:
timestamp.append((_cur_start, _ltime))
return timestamp
|
Convert the timestamp series to a list of tuple (timestamp, timestamp) indicating a continuous range of TRUE.
Parameters
----------
timestamp_series: pd.Series
the series of bool value after being filtered.
Returns
----------
list
the list of tuple (timestamp, timestamp).
|
_toTimestamp
|
python
|
microsoft/qlib
|
qlib/data/filter.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/filter.py
|
MIT
|
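A small sketch of the bool-series-to-range conversion performed by `_toTimestamp`; `flt` again stands for any constructed `SeriesDFilter` instance, and the dates are arbitrary.
.. code-block:: python

    import pandas as pd

    # boolean series indexed by already-sorted trading dates
    s = pd.Series(
        [True, True, False, True],
        index=pd.to_datetime(["2020-01-02", "2020-01-03", "2020-01-06", "2020-01-07"]),
    )

    ranges = flt._toTimestamp(s)
    # expected: [(Timestamp("2020-01-02"), Timestamp("2020-01-03")),
    #            (Timestamp("2020-01-07"), Timestamp("2020-01-07"))]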
def __call__(self, instruments, start_time=None, end_time=None, freq="day"):
"""Call this filter to get filtered instruments list"""
self.filter_freq = freq
return self.filter_main(instruments, start_time, end_time)
|
Call this filter to get filtered instruments list
|
__call__
|
python
|
microsoft/qlib
|
qlib/data/filter.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/filter.py
|
MIT
|
def filter_main(self, instruments, start_time=None, end_time=None):
"""Implement this method to filter the instruments.
Parameters
----------
instruments: dict
input instruments to be filtered.
start_time: str
start of the time range.
end_time: str
end of the time range.
Returns
----------
dict
filtered instruments, same structure as input instruments.
"""
lbound, ubound = self._getTimeBound(instruments)
start_time = pd.Timestamp(start_time or lbound)
end_time = pd.Timestamp(end_time or ubound)
_instruments_filtered = {}
_all_calendar = Cal.calendar(start_time=start_time, end_time=end_time, freq=self.filter_freq)
_filter_calendar = Cal.calendar(
start_time=self.filter_start_time and max(self.filter_start_time, _all_calendar[0]) or _all_calendar[0],
end_time=self.filter_end_time and min(self.filter_end_time, _all_calendar[-1]) or _all_calendar[-1],
freq=self.filter_freq,
)
_all_filter_series = self._getFilterSeries(instruments, _filter_calendar[0], _filter_calendar[-1])
for inst, timestamp in instruments.items():
# Construct a whole map of date
_timestamp_series = self._toSeries(_all_calendar, timestamp)
# Get filter series
if inst in _all_filter_series:
_filter_series = _all_filter_series[inst]
else:
if self.keep:
_filter_series = pd.Series({timestamp: True for timestamp in _filter_calendar})
else:
_filter_series = pd.Series({timestamp: False for timestamp in _filter_calendar})
# Calculate bool value within the range of filter
_timestamp_series = self._filterSeries(_timestamp_series, _filter_series)
# Reform the map to (start_timestamp, end_timestamp) format
_timestamp = self._toTimestamp(_timestamp_series)
# Remove empty timestamp
if _timestamp:
_instruments_filtered[inst] = _timestamp
return _instruments_filtered
|
Implement this method to filter the instruments.
Parameters
----------
instruments: dict
input instruments to be filtered.
start_time: str
start of the time range.
end_time: str
end of the time range.
Returns
----------
dict
filtered instruments, same structure as input instruments.
|
filter_main
|
python
|
microsoft/qlib
|
qlib/data/filter.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/filter.py
|
MIT
|
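In practice `filter_main` is rarely called directly; it runs when the filter is attached to an instrument config and the instrument list is resolved. A usage sketch, assuming `qlib.init` has been called with provider data and treating the market and dates as placeholders:
.. code-block:: python

    from qlib.data import D
    from qlib.data.filter import ExpressionDFilter

    flt = ExpressionDFilter(rule_expression="$close > 10")
    instruments = D.instruments(market="csi300", filter_pipe=[flt])
    stock_list = D.list_instruments(
        instruments, start_time="2019-01-01", end_time="2019-12-31", as_list=True
    )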
def __init__(self, name_rule_re, fstart_time=None, fend_time=None):
"""Init function for name filter class
Parameters
----------
name_rule_re: str
regular expression for the name rule.
"""
super(NameDFilter, self).__init__(fstart_time, fend_time)
self.name_rule_re = name_rule_re
|
Init function for name filter class
Parameters
----------
name_rule_re: str
regular expression for the name rule.
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/filter.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/filter.py
|
MIT
|
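A construction sketch for the name filter; the regular expression and time span below are illustrative only.
.. code-block:: python

    from qlib.data.filter import NameDFilter

    # keep only instruments whose code matches the regex,
    # evaluated within the given filter time span
    name_filter = NameDFilter(
        name_rule_re="SH[0-9]{4}55",
        fstart_time="2018-01-01",
        fend_time="2018-12-31",
    )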
def __init__(self, rule_expression, fstart_time=None, fend_time=None, keep=False):
"""Init function for expression filter class
Parameters
----------
fstart_time: str
filter the feature starting from this time.
fend_time: str
filter the feature ending by this time.
rule_expression: str
an input expression for the rule.
"""
super(ExpressionDFilter, self).__init__(fstart_time, fend_time, keep=keep)
self.rule_expression = rule_expression
|
Init function for expression filter class
Parameters
----------
fstart_time: str
filter the feature starting from this time.
fend_time: str
filter the feature ending by this time.
rule_expression: str
an input expression for the rule.
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/filter.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/filter.py
|
MIT
|
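A construction sketch for the expression filter; the rule expression is illustrative only.
.. code-block:: python

    from qlib.data.filter import ExpressionDFilter

    # keep an instrument only on days where the expression evaluates to True;
    # keep=True would retain instruments whose features are missing in the span
    expr_filter = ExpressionDFilter(rule_expression="$close > 2000", keep=False)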
def __call__(self, df: pd.DataFrame, instrument, *args, **kwargs):
"""
process the data
NOTE: **The processor could change the content of `df` inplace !!!!! **
User should keep a copy of data outside
Parameters
----------
df : pd.DataFrame
The raw_df of handler or result from previous processor.
"""
|
process the data
NOTE: **The processor could change the content of `df` inplace !!!!! **
User should keep a copy of data outside
Parameters
----------
df : pd.DataFrame
The raw_df of handler or result from previous processor.
|
__call__
|
python
|
microsoft/qlib
|
qlib/data/inst_processor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/inst_processor.py
|
MIT
|
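The `__call__` contract above is easiest to read with a toy subclass in mind. A minimal sketch (the class name and the fill rule are invented for illustration), keeping in mind that the processor may mutate `df` in place:
.. code-block:: python

    import pandas as pd

    from qlib.data.inst_processor import InstProcessor

    class FillNaInstProcessor(InstProcessor):
        """Hypothetical processor: forward-fill missing values per instrument."""

        def __call__(self, df: pd.DataFrame, instrument, *args, **kwargs):
            # NOTE: this modifies `df` in place, as the base-class docstring warns
            df.fillna(method="ffill", inplace=True)
            return df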
def _load_internal(self, instrument, start_index, end_index, *args):
"""
To avoid error raised by bool type input, we transform the data into float32.
"""
series = self.feature.load(instrument, start_index, end_index, *args)
# TODO: More precision types should be configurable
series = series.astype(np.float32)
return getattr(np, self.func)(series)
|
To avoid error raised by bool type input, we transform the data into float32.
|
_load_internal
|
python
|
microsoft/qlib
|
qlib/data/ops.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/ops.py
|
MIT
|
def __init__(self, feature, freq, func):
"""
Resampling the data to target frequency.
The resample function of pandas is used.
- the timestamp will be at the start of the time span after resample.
Parameters
----------
feature : Expression
An expression for calculating the feature
freq : str
It will be passed into the resample method for resampling based on the given frequency
func : method
The method to get the resampled values
Some expressions are frequently used
"""
self.feature = feature
self.freq = freq
self.func = func
|
Resampling the data to target frequency.
The resample function of pandas is used.
- the timestamp will be at the start of the time span after resample.
Parameters
----------
feature : Expression
An expression for calculating the feature
freq : str
It will be passed into the resample method for resampling based on the given frequency
func : method
The method to get the resampled values
Some expressions are frequently used
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/ops.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/ops.py
|
MIT
|
def register(self, ops_list: List[Union[Type[ExpressionOps], dict]]):
"""register operator
Parameters
----------
ops_list : List[Union[Type[ExpressionOps], dict]]
- if type(ops_list) is List[Type[ExpressionOps]], each element of ops_list represents the operator class, which should be the subclass of `ExpressionOps`.
- if type(ops_list) is List[dict], each element of ops_list represents the config of operator, which has the following format:
.. code-block:: text
{
"class": class_name,
"module_path": path,
}
Note: `class` should be the class name of the operator, and `module_path` should be a python module or the path of a file.
"""
for _operator in ops_list:
if isinstance(_operator, dict):
_ops_class, _ = get_callable_kwargs(_operator)
else:
_ops_class = _operator
if not issubclass(_ops_class, (Expression,)):
raise TypeError("operator must be subclass of ExpressionOps, not {}".format(_ops_class))
if _ops_class.__name__ in self._ops:
get_module_logger(self.__class__.__name__).warning(
"The custom operator [{}] will override the qlib default definition".format(_ops_class.__name__)
)
self._ops[_ops_class.__name__] = _ops_class
|
register operator
Parameters
----------
ops_list : List[Union[Type[ExpressionOps], dict]]
- if type(ops_list) is List[Type[ExpressionOps]], each element of ops_list represents the operator class, which should be the subclass of `ExpressionOps`.
- if type(ops_list) is List[dict], each element of ops_list represents the config of operator, which has the following format:
.. code-block:: text
{
"class": class_name,
"module_path": path,
}
Note: `class` should be the class name of the operator, and `module_path` should be a python module or the path of a file.
|
register
|
python
|
microsoft/qlib
|
qlib/data/ops.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/ops.py
|
MIT
|
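A sketch of registering a custom operator; it assumes the registry instance is the `Operators` object exported by `qlib.data.ops`, and the operator and module name below are invented for illustration.
.. code-block:: python

    from qlib.data.ops import ElemOperator, Operators

    class Double(ElemOperator):
        """Hypothetical element-wise operator: multiply a feature by two."""

        def _load_internal(self, instrument, start_index, end_index, *args):
            series = self.feature.load(instrument, start_index, end_index, *args)
            return series * 2

    # register by class ...
    Operators.register([Double])

    # ... or by config dict ("my_ops" is a placeholder module path)
    Operators.register([{"class": "Double", "module_path": "my_ops"}])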
def __init__(
self,
instruments=None,
start_time=None,
end_time=None,
data_loader: Union[dict, str, DataLoader] = None,
init_data=True,
fetch_orig=True,
):
"""
Parameters
----------
instruments :
The stock list to retrieve.
start_time :
start_time of the original data.
end_time :
end_time of the original data.
data_loader : Union[dict, str, DataLoader]
data loader to load the data.
init_data :
initialize the original data in the constructor.
fetch_orig : bool
Return the original data instead of copy if possible.
"""
# Setup data loader
assert data_loader is not None  # data_loader defaults to None only so that start_time/end_time can have None default values
# what data source to load data
self.data_loader = init_instance_by_config(
data_loader,
None if (isinstance(data_loader, dict) and "module_path" in data_loader) else data_loader_module,
accept_types=DataLoader,
)
# what data to be loaded from data source
# For IDE auto-completion.
self.instruments = instruments
self.start_time = start_time
self.end_time = end_time
self.fetch_orig = fetch_orig
if init_data:
with TimeInspector.logt("Init data"):
self.setup_data()
super().__init__()
|
Parameters
----------
instruments :
The stock list to retrieve.
start_time :
start_time of the original data.
end_time :
end_time of the original data.
data_loader : Union[dict, str, DataLoader]
data loader to load the data.
init_data :
initialize the original data in the constructor.
fetch_orig : bool
Return the original data instead of copy if possible.
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
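A construction sketch for the handler above; the loader config follows the `QlibDataLoader` convention of `(expressions, names)` tuples per column group, and the market, dates, and fields are assumptions.
.. code-block:: python

    from qlib.data.dataset.handler import DataHandler

    # assumes qlib.init(...) has already been called so the loader can reach provider data
    dh = DataHandler(
        instruments="csi300",
        start_time="2015-01-01",
        end_time="2020-12-31",
        data_loader={
            "class": "QlibDataLoader",
            "kwargs": {
                "config": {
                    "feature": (["$close / $open", "Mean($volume, 5)"], ["CO_RATIO", "VOL5"]),
                }
            },
        },
    )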
def config(self, **kwargs):
"""
configuration of data.
# what data to be loaded from data source
This method will be used when loading a pickled handler from a dataset.
The data will be initialized with a different time range.
"""
attr_list = {"instruments", "start_time", "end_time"}
for k, v in kwargs.items():
if k in attr_list:
setattr(self, k, v)
for attr in attr_list:
if attr in kwargs:
kwargs.pop(attr)
super().config(**kwargs)
|
configuration of data.
# what data to be loaded from data source
This method will be used when loading a pickled handler from a dataset.
The data will be initialized with a different time range.
|
config
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
def setup_data(self, enable_cache: bool = False):
"""
Set up the data in case initialization is run multiple times
It is responsible for maintaining the following variable
1) self._data
Parameters
----------
enable_cache : bool
default value is false:
- if `enable_cache` == True:
the processed data will be saved on disk, and handler will load the cached data from the disk directly
when we call `init` next time
"""
# Setup data.
# _data may be with multiple column index level. The outer level indicates the feature set name
with TimeInspector.logt("Loading data"):
# make sure the fetch method is based on an index-sorted pd.DataFrame
self._data = lazy_sort_index(self.data_loader.load(self.instruments, self.start_time, self.end_time))
# TODO: cache
|
Set up the data in case initialization is run multiple times
It is responsible for maintaining the following variable
1) self._data
Parameters
----------
enable_cache : bool
default value is false:
- if `enable_cache` == True:
the processed data will be saved on disk, and handler will load the cached data from the disk directly
when we call `init` next time
|
setup_data
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
def fetch(
self,
selector: Union[pd.Timestamp, slice, str, pd.Index] = slice(None, None),
level: Union[str, int] = "datetime",
col_set: Union[str, List[str]] = CS_ALL,
squeeze: bool = False,
proc_func: Callable = None,
) -> pd.DataFrame:
"""
fetch data from underlying data source
Design motivation:
- providing a unified interface for underlying data.
- Potential to make the interface more friendly.
- User can improve performance when fetching data in this extra layer
Parameters
----------
selector : Union[pd.Timestamp, slice, str]
describe how to select data by index
It can be categorized as follows
- fetch single index
- fetch a range of index
- a slice range
- pd.Index for specific indexes
The following conflicts may occur
- Does ["20200101", "20210101"] mean selecting this slice or these two days?
- slices have higher priority
level : Union[str, int]
which index level to select the data
col_set : Union[str, List[str]]
- if isinstance(col_set, str):
select a set of meaningful columns (e.g. features, columns)
- if col_set == CS_RAW:
the raw dataset will be returned.
- if isinstance(col_set, List[str]):
select several sets of meaningful columns, the returned data has multiple levels
proc_func: Callable
- Give a hook for processing data before fetching
- An example to explain the necessity of the hook:
- A Dataset learns some processors to process data related to data segmentation
- It will apply them every time when preparing data.
- The learned processors require the dataframe to remain in the same format when fitting and applying
- However, the data format will change according to the parameters.
- So the processors should be applied to the underlying data.
squeeze : bool
whether squeeze columns and index
Returns
-------
pd.DataFrame.
"""
return self._fetch_data(
data_storage=self._data,
selector=selector,
level=level,
col_set=col_set,
squeeze=squeeze,
proc_func=proc_func,
)
|
fetch data from underlying data source
Design motivation:
- providing a unified interface for underlying data.
- Potential to make the interface more friendly.
- User can improve performance when fetching data in this extra layer
Parameters
----------
selector : Union[pd.Timestamp, slice, str]
describe how to select data by index
It can be categorized as follows
- fetch single index
- fetch a range of index
- a slice range
- pd.Index for specific indexes
The following conflicts may occur
- Does ["20200101", "20210101"] mean selecting this slice or these two days?
- slices have higher priority
level : Union[str, int]
which index level to select the data
col_set : Union[str, List[str]]
- if isinstance(col_set, str):
select a set of meaningful columns (e.g. features, columns)
- if col_set == CS_RAW:
the raw dataset will be returned.
- if isinstance(col_set, List[str]):
select several sets of meaningful columns, the returned data has multiple levels
proc_func: Callable
- Give a hook for processing data before fetching
- An example to explain the necessity of the hook:
- A Dataset learns some processors to process data related to data segmentation
- It will apply them every time when preparing data.
- The learned processors require the dataframe to remain in the same format when fitting and applying
- However, the data format will change according to the parameters.
- So the processors should be applied to the underlying data.
squeeze : bool
whether squeeze columns and index
Returns
-------
pd.DataFrame.
|
fetch
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
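A usage sketch for `fetch`, reusing a hypothetical handler `dh` like the one sketched earlier; the dates and column-set name are assumptions.
.. code-block:: python

    import pandas as pd

    # a single day on the datetime level
    day_df = dh.fetch(selector=pd.Timestamp("2020-01-06"), level="datetime")

    # a date range restricted to one column group
    feat_df = dh.fetch(
        selector=slice("2020-01-01", "2020-03-31"),
        level="datetime",
        col_set="feature",
    )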
def get_cols(self, col_set=CS_ALL) -> list:
"""
get the column names
Parameters
----------
col_set : str
select a set of meaningful columns.(e.g. features, columns)
Returns
-------
list:
list of column names
"""
df = self._data.head()
df = fetch_df_by_col(df, col_set)
return df.columns.to_list()
|
get the column names
Parameters
----------
col_set : str
select a set of meaningful columns.(e.g. features, columns)
Returns
-------
list:
list of column names
|
get_cols
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
def get_range_selector(self, cur_date: Union[pd.Timestamp, str], periods: int) -> slice:
"""
get range selector by number of periods
Args:
cur_date (pd.Timestamp or str): current date
periods (int): number of periods
"""
trading_dates = self._data.index.unique(level="datetime")
cur_loc = trading_dates.get_loc(cur_date)
pre_loc = cur_loc - periods + 1
if pre_loc < 0:
warnings.warn("`periods` is too large. the first date will be returned.")
pre_loc = 0
ref_date = trading_dates[pre_loc]
return slice(ref_date, cur_date)
|
get range selector by number of periods
Args:
cur_date (pd.Timestamp or str): current date
periods (int): number of periods
|
get_range_selector
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
def get_range_iterator(
self, periods: int, min_periods: Optional[int] = None, **kwargs
) -> Iterator[Tuple[pd.Timestamp, pd.DataFrame]]:
"""
get an iterator of sliced data with given periods
Args:
periods (int): number of periods.
min_periods (int): minimum periods for sliced dataframe.
kwargs (dict): will be passed to `self.fetch`.
"""
trading_dates = self._data.index.unique(level="datetime")
if min_periods is None:
min_periods = periods
for cur_date in trading_dates[min_periods:]:
selector = self.get_range_selector(cur_date, periods)
yield cur_date, self.fetch(selector, **kwargs)
|
get an iterator of sliced data with given periods
Args:
periods (int): number of periods.
min_periods (int): minimum periods for sliced dataframe.
kwargs (dict): will be passed to `self.fetch`.
|
get_range_iterator
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
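The two range helpers above are easiest to see together; a sketch assuming the same hypothetical handler `dh`.
.. code-block:: python

    # a slice covering the last 20 trading days up to and including 2020-06-30
    sel = dh.get_range_selector(cur_date="2020-06-30", periods=20)
    window_df = dh.fetch(sel, level="datetime")

    # or iterate rolling 20-day windows over the whole handler range
    for cur_date, win_df in dh.get_range_iterator(periods=20, col_set="feature"):
        pass  # e.g. compute a rolling statistic per window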
def __init__(
self,
instruments=None,
start_time=None,
end_time=None,
data_loader: Union[dict, str, DataLoader] = None,
infer_processors: List = [],
learn_processors: List = [],
shared_processors: List = [],
process_type=PTYPE_A,
drop_raw=False,
**kwargs,
):
"""
Parameters
----------
infer_processors : list
- list of <description info> of processors to generate data for inference
- example of <description info>:
.. code-block::
1) classname & kwargs:
{
"class": "MinMaxNorm",
"kwargs": {
"fit_start_time": "20080101",
"fit_end_time": "20121231"
}
}
2) Only classname:
"DropnaFeature"
3) object instance of Processor
learn_processors : list
similar to infer_processors, but for generating data for learning models
process_type: str
PTYPE_I = 'independent'
- self._infer will be processed by infer_processors
- self._learn will be processed by learn_processors
PTYPE_A = 'append'
- self._infer will be processed by infer_processors
- self._learn will be processed by infer_processors + learn_processors
- (i.e. self._learn is self._infer further processed by learn_processors)
drop_raw: bool
Whether to drop the raw data
"""
# Setup preprocessor
self.infer_processors = [] # for lint
self.learn_processors = [] # for lint
self.shared_processors = [] # for lint
for pname in "infer_processors", "learn_processors", "shared_processors":
for proc in locals()[pname]:
getattr(self, pname).append(
init_instance_by_config(
proc,
None if (isinstance(proc, dict) and "module_path" in proc) else processor_module,
accept_types=processor_module.Processor,
)
)
self.process_type = process_type
self.drop_raw = drop_raw
super().__init__(instruments, start_time, end_time, data_loader, **kwargs)
|
Parameters
----------
infer_processors : list
- list of <description info> of processors to generate data for inference
- example of <description info>:
.. code-block::
1) classname & kwargs:
{
"class": "MinMaxNorm",
"kwargs": {
"fit_start_time": "20080101",
"fit_end_time": "20121231"
}
}
2) Only classname:
"DropnaFeature"
3) object instance of Processor
learn_processors : list
similar to infer_processors, but for generating data for learning models
process_type: str
PTYPE_I = 'independent'
- self._infer will be processed by infer_processors
- self._learn will be processed by learn_processors
PTYPE_A = 'append'
- self._infer will be processed by infer_processors
- self._learn will be processed by infer_processors + learn_processors
- (i.e. self._learn is self._infer further processed by learn_processors)
drop_raw: bool
Whether to drop the raw data
|
__init__
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
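A construction sketch for `DataHandlerLP`; `ZScoreNorm` and `DropnaLabel` are standard qlib processors, while the market, dates, fields, and fit window are assumptions.
.. code-block:: python

    from qlib.data.dataset.handler import DataHandlerLP

    dh = DataHandlerLP(
        instruments="csi300",
        start_time="2015-01-01",
        end_time="2020-12-31",
        data_loader={
            "class": "QlibDataLoader",
            "kwargs": {
                "config": {
                    "feature": (["$close / $open"], ["CO_RATIO"]),
                    "label": (["Ref($close, -2) / Ref($close, -1) - 1"], ["LABEL0"]),
                }
            },
        },
        infer_processors=[
            {"class": "ZScoreNorm", "kwargs": {"fit_start_time": "2015-01-01", "fit_end_time": "2017-12-31"}},
        ],
        learn_processors=["DropnaLabel"],
        process_type=DataHandlerLP.PTYPE_A,
    )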
def fit(self):
"""
fit data without processing the data
"""
for proc in self.get_all_processors():
with TimeInspector.logt(f"{proc.__class__.__name__}"):
proc.fit(self._data)
|
fit data without processing the data
|
fit
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
def _is_proc_readonly(proc_l: List[processor_module.Processor]):
"""
NOTE: it will return True if `len(proc_l) == 0`
"""
for p in proc_l:
if not p.readonly():
return False
return True
|
NOTE: it will return True if `len(proc_l) == 0`
|
_is_proc_readonly
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
def process_data(self, with_fit: bool = False):
"""
Process the data. Run `processor.fit` if necessary
Notation: (data) [processor]
# data processing flow of self.process_type == DataHandlerLP.PTYPE_I
.. code-block:: text
(self._data)-[shared_processors]-(_shared_df)-[learn_processors]-(_learn_df)
\\
-[infer_processors]-(_infer_df)
# data processing flow of self.process_type == DataHandlerLP.PTYPE_A
.. code-block:: text
(self._data)-[shared_processors]-(_shared_df)-[infer_processors]-(_infer_df)-[learn_processors]-(_learn_df)
Parameters
----------
with_fit : bool
The input of the `fit` will be the output of the previous processor
"""
# shared data processors
# 1) assign
_shared_df = self._data
if not self._is_proc_readonly(self.shared_processors): # avoid modifying the original data
_shared_df = _shared_df.copy()
# 2) process
_shared_df = self._run_proc_l(_shared_df, self.shared_processors, with_fit=with_fit, check_for_infer=True)
# data for inference
# 1) assign
_infer_df = _shared_df
if not self._is_proc_readonly(self.infer_processors): # avoid modifying the original data
_infer_df = _infer_df.copy()
# 2) process
_infer_df = self._run_proc_l(_infer_df, self.infer_processors, with_fit=with_fit, check_for_infer=True)
self._infer = _infer_df
# data for learning
# 1) assign
if self.process_type == DataHandlerLP.PTYPE_I:
_learn_df = _shared_df
elif self.process_type == DataHandlerLP.PTYPE_A:
# based on `infer_df` and append the processor
_learn_df = _infer_df
else:
raise NotImplementedError(f"This type of input is not supported")
if not self._is_proc_readonly(self.learn_processors): # avoid modifying the original data
_learn_df = _learn_df.copy()
# 2) process
_learn_df = self._run_proc_l(_learn_df, self.learn_processors, with_fit=with_fit, check_for_infer=False)
self._learn = _learn_df
if self.drop_raw:
del self._data
|
Process the data. Run `processor.fit` if necessary
Notation: (data) [processor]
# data processing flow of self.process_type == DataHandlerLP.PTYPE_I
.. code-block:: text
(self._data)-[shared_processors]-(_shared_df)-[learn_processors]-(_learn_df)
\
-[infer_processors]-(_infer_df)
# data processing flow of self.process_type == DataHandlerLP.PTYPE_A
.. code-block:: text
(self._data)-[shared_processors]-(_shared_df)-[infer_processors]-(_infer_df)-[learn_processors]-(_learn_df)
Parameters
----------
with_fit : bool
The input of the `fit` will be the output of the previous processor
|
process_data
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
def config(self, processor_kwargs: dict = None, **kwargs):
"""
configuration of data.
# what data to be loaded from data source
This method will be used when loading a pickled handler from a dataset.
The data will be initialized with a different time range.
"""
super().config(**kwargs)
if processor_kwargs is not None:
for processor in self.get_all_processors():
processor.config(**processor_kwargs)
|
configuration of data.
# what data to be loaded from data source
This method will be used when loading a pickled handler from a dataset.
The data will be initialized with a different time range.
|
config
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
def setup_data(self, init_type: str = IT_FIT_SEQ, **kwargs):
"""
Set up the data in case initialization is run multiple times
Parameters
----------
init_type : str
The type `IT_*` listed above.
enable_cache : bool
default value is false:
- if `enable_cache` == True:
the processed data will be saved on disk, and handler will load the cached data from the disk directly
when we call `init` next time
"""
# init raw data
super().setup_data(**kwargs)
with TimeInspector.logt("fit & process data"):
if init_type == DataHandlerLP.IT_FIT_IND:
self.fit()
self.process_data()
elif init_type == DataHandlerLP.IT_LS:
self.process_data()
elif init_type == DataHandlerLP.IT_FIT_SEQ:
self.fit_process_data()
else:
raise NotImplementedError(f"This type of input is not supported")
# TODO: Be able to cache handler data. Save the memory for data processing
|
Set up the data in case initialization is run multiple times
Parameters
----------
init_type : str
The type `IT_*` listed above.
enable_cache : bool
default value is false:
- if `enable_cache` == True:
the processed data will be saved on disk, and handler will load the cached data from the disk directly
when we call `init` next time
|
setup_data
|
python
|
microsoft/qlib
|
qlib/data/dataset/handler.py
|
https://github.com/microsoft/qlib/blob/master/qlib/data/dataset/handler.py
|
MIT
|
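Finally, a sketch of (re)initializing the processed data with one of the `IT_*` modes referenced above, using the hypothetical `dh` handler from the earlier sketches.
.. code-block:: python

    from qlib.data.dataset.handler import DataHandlerLP

    # fit each processor on the output of the previous one, then process (sequential)
    dh.setup_data(init_type=DataHandlerLP.IT_FIT_SEQ)

    # or: fit the processors on the raw data first, then run the processing
    dh.setup_data(init_type=DataHandlerLP.IT_FIT_IND)

    # or: only apply already-fitted processors, e.g. after loading a pickled handler
    dh.setup_data(init_type=DataHandlerLP.IT_LS)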