code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def collect_data(
start_time: Union[pd.Timestamp, str],
end_time: Union[pd.Timestamp, str],
strategy: Union[str, dict, object, Path],
executor: Union[str, dict, object, Path],
benchmark: str = "SH000300",
account: Union[float, int, dict] = 1e9,
exchange_kwargs: dict = {},
pos_type: str = "Position",
return_value: dict | None = None,
) -> Generator[object, None, None]:
"""initialize the strategy and executor, then collect the trade decision data for rl training
please refer to the docs of the backtest for the explanation of the parameters
Yields
-------
object
trade decision
"""
trade_strategy, trade_executor = get_strategy_executor(
start_time,
end_time,
strategy,
executor,
benchmark,
account,
exchange_kwargs,
pos_type=pos_type,
)
yield from collect_data_loop(start_time, end_time, trade_strategy, trade_executor, return_value=return_value)
|
initialize the strategy and executor, then collect the trade decision data for rl training
please refer to the docs of the backtest for the explanation of the parameters
Yields
-------
object
trade decision
|
collect_data
|
python
|
microsoft/qlib
|
qlib/backtest/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/__init__.py
|
MIT
|
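Below is a minimal usage sketch for `collect_data`. It assumes `qlib.init()` has already been called with local daily data, and that `pred_score` is a pre-computed prediction `pd.Series` indexed by (datetime, instrument); the strategy/executor configs follow qlib's usual class/module_path/kwargs convention and are illustrative rather than authoritative.

```python
from qlib.backtest import collect_data

strategy_config = {
    "class": "TopkDropoutStrategy",
    "module_path": "qlib.contrib.strategy",
    "kwargs": {"signal": pred_score, "topk": 50, "n_drop": 5},  # pred_score: assumed pre-computed
}
executor_config = {
    "class": "SimulatorExecutor",
    "module_path": "qlib.backtest.executor",
    "kwargs": {"time_per_step": "day", "generate_portfolio_metrics": True},
}

# each yielded item is a trade decision produced while the backtest runs
decisions = list(
    collect_data(
        start_time="2020-01-01",
        end_time="2020-12-31",
        strategy=strategy_config,
        executor=executor_config,
        benchmark="SH000300",
        account=1e9,
    )
)
```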
def format_decisions(
decisions: List[BaseTradeDecision],
) -> Optional[Tuple[str, List[Tuple[BaseTradeDecision, Union[Tuple, None]]]]]:
"""
format the decisions collected by `qlib.backtest.collect_data`
The decisions will be organized into a tree-like structure.
Parameters
----------
decisions : List[BaseTradeDecision]
decisions collected by `qlib.backtest.collect_data`
Returns
-------
Tuple[str, List[Tuple[BaseTradeDecision, Union[Tuple, None]]]]:
reformat the list of decisions into a more user-friendly format
<decisions> := Tuple[<freq>, List[Tuple[<decision>, <sub decisions>]]]
- <sub decisions> := `<decisions> in lower level` | None
- <freq> := "day" | "30min" | "1min" | ...
- <decision> := <instance of BaseTradeDecision>
"""
if len(decisions) == 0:
return None
cur_freq = decisions[0].strategy.trade_calendar.get_freq()
res: Tuple[str, list] = (cur_freq, [])
last_dec_idx = 0
for i, dec in enumerate(decisions[1:], 1):
if dec.strategy.trade_calendar.get_freq() == cur_freq:
res[1].append((decisions[last_dec_idx], format_decisions(decisions[last_dec_idx + 1 : i])))
last_dec_idx = i
res[1].append((decisions[last_dec_idx], format_decisions(decisions[last_dec_idx + 1 :])))
return res
|
format the decisions collected by `qlib.backtest.collect_data`
The decisions will be organized into a tree-like structure.
Parameters
----------
decisions : List[BaseTradeDecision]
decisions collected by `qlib.backtest.collect_data`
Returns
-------
Tuple[str, List[Tuple[BaseTradeDecision, Union[Tuple, None]]]]:
reformat the list of decisions into a more user-friendly format
<decisions> := Tuple[<freq>, List[Tuple[<decision>, <sub decisions>]]]
- <sub decisions> := `<decisions> in lower level` | None
- <freq> := "day" | "30min" | "1min" | ...
- <decision> := <instance of BaseTradeDecision>
|
format_decisions
|
python
|
microsoft/qlib
|
qlib/backtest/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/__init__.py
|
MIT
|
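A self-contained sketch of the tree shape `format_decisions` produces. The `_decision` helper and its `SimpleNamespace` stand-ins are hypothetical; they only mimic the single attribute chain the function touches (`dec.strategy.trade_calendar.get_freq()`). The import assumes qlib is installed.

```python
from types import SimpleNamespace
from qlib.backtest import format_decisions

def _decision(freq):
    # stand-in exposing only strategy.trade_calendar.get_freq()
    cal = SimpleNamespace(get_freq=lambda: freq)
    return SimpleNamespace(strategy=SimpleNamespace(trade_calendar=cal))

# one daily decision with two 30min sub-decisions, followed by a second daily decision
decisions = [_decision("day"), _decision("30min"), _decision("30min"), _decision("day")]
tree = format_decisions(decisions)
# tree is shaped like:
# ("day", [(<day #1>, ("30min", [(<30min #1>, None), (<30min #2>, None)])),
#          (<day #2>, None)])
```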
def risk_analysis(r, N: int = None, freq: str = "day", mode: Literal["sum", "product"] = "sum"):
"""Risk Analysis
NOTE:
The calculation of annualized return is different from the definition of annualized return.
It is implemented by design.
Qlib tries to cumulate returns by summation instead of by product to avoid the cumulated curve being skewed exponentially.
All the calculation of annualized returns follows this principle in Qlib.
Parameters
----------
r : pandas.Series
daily return series.
N: int
scaler for annualizing information_ratio (day: 252, week: 50, month: 12), at least one of `N` and `freq` should exist
freq: str
analysis frequency used for calculating the scaler, at least one of `N` and `freq` should exist
mode: Literal["sum", "product"]
the method by which returns are accumulated:
- "sum": Arithmetic accumulation (linear returns).
- "product": Geometric accumulation (compounded returns).
"""
def cal_risk_analysis_scaler(freq):
_count, _freq = Freq.parse(freq)
_freq_scaler = {
Freq.NORM_FREQ_MINUTE: 240 * 238,
Freq.NORM_FREQ_DAY: 238,
Freq.NORM_FREQ_WEEK: 50,
Freq.NORM_FREQ_MONTH: 12,
}
return _freq_scaler[_freq] / _count
if N is None and freq is None:
raise ValueError("at least one of `N` and `freq` should exist")
if N is not None and freq is not None:
warnings.warn("risk_analysis freq will be ignored")
if N is None:
N = cal_risk_analysis_scaler(freq)
if mode == "sum":
mean = r.mean()
std = r.std(ddof=1)
annualized_return = mean * N
max_drawdown = (r.cumsum() - r.cumsum().cummax()).min()
elif mode == "product":
cumulative_curve = (1 + r).cumprod()
# geometric mean (compound annual growth rate)
mean = cumulative_curve.iloc[-1] ** (1 / len(r)) - 1
# volatility of log returns
std = np.log(1 + r).std(ddof=1)
cumulative_return = cumulative_curve.iloc[-1] - 1
annualized_return = (1 + cumulative_return) ** (N / len(r)) - 1
# max percentage drawdown from peak cumulative product
max_drawdown = (cumulative_curve / cumulative_curve.cummax() - 1).min()
else:
raise ValueError(f"risk_analysis accumulation mode {mode} is not supported. Expected `sum` or `product`.")
information_ratio = mean / std * np.sqrt(N)
data = {
"mean": mean,
"std": std,
"annualized_return": annualized_return,
"information_ratio": information_ratio,
"max_drawdown": max_drawdown,
}
res = pd.Series(data).to_frame("risk")
return res
|
Risk Analysis
NOTE:
The calculation of annualized return is different from the definition of annualized return.
It is implemented by design.
Qlib tries to cumulate returns by summation instead of by product to avoid the cumulated curve being skewed exponentially.
All the calculation of annualized returns follows this principle in Qlib.
Parameters
----------
r : pandas.Series
daily return series.
N: int
scaler for annualizing information_ratio (day: 252, week: 50, month: 12), at least one of `N` and `freq` should exist
freq: str
analysis frequency used for calculating the scaler, at least one of `N` and `freq` should exist
mode: Literal["sum", "product"]
the method by which returns are accumulated:
- "sum": Arithmetic accumulation (linear returns).
- "product": Geometric accumulation (compounded returns).
|
risk_analysis
|
python
|
microsoft/qlib
|
qlib/contrib/evaluate.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/evaluate.py
|
MIT
|
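A hand-computed check of the "sum"-mode metrics on synthetic daily returns. `N=252` (the conventional trading-day count) is passed explicitly here; the commented-out cross-check assumes qlib is installed.

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
r = pd.Series(rng.normal(5e-4, 0.01, 500))  # synthetic daily returns

N = 252
mean, std = r.mean(), r.std(ddof=1)
annualized_return = mean * N                         # arithmetic ("sum") annualization
information_ratio = mean / std * np.sqrt(N)
max_drawdown = (r.cumsum() - r.cumsum().cummax()).min()
print(annualized_return, information_ratio, max_drawdown)

# cross-check (optional, assumes qlib is installed):
# from qlib.contrib.evaluate import risk_analysis
# print(risk_analysis(r, N=252, mode="sum"))
```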
def indicator_analysis(df, method="mean"):
"""analyze statistical time-series indicators of trading
Parameters
----------
df : pandas.DataFrame
columns: like ['pa', 'pos', 'ffr', 'deal_amount', 'value'].
Necessary fields:
- 'pa' is the price advantage in trade indicators
- 'pos' is the positive rate in trade indicators
- 'ffr' is the fulfill rate in trade indicators
Optional fields:
- 'deal_amount' is the total deal amount, only necessary when method is 'amount_weighted'
- 'value' is the total trade value, only necessary when method is 'value_weighted'
index: Index(datetime)
method : str, optional
statistics method of pa/ffr, by default "mean"
- if method is 'mean', count the mean statistical value of each trade indicator
- if method is 'amount_weighted', count the deal_amount weighted mean statistical value of each trade indicator
- if method is 'value_weighted', count the value weighted mean statistical value of each trade indicator
Note: statistics method of pos is always "mean"
Returns
-------
pd.DataFrame
statistical value of each trade indicators
"""
weights_dict = {
"mean": df["count"],
"amount_weighted": df["deal_amount"].abs(),
"value_weighted": df["value"].abs(),
}
if method not in weights_dict:
raise ValueError(f"indicator_analysis method {method} is not supported!")
# statistic pa/ffr indicator
indicators_df = df[["ffr", "pa"]]
weights = weights_dict.get(method)
res = indicators_df.mul(weights, axis=0).sum() / weights.sum()
# statistic pos
weights = weights_dict.get("mean")
res.loc["pos"] = df["pos"].mul(weights).sum() / weights.sum()
res = res.to_frame("value")
return res
|
analyze statistical time-series indicators of trading
Parameters
----------
df : pandas.DataFrame
columns: like ['pa', 'pos', 'ffr', 'deal_amount', 'value'].
Necessary fields:
- 'pa' is the price advantage in trade indicators
- 'pos' is the positive rate in trade indicators
- 'ffr' is the fulfill rate in trade indicators
Optional fields:
- 'deal_amount' is the total deal amount, only necessary when method is 'amount_weighted'
- 'value' is the total trade value, only necessary when method is 'value_weighted'
index: Index(datetime)
method : str, optional
statistics method of pa/ffr, by default "mean"
- if method is 'mean', count the mean statistical value of each trade indicator
- if method is 'amount_weighted', count the deal_amount weighted mean statistical value of each trade indicator
- if method is 'value_weighted', count the value weighted mean statistical value of each trade indicator
Note: statistics method of pos is always "mean"
Returns
-------
pd.DataFrame
statistical value of each trade indicators
|
indicator_analysis
|
python
|
microsoft/qlib
|
qlib/contrib/evaluate.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/evaluate.py
|
MIT
|
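A toy input for `indicator_analysis` (assumes qlib is installed). Note that the 'count', 'deal_amount' and 'value' columns all need to be present, because the function builds its weight dictionary from all three even when only one method is requested.

```python
import pandas as pd
from qlib.contrib.evaluate import indicator_analysis

df = pd.DataFrame(
    {
        "pa": [0.01, -0.02, 0.03],      # price advantage
        "pos": [1.0, 0.0, 1.0],         # positive rate
        "ffr": [0.9, 1.0, 0.8],         # fulfill rate
        "count": [10, 20, 15],
        "deal_amount": [1000.0, 2000.0, 1500.0],
        "value": [1e5, 2e5, 1.5e5],
    },
    index=pd.to_datetime(["2020-01-02", "2020-01-03", "2020-01-06"]),
)
print(indicator_analysis(df, method="mean"))
print(indicator_analysis(df, method="value_weighted"))
```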
def _get_position_value_from_df(evaluate_date, position, close_data_df):
"""Get position value by existed close data df
close_data_df:
pd.DataFrame
multi-index
close_data_df['$close'][stock_id][evaluate_date]: close price for (stock_id, evaluate_date)
position:
same in get_position_value()
"""
value = 0
for stock_id, report in position.items():
if stock_id != "cash":
value += report["amount"] * close_data_df["$close"][stock_id][evaluate_date]
# value += report['amount'] * report['price']
if "cash" in position:
value += position["cash"]
return value
|
Get position value by existing close data df
close_data_df:
pd.DataFrame
multi-index
close_data_df['$close'][stock_id][evaluate_date]: close price for (stock_id, evaluate_date)
position:
same in get_position_value()
|
_get_position_value_from_df
|
python
|
microsoft/qlib
|
qlib/contrib/evaluate_portfolio.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/evaluate_portfolio.py
|
MIT
|
def get_position_value(evaluate_date, position):
"""sum of close*amount
get value of position
use close price
positions:
{
Timestamp('2016-01-05 00:00:00'):
{
'SH600022':
{
'amount':100.00,
'price':12.00
},
'cash':100000.0
}
}
It means holding 100.0 shares of 'SH600022' and 100000.0 RMB of cash on '2016-01-05'
"""
# load close price for position
# position should also consider cash
instruments = list(position.keys())
instruments = list(set(instruments) - {"cash"}) # filter 'cash'
fields = ["$close"]
close_data_df = D.features(
instruments,
fields,
start_time=evaluate_date,
end_time=evaluate_date,
freq="day",
disk_cache=0,
)
value = _get_position_value_from_df(evaluate_date, position, close_data_df)
return value
|
sum of close*amount
get value of position
use close price
positions:
{
Timestamp('2016-01-05 00:00:00'):
{
'SH600022':
{
'amount':100.00,
'price':12.00
},
'cash':100000.0
}
}
It means holding 100.0 shares of 'SH600022' and 100000.0 RMB of cash on '2016-01-05'
|
get_position_value
|
python
|
microsoft/qlib
|
qlib/contrib/evaluate_portfolio.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/evaluate_portfolio.py
|
MIT
|
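The position dictionary shape expected by `get_position_value`, with the valuation done by hand instead of loading close prices through `D.features`; the close price here is a made-up number.

```python
position = {
    "SH600022": {"amount": 100.0, "price": 12.0},
    "cash": 100000.0,
}
close_price = {"SH600022": 12.5}  # hypothetical close on the evaluation date

value = position["cash"] + sum(
    rec["amount"] * close_price[stock]
    for stock, rec in position.items()
    if stock != "cash"
)
print(value)  # 100000.0 + 100.0 * 12.5 = 101250.0
```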
def get_daily_return_series_from_positions(positions, init_asset_value):
"""Parameters
generate daily return series from position view
positions: positions generated by strategy
init_asset_value : init asset value
return: pd.Series of daily return , return_series[date] = daily return rate
"""
value_dict = get_position_list_value(positions)
value_series = pd.Series(value_dict)
value_series = value_series.sort_index() # check date
return_series = value_series.pct_change()
return_series[value_series.index[0]] = (
value_series[value_series.index[0]] / init_asset_value - 1
) # update daily return for the first date
return return_series
|
generate daily return series from position view
Parameters
positions: positions generated by strategy
init_asset_value : init asset value
return: pd.Series of daily return, return_series[date] = daily return rate
|
get_daily_return_series_from_positions
|
python
|
microsoft/qlib
|
qlib/contrib/evaluate_portfolio.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/evaluate_portfolio.py
|
MIT
|
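The same first-day handling as `get_daily_return_series_from_positions`, sketched on a hand-made value series: `pct_change` covers the later days, and the first day is measured against the initial asset value.

```python
import pandas as pd

init_asset_value = 1_000_000.0
values = pd.Series(
    [1_005_000.0, 1_001_000.0, 1_012_000.0],
    index=pd.to_datetime(["2020-01-02", "2020-01-03", "2020-01-06"]),
)
returns = values.pct_change()
returns.iloc[0] = values.iloc[0] / init_asset_value - 1  # first-day return vs. initial capital
print(returns)
```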
def get_annual_return_from_positions(positions, init_asset_value):
"""Annualized Returns
p_r = (p_end / p_start)^{(250/n)} - 1
p_r annual return
p_end final value
p_start init value
n days of backtest
"""
date_range_list = sorted(list(positions.keys()))
end_time = date_range_list[-1]
p_end = get_position_value(end_time, positions[end_time])
p_start = init_asset_value
n_period = len(date_range_list)
annual = pow((p_end / p_start), (250 / n_period)) - 1
return annual
|
Annualized Returns
p_r = (p_end / p_start)^{(250/n)} - 1
p_r annual return
p_end final value
p_start init value
n days of backtest
|
get_annual_return_from_positions
|
python
|
microsoft/qlib
|
qlib/contrib/evaluate_portfolio.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/evaluate_portfolio.py
|
MIT
|
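The annualization formula above, worked through with example numbers (125 trading days, an 8% period return):

```python
# p_r = (p_end / p_start) ** (250 / n) - 1
p_start, p_end, n_period = 1_000_000.0, 1_080_000.0, 125
annual = (p_end / p_start) ** (250 / n_period) - 1
print(annual)  # ~0.166, i.e. roughly 16.6% annualized
```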
def get_annaul_return_from_return_series(r, method="ci"):
"""Risk Analysis from daily return series
Parameters
----------
r : pandas.Series
daily return series
method : str
interest calculation method, ci(compound interest)/si(simple interest)
"""
mean = r.mean()
annual = (1 + mean) ** 250 - 1 if method == "ci" else mean * 250
return annual
|
Risk Analysis from daily return series
Parameters
----------
r : pandas.Series
daily return series
method : str
interest calculation method, ci(compound interest)/si(simple interest)
|
get_annaul_return_from_return_series
|
python
|
microsoft/qlib
|
qlib/contrib/evaluate_portfolio.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/evaluate_portfolio.py
|
MIT
|
def get_sharpe_ratio_from_return_series(r, risk_free_rate=0.00, method="ci"):
"""Risk Analysis
Parameters
----------
r : pandas.Series
daily return series
method : str
interest calculation method, ci(compound interest)/si(simple interest)
risk_free_rate : float
risk_free_rate, default as 0.00, can set as 0.03 etc
"""
std = r.std(ddof=1)
annual = get_annaul_return_from_return_series(r, method=method)
sharpe = (annual - risk_free_rate) / std / np.sqrt(250)
return sharpe
|
Risk Analysis
Parameters
----------
r : pandas.Series
daily return series
method : str
interest calculation method, ci(compound interest)/si(simple interest)
risk_free_rate : float
risk_free_rate, default as 0.00, can set as 0.03 etc
|
get_sharpe_ratio_from_return_series
|
python
|
microsoft/qlib
|
qlib/contrib/evaluate_portfolio.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/evaluate_portfolio.py
|
MIT
|
def get_max_drawdown_from_series(r):
"""Risk Analysis from asset value
cumprod way
Parameters
----------
r : pandas.Series
daily return series
"""
# mdd = ((r.cumsum() - r.cumsum().cummax()) / (1 + r.cumsum().cummax())).min()
mdd = (((1 + r).cumprod() - (1 + r).cumprod().cummax()) / ((1 + r).cumprod().cummax())).min()
return mdd
|
Risk Analysis from asset value
cumprod way
Parameters
----------
r : pandas.Series
daily return series
|
get_max_drawdown_from_series
|
python
|
microsoft/qlib
|
qlib/contrib/evaluate_portfolio.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/evaluate_portfolio.py
|
MIT
|
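The "cumprod way" drawdown on a short return series; `curve / curve.cummax() - 1` is algebraically the same as the expression used in the function.

```python
import pandas as pd

r = pd.Series([0.02, -0.05, 0.01, -0.03, 0.04])
curve = (1 + r).cumprod()                  # compounded equity curve
mdd = (curve / curve.cummax() - 1).min()   # deepest drop from the running peak
print(mdd)
```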
def get_beta(r, b):
"""Risk Analysis beta
Parameters
----------
r : pandas.Series
daily return series of strategy
b : pandas.Series
daily return series of baseline
"""
cov_r_b = np.cov(r, b)
var_b = np.var(b)
return cov_r_b / var_b
|
Risk Analysis beta
Parameters
----------
r : pandas.Series
daily return series of strategy
b : pandas.Series
daily return series of baseline
|
get_beta
|
python
|
microsoft/qlib
|
qlib/contrib/evaluate_portfolio.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/evaluate_portfolio.py
|
MIT
|
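One thing to keep in mind when reading `get_beta`: `np.cov(r, b)` returns a 2x2 covariance matrix, so dividing it by `var_b` yields a matrix rather than a scalar. A scalar beta uses the off-diagonal covariance, as in this hedged sketch (with consistent `ddof`):

```python
import numpy as np

rng = np.random.default_rng(1)
b = rng.normal(0, 0.01, 250)              # baseline daily returns
r = 1.2 * b + rng.normal(0, 0.005, 250)   # strategy returns with true beta around 1.2

beta = np.cov(r, b, ddof=1)[0, 1] / np.var(b, ddof=1)
print(beta)  # close to 1.2
```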
def _create_ts_slices(index, seq_len):
"""
create time series slices from pandas index
Args:
index (pd.MultiIndex): pandas multiindex with <instrument, datetime> order
seq_len (int): sequence length
"""
assert isinstance(index, pd.MultiIndex), "unsupported index type"
assert seq_len > 0, "sequence length should be larger than 0"
assert index.is_monotonic_increasing, "index should be sorted"
# number of dates for each instrument
sample_count_by_insts = index.to_series().groupby(level=0, group_keys=False).size().values
# start index for each instrument
start_index_of_insts = np.roll(np.cumsum(sample_count_by_insts), 1)
start_index_of_insts[0] = 0
# all the [start, stop) indices of features
# features between [start, stop) will be used to predict label at `stop - 1`
slices = []
for cur_loc, cur_cnt in zip(start_index_of_insts, sample_count_by_insts):
for stop in range(1, cur_cnt + 1):
end = cur_loc + stop
start = max(end - seq_len, 0)
slices.append(slice(start, end))
slices = np.array(slices, dtype="object")
assert len(slices) == len(index) # the i-th slice = index[i]
return slices
|
create time series slices from pandas index
Args:
index (pd.MultiIndex): pandas multiindex with <instrument, datetime> order
seq_len (int): sequence length
|
_create_ts_slices
|
python
|
microsoft/qlib
|
qlib/contrib/data/dataset.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/data/dataset.py
|
MIT
|
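A tiny demonstration of `_create_ts_slices` on a two-instrument, three-date MultiIndex; the private helper is imported here purely for illustration and assumes qlib is installed.

```python
import pandas as pd
from qlib.contrib.data.dataset import _create_ts_slices

idx = pd.MultiIndex.from_product(
    [["SH600000", "SH600036"], pd.to_datetime(["2020-01-02", "2020-01-03", "2020-01-06"])],
    names=["instrument", "datetime"],
)
slices = _create_ts_slices(idx, seq_len=2)
# one [start, stop) slice per row; features in slices[i] predict the label at position i
print(list(slices))
```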
def _get_date_parse_fn(target):
"""get date parse function
This method is used to parse date arguments as target type.
Example:
get_date_parse_fn('20120101')('2017-01-01') => '20170101'
get_date_parse_fn(20120101)('2017-01-01') => 20170101
"""
if isinstance(target, int):
def _fn(x):
return int(str(x).replace("-", "")[:8]) # 20200201
elif isinstance(target, str) and len(target) == 8:
def _fn(x):
return str(x).replace("-", "")[:8] # '20200201'
else:
def _fn(x):
return x # '2021-01-01'
return _fn
|
get date parse function
This method is used to parse date arguments as target type.
Example:
get_date_parse_fn('20120101')('2017-01-01') => '20170101'
get_date_parse_fn(20120101)('2017-01-01') => 20170101
|
_get_date_parse_fn
|
python
|
microsoft/qlib
|
qlib/contrib/data/dataset.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/data/dataset.py
|
MIT
|
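The docstring examples above, written as runnable assertions (the private helper is imported only for illustration, assuming qlib is installed):

```python
from qlib.contrib.data.dataset import _get_date_parse_fn

assert _get_date_parse_fn("20120101")("2017-01-01") == "20170101"       # 8-char string target
assert _get_date_parse_fn(20120101)("2017-01-01") == 20170101            # int target
assert _get_date_parse_fn("2012-01-01")("2017-01-01") == "2017-01-01"    # any other target: identity
```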
def _maybe_padding(x, seq_len, zeros=None):
"""padding 2d <time * feature> data with zeros
Args:
x (np.ndarray): 2d data with shape <time * feature>
seq_len (int): target sequence length
zeros (np.ndarray): zeros with shape <seq_len * feature>
"""
assert seq_len > 0, "sequence length should be larger than 0"
if zeros is None:
zeros = np.zeros((seq_len, x.shape[1]), dtype=np.float32)
else:
assert len(zeros) >= seq_len, "zeros matrix is not large enough for padding"
if len(x) != seq_len: # padding zeros
x = np.concatenate([zeros[: seq_len - len(x), : x.shape[1]], x], axis=0)
return x
|
padding 2d <time * feature> data with zeros
Args:
x (np.ndarray): 2d data with shape <time * feature>
seq_len (int): target sequence length
zeros (np.ndarray): zeros with shape <seq_len * feature>
|
_maybe_padding
|
python
|
microsoft/qlib
|
qlib/contrib/data/dataset.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/data/dataset.py
|
MIT
|
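Front-padding a short <time, feature> window up to `seq_len`, mirroring the concatenation in `_maybe_padding`:

```python
import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)  # only 2 of the 5 required time steps
seq_len = 5
zeros = np.zeros((seq_len, x.shape[1]), dtype=np.float32)
padded = np.concatenate([zeros[: seq_len - len(x)], x], axis=0)
print(padded.shape)  # (5, 3); the first three rows are zeros, the last two are x
```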
def get_pre_datasets(self):
"""Generate the training, validation and test datasets for prediction
Returns:
Tuple[BaseDataset, BaseDataset, BaseDataset]: The training, validation and test datasets
"""
dict_feature_path = self.feature_conf["path"]
train_feature_path = dict_feature_path[:-4] + "_train.pkl"
valid_feature_path = dict_feature_path[:-4] + "_valid.pkl"
test_feature_path = dict_feature_path[:-4] + "_test.pkl"
dict_label_path = self.label_conf["path"]
train_label_path = dict_label_path[:-4] + "_train.pkl"
valid_label_path = dict_label_path[:-4] + "_valid.pkl"
test_label_path = dict_label_path[:-4] + "_test.pkl"
if (
not os.path.isfile(train_feature_path)
or not os.path.isfile(valid_feature_path)
or not os.path.isfile(test_feature_path)
):
xtrain, xvalid, xtest = self._gen_data(self.feature_conf)
xtrain.to_pickle(train_feature_path)
xvalid.to_pickle(valid_feature_path)
xtest.to_pickle(test_feature_path)
del xtrain, xvalid, xtest
if (
not os.path.isfile(train_label_path)
or not os.path.isfile(valid_label_path)
or not os.path.isfile(test_label_path)
):
ytrain, yvalid, ytest = self._gen_data(self.label_conf)
ytrain.to_pickle(train_label_path)
yvalid.to_pickle(valid_label_path)
ytest.to_pickle(test_label_path)
del ytrain, yvalid, ytest
feature = {
"train": train_feature_path,
"valid": valid_feature_path,
"test": test_feature_path,
}
label = {
"train": train_label_path,
"valid": valid_label_path,
"test": test_label_path,
}
return feature, label
|
Generate the training, validation and test datasets for prediction
Returns:
Tuple[BaseDataset, BaseDataset, BaseDataset]: The training, validation and test datasets
|
get_pre_datasets
|
python
|
microsoft/qlib
|
qlib/contrib/data/highfreq_provider.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/data/highfreq_provider.py
|
MIT
|
def get_feature_config(
config={
"kbar": {},
"price": {
"windows": [0],
"feature": ["OPEN", "HIGH", "LOW", "VWAP"],
},
"rolling": {},
}
):
"""create factors from config
config = {
'kbar': {}, # whether to use some hard-coded kbar features
'price': { # whether to use raw price features
'windows': [0, 1, 2, 3, 4], # use price at n days ago
'feature': ['OPEN', 'HIGH', 'LOW'] # which price field to use
},
'volume': { # whether to use raw volume features
'windows': [0, 1, 2, 3, 4], # use volume at n days ago
},
'rolling': { # whether to use rolling operator based features
'windows': [5, 10, 20, 30, 60], # rolling windows size
'include': ['ROC', 'MA', 'STD'], # rolling operator to use
#if include is None we will use default operators
'exclude': ['RANK'], # rolling operator not to use
}
}
"""
fields = []
names = []
if "kbar" in config:
fields += [
"($close-$open)/$open",
"($high-$low)/$open",
"($close-$open)/($high-$low+1e-12)",
"($high-Greater($open, $close))/$open",
"($high-Greater($open, $close))/($high-$low+1e-12)",
"(Less($open, $close)-$low)/$open",
"(Less($open, $close)-$low)/($high-$low+1e-12)",
"(2*$close-$high-$low)/$open",
"(2*$close-$high-$low)/($high-$low+1e-12)",
]
names += [
"KMID",
"KLEN",
"KMID2",
"KUP",
"KUP2",
"KLOW",
"KLOW2",
"KSFT",
"KSFT2",
]
if "price" in config:
windows = config["price"].get("windows", range(5))
feature = config["price"].get("feature", ["OPEN", "HIGH", "LOW", "CLOSE", "VWAP"])
for field in feature:
field = field.lower()
fields += ["Ref($%s, %d)/$close" % (field, d) if d != 0 else "$%s/$close" % field for d in windows]
names += [field.upper() + str(d) for d in windows]
if "volume" in config:
windows = config["volume"].get("windows", range(5))
fields += ["Ref($volume, %d)/($volume+1e-12)" % d if d != 0 else "$volume/($volume+1e-12)" for d in windows]
names += ["VOLUME" + str(d) for d in windows]
if "rolling" in config:
windows = config["rolling"].get("windows", [5, 10, 20, 30, 60])
include = config["rolling"].get("include", None)
exclude = config["rolling"].get("exclude", [])
# `exclude` in the dataset config lists unnecessary fields (rolling operators not to use)
# `include` in the dataset config lists necessary fields (rolling operators to use)
def use(x):
return x not in exclude and (include is None or x in include)
# Some factor ref: https://guorn.com/static/upload/file/3/134065454575605.pdf
if use("ROC"):
# https://www.investopedia.com/terms/r/rateofchange.asp
# Rate of change, the price change in the past d days, divided by latest close price to remove unit
fields += ["Ref($close, %d)/$close" % d for d in windows]
names += ["ROC%d" % d for d in windows]
if use("MA"):
# https://www.investopedia.com/ask/answers/071414/whats-difference-between-moving-average-and-weighted-moving-average.asp
# Simple Moving Average, the simple moving average in the past d days, divided by latest close price to remove unit
fields += ["Mean($close, %d)/$close" % d for d in windows]
names += ["MA%d" % d for d in windows]
if use("STD"):
# The standard deviation of close price for the past d days, divided by latest close price to remove unit
fields += ["Std($close, %d)/$close" % d for d in windows]
names += ["STD%d" % d for d in windows]
if use("BETA"):
# The rate of close price change in the past d days, divided by latest close price to remove unit
# For example, price increase 10 dollar per day in the past d days, then Slope will be 10.
fields += ["Slope($close, %d)/$close" % d for d in windows]
names += ["BETA%d" % d for d in windows]
if use("RSQR"):
# The R-square value of linear regression for the past d days, representing the trend linearity
fields += ["Rsquare($close, %d)" % d for d in windows]
names += ["RSQR%d" % d for d in windows]
if use("RESI"):
# The residual of linear regression for the past d days, representing the trend linearity for past d days.
fields += ["Resi($close, %d)/$close" % d for d in windows]
names += ["RESI%d" % d for d in windows]
if use("MAX"):
# The max price for past d days, divided by latest close price to remove unit
fields += ["Max($high, %d)/$close" % d for d in windows]
names += ["MAX%d" % d for d in windows]
if use("LOW"):
# The lowest price for past d days, divided by latest close price to remove unit
fields += ["Min($low, %d)/$close" % d for d in windows]
names += ["MIN%d" % d for d in windows]
if use("QTLU"):
# The 80% quantile of past d day's close price, divided by latest close price to remove unit
# Used with MIN and MAX
fields += ["Quantile($close, %d, 0.8)/$close" % d for d in windows]
names += ["QTLU%d" % d for d in windows]
if use("QTLD"):
# The 20% quantile of past d day's close price, divided by latest close price to remove unit
fields += ["Quantile($close, %d, 0.2)/$close" % d for d in windows]
names += ["QTLD%d" % d for d in windows]
if use("RANK"):
# Get the percentile of current close price in past d day's close price.
# Represent the current price level comparing to past N days, add additional information to moving average.
fields += ["Rank($close, %d)" % d for d in windows]
names += ["RANK%d" % d for d in windows]
if use("RSV"):
# Represents the price position between the upper and lower resistance prices over the past d days.
fields += ["($close-Min($low, %d))/(Max($high, %d)-Min($low, %d)+1e-12)" % (d, d, d) for d in windows]
names += ["RSV%d" % d for d in windows]
if use("IMAX"):
# The number of days between current date and previous highest price date.
# Part of Aroon Indicator https://www.investopedia.com/terms/a/aroon.asp
# The indicator measures the time between highs and the time between lows over a time period.
# The idea is that strong uptrends will regularly see new highs, and strong downtrends will regularly see new lows.
fields += ["IdxMax($high, %d)/%d" % (d, d) for d in windows]
names += ["IMAX%d" % d for d in windows]
if use("IMIN"):
# The number of days between current date and previous lowest price date.
# Part of Aroon Indicator https://www.investopedia.com/terms/a/aroon.asp
# The indicator measures the time between highs and the time between lows over a time period.
# The idea is that strong uptrends will regularly see new highs, and strong downtrends will regularly see new lows.
fields += ["IdxMin($low, %d)/%d" % (d, d) for d in windows]
names += ["IMIN%d" % d for d in windows]
if use("IMXD"):
# The number of days between the previous highest-price date and the previous lowest-price date.
# A large value suggests downward momentum.
fields += ["(IdxMax($high, %d)-IdxMin($low, %d))/%d" % (d, d, d) for d in windows]
names += ["IMXD%d" % d for d in windows]
if use("CORR"):
# The correlation between absolute close price and log scaled trading volume
fields += ["Corr($close, Log($volume+1), %d)" % d for d in windows]
names += ["CORR%d" % d for d in windows]
if use("CORD"):
# The correlation between price change ratio and volume change ratio
fields += ["Corr($close/Ref($close,1), Log($volume/Ref($volume, 1)+1), %d)" % d for d in windows]
names += ["CORD%d" % d for d in windows]
if use("CNTP"):
# The percentage of days in past d days that price go up.
fields += ["Mean($close>Ref($close, 1), %d)" % d for d in windows]
names += ["CNTP%d" % d for d in windows]
if use("CNTN"):
# The percentage of days in past d days that price go down.
fields += ["Mean($close<Ref($close, 1), %d)" % d for d in windows]
names += ["CNTN%d" % d for d in windows]
if use("CNTD"):
# The diff between past up day and past down day
fields += ["Mean($close>Ref($close, 1), %d)-Mean($close<Ref($close, 1), %d)" % (d, d) for d in windows]
names += ["CNTD%d" % d for d in windows]
if use("SUMP"):
# The total gain / the absolute total price changed
# Similar to RSI indicator. https://www.investopedia.com/terms/r/rsi.asp
fields += [
"Sum(Greater($close-Ref($close, 1), 0), %d)/(Sum(Abs($close-Ref($close, 1)), %d)+1e-12)" % (d, d)
for d in windows
]
names += ["SUMP%d" % d for d in windows]
if use("SUMN"):
# The total loss / the absolute total price changed
# Can be derived from SUMP by SUMN = 1 - SUMP
# Similar to RSI indicator. https://www.investopedia.com/terms/r/rsi.asp
fields += [
"Sum(Greater(Ref($close, 1)-$close, 0), %d)/(Sum(Abs($close-Ref($close, 1)), %d)+1e-12)" % (d, d)
for d in windows
]
names += ["SUMN%d" % d for d in windows]
if use("SUMD"):
# The diff ratio between total gain and total loss
# Similar to RSI indicator. https://www.investopedia.com/terms/r/rsi.asp
fields += [
"(Sum(Greater($close-Ref($close, 1), 0), %d)-Sum(Greater(Ref($close, 1)-$close, 0), %d))"
"/(Sum(Abs($close-Ref($close, 1)), %d)+1e-12)" % (d, d, d)
for d in windows
]
names += ["SUMD%d" % d for d in windows]
if use("VMA"):
# Simple Volume Moving average: https://www.barchart.com/education/technical-indicators/volume_moving_average
fields += ["Mean($volume, %d)/($volume+1e-12)" % d for d in windows]
names += ["VMA%d" % d for d in windows]
if use("VSTD"):
# The standard deviation for volume in past d days.
fields += ["Std($volume, %d)/($volume+1e-12)" % d for d in windows]
names += ["VSTD%d" % d for d in windows]
if use("WVMA"):
# The volume weighted price change volatility
fields += [
"Std(Abs($close/Ref($close, 1)-1)*$volume, %d)/(Mean(Abs($close/Ref($close, 1)-1)*$volume, %d)+1e-12)"
% (d, d)
for d in windows
]
names += ["WVMA%d" % d for d in windows]
if use("VSUMP"):
# The total volume increase / the absolute total volume changed
fields += [
"Sum(Greater($volume-Ref($volume, 1), 0), %d)/(Sum(Abs($volume-Ref($volume, 1)), %d)+1e-12)"
% (d, d)
for d in windows
]
names += ["VSUMP%d" % d for d in windows]
if use("VSUMN"):
# The total volume decrease / the absolute total volume changed
# Can be derived from VSUMP by VSUMN = 1 - VSUMP
fields += [
"Sum(Greater(Ref($volume, 1)-$volume, 0), %d)/(Sum(Abs($volume-Ref($volume, 1)), %d)+1e-12)"
% (d, d)
for d in windows
]
names += ["VSUMN%d" % d for d in windows]
if use("VSUMD"):
# The diff ratio between total volume increase and total volume decrease
# RSI indicator for volume
fields += [
"(Sum(Greater($volume-Ref($volume, 1), 0), %d)-Sum(Greater(Ref($volume, 1)-$volume, 0), %d))"
"/(Sum(Abs($volume-Ref($volume, 1)), %d)+1e-12)" % (d, d, d)
for d in windows
]
names += ["VSUMD%d" % d for d in windows]
return fields, names
|
create factors from config
config = {
'kbar': {}, # whether to use some hard-coded kbar features
'price': { # whether to use raw price features
'windows': [0, 1, 2, 3, 4], # use price at n days ago
'feature': ['OPEN', 'HIGH', 'LOW'] # which price field to use
},
'volume': { # whether to use raw volume features
'windows': [0, 1, 2, 3, 4], # use volume at n days ago
},
'rolling': { # whether to use rolling operator based features
'windows': [5, 10, 20, 30, 60], # rolling windows size
'include': ['ROC', 'MA', 'STD'], # rolling operator to use
#if include is None we will use default operators
'exclude': ['RANK'], # rolling operator not to use
}
}
|
get_feature_config
|
python
|
microsoft/qlib
|
qlib/contrib/data/loader.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/data/loader.py
|
MIT
|
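The rolling-operator entries are just string templates rendered per window and later evaluated by qlib's expression engine; here are the ROC and RSV templates from the source rendered for two windows.

```python
windows = [5, 10]
roc_fields = ["Ref($close, %d)/$close" % d for d in windows]
rsv_fields = [
    "($close-Min($low, %d))/(Max($high, %d)-Min($low, %d)+1e-12)" % (d, d, d)
    for d in windows
]
print(roc_fields)     # ['Ref($close, 5)/$close', 'Ref($close, 10)/$close']
print(rsv_fields[0])  # ($close-Min($low, 5))/(Max($high, 5)-Min($low, 5)+1e-12)
```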
def __init__(self, df_dict: Dict[str, pd.DataFrame], join: str, skip_align=False):
"""
initialize the data based on the dataframe dictionary
Parameters
----------
df_dict : Dict[str, pd.DataFrame]
dataframe dictionary
join : str
how to join the data
It will reindex the dataframe based on the join key.
If join is None, the reindex step will be skipped
skip_align :
for some cases, we can improve performance by skipping aligning index
"""
self.join = join
if skip_align:
self._df_dict = df_dict
else:
self._df_dict = align_index(df_dict, join)
|
initialize the data based on the dataframe dictionary
Parameters
----------
df_dict : Dict[str, pd.DataFrame]
dataframe dictionary
join : str
how to join the data
It will reindex the dataframe based on the join key.
If join is None, the reindex step will be skipped
skip_align :
for some cases, we can improve performance by skipping aligning index
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/data/utils/sepdf.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/data/utils/sepdf.py
|
MIT
|
def apply_each(self, method: str, skip_align=True, *args, **kwargs):
"""
Assumptions:
- inplace methods will return None
"""
inplace = False
df_dict = {}
for k, df in self._df_dict.items():
df_dict[k] = getattr(df, method)(*args, **kwargs)
if df_dict[k] is None:
inplace = True
if not inplace:
return SepDataFrame(df_dict=df_dict, join=self.join, skip_align=skip_align)
|
Assumptions:
- inplace methods will return None
|
apply_each
|
python
|
microsoft/qlib
|
qlib/contrib/data/utils/sepdf.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/data/utils/sepdf.py
|
MIT
|
def calc_long_short_prec(
pred: pd.Series, label: pd.Series, date_col="datetime", quantile: float = 0.2, dropna=False, is_alpha=False
) -> Tuple[pd.Series, pd.Series]:
"""
calculate the precision for long and short operation
:param pred/label: index is **pd.MultiIndex**, index name is **[datetime, instruments]**; the column name is **score**.
.. code-block:: python
score
datetime instrument
2020-12-01 09:30:00 SH600068 0.553634
SH600195 0.550017
SH600276 0.540321
SH600584 0.517297
SH600715 0.544674
label :
label
date_col :
date_col
Returns
-------
(pd.Series, pd.Series)
long precision and short precision in time level
"""
if is_alpha:
label = label - label.groupby(level=date_col, group_keys=False).mean()
if int(1 / quantile) >= len(label.index.get_level_values(1).unique()):
raise ValueError("Need more instruments to calculate precision")
df = pd.DataFrame({"pred": pred, "label": label})
if dropna:
df.dropna(inplace=True)
group = df.groupby(level=date_col, group_keys=False)
def N(x):
return int(len(x) * quantile)
# find the top/low quantile of prediction and treat them as long and short target
long = group.apply(lambda x: x.nlargest(N(x), columns="pred").label)
short = group.apply(lambda x: x.nsmallest(N(x), columns="pred").label)
groupll = long.groupby(date_col, group_keys=False)
l_dom = groupll.apply(lambda x: x > 0)
l_c = groupll.count()
groups = short.groupby(date_col, group_keys=False)
s_dom = groups.apply(lambda x: x < 0)
s_c = groups.count()
return (l_dom.groupby(date_col, group_keys=False).sum() / l_c), (
s_dom.groupby(date_col, group_keys=False).sum() / s_c
)
|
calculate the precision for long and short operation
:param pred/label: index is **pd.MultiIndex**, index name is **[datetime, instruments]**; the column name is **score**.
.. code-block:: python
score
datetime instrument
2020-12-01 09:30:00 SH600068 0.553634
SH600195 0.550017
SH600276 0.540321
SH600584 0.517297
SH600715 0.544674
label :
label
date_col :
date_col
Returns
-------
(pd.Series, pd.Series)
long precision and short precision in time level
|
calc_long_short_prec
|
python
|
microsoft/qlib
|
qlib/contrib/eva/alpha.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/eva/alpha.py
|
MIT
|
def calc_long_short_return(
pred: pd.Series,
label: pd.Series,
date_col: str = "datetime",
quantile: float = 0.2,
dropna: bool = False,
) -> Tuple[pd.Series, pd.Series]:
"""
calculate long-short return
Note:
`label` must be raw stock returns.
Parameters
----------
pred : pd.Series
stock predictions
label : pd.Series
stock returns
date_col : str
datetime index name
quantile : float
long-short quantile
Returns
----------
long_short_r : pd.Series
daily long-short returns
long_avg_r : pd.Series
daily long-average returns
"""
df = pd.DataFrame({"pred": pred, "label": label})
if dropna:
df.dropna(inplace=True)
group = df.groupby(level=date_col, group_keys=False)
def N(x):
return int(len(x) * quantile)
r_long = group.apply(lambda x: x.nlargest(N(x), columns="pred").label.mean())
r_short = group.apply(lambda x: x.nsmallest(N(x), columns="pred").label.mean())
r_avg = group.label.mean()
return (r_long - r_short) / 2, r_avg
|
calculate long-short return
Note:
`label` must be raw stock returns.
Parameters
----------
pred : pd.Series
stock predictions
label : pd.Series
stock returns
date_col : str
datetime index name
quantile : float
long-short quantile
Returns
----------
long_short_r : pd.Series
daily long-short returns
long_avg_r : pd.Series
daily long-average returns
|
calc_long_short_return
|
python
|
microsoft/qlib
|
qlib/contrib/eva/alpha.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/eva/alpha.py
|
MIT
|
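A toy two-day, four-stock panel for `calc_long_short_return` (assumes qlib is installed). With `quantile=0.25`, one stock is picked for each side per day.

```python
import pandas as pd
from qlib.contrib.eva.alpha import calc_long_short_return

idx = pd.MultiIndex.from_product(
    [pd.to_datetime(["2020-01-02", "2020-01-03"]), ["A", "B", "C", "D"]],
    names=["datetime", "instrument"],
)
pred = pd.Series([0.9, 0.1, 0.5, 0.3, 0.2, 0.8, 0.6, 0.4], index=idx)              # scores
label = pd.Series([0.02, -0.01, 0.00, 0.01, -0.02, 0.03, 0.01, 0.00], index=idx)   # raw returns

long_short_r, avg_r = calc_long_short_return(pred, label, quantile=0.25)
print(long_short_r)  # per-day (top-quantile mean - bottom-quantile mean) / 2
print(avg_r)         # per-day cross-sectional mean return
```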
def pred_autocorr_all(pred_dict, n_jobs=-1, **kwargs):
"""
calculate auto correlation for pred_dict
Parameters
----------
pred_dict : dict
A dict like {<method_name>: <prediction>}
kwargs :
all these arguments will be passed into pred_autocorr
"""
ac_dict = {}
for k, pred in pred_dict.items():
ac_dict[k] = delayed(pred_autocorr)(pred, **kwargs)
return complex_parallel(Parallel(n_jobs=n_jobs, verbose=10), ac_dict)
|
calculate auto correlation for pred_dict
Parameters
----------
pred_dict : dict
A dict like {<method_name>: <prediction>}
kwargs :
all these arguments will be passed into pred_autocorr
|
pred_autocorr_all
|
python
|
microsoft/qlib
|
qlib/contrib/eva/alpha.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/eva/alpha.py
|
MIT
|
def calc_ic(pred: pd.Series, label: pd.Series, date_col="datetime", dropna=False) -> (pd.Series, pd.Series):
"""calc_ic.
Parameters
----------
pred :
pred
label :
label
date_col :
date_col
Returns
-------
(pd.Series, pd.Series)
ic and rank ic
"""
df = pd.DataFrame({"pred": pred, "label": label})
ic = df.groupby(date_col, group_keys=False).apply(lambda df: df["pred"].corr(df["label"]))
ric = df.groupby(date_col, group_keys=False).apply(lambda df: df["pred"].corr(df["label"], method="spearman"))
if dropna:
return ic.dropna(), ric.dropna()
else:
return ic, ric
|
calc_ic.
Parameters
----------
pred :
pred
label :
label
date_col :
date_col
Returns
-------
(pd.Series, pd.Series)
ic and rank ic
|
calc_ic
|
python
|
microsoft/qlib
|
qlib/contrib/eva/alpha.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/eva/alpha.py
|
MIT
|
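The per-day IC / rank IC computation in `calc_ic` is just a grouped Pearson and Spearman correlation; a plain-pandas sketch of the same calculation:

```python
import pandas as pd

idx = pd.MultiIndex.from_product(
    [pd.to_datetime(["2020-01-02", "2020-01-03"]), ["A", "B", "C", "D"]],
    names=["datetime", "instrument"],
)
pred = pd.Series([0.9, 0.1, 0.5, 0.3, 0.2, 0.8, 0.6, 0.4], index=idx)
label = pd.Series([0.02, -0.01, 0.00, 0.01, -0.02, 0.03, 0.01, 0.00], index=idx)

df = pd.DataFrame({"pred": pred, "label": label})
ic = df.groupby("datetime", group_keys=False).apply(lambda d: d["pred"].corr(d["label"]))
ric = df.groupby("datetime", group_keys=False).apply(
    lambda d: d["pred"].corr(d["label"], method="spearman")
)
print(ic, ric, sep="\n")
```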
def calc_all_ic(pred_dict_all, label, date_col="datetime", dropna=False, n_jobs=-1):
"""calc_all_ic.
Parameters
----------
pred_dict_all :
A dict like {<method_name>: <prediction>}
label:
A pd.Series of label values
Returns
-------
{'Q2+IND_z': {'ic': <ic series like>
2016-01-04 -0.057407
...
2020-05-28 0.183470
2020-05-29 0.171393
'ric': <rank ic series like>
2016-01-04 -0.040888
...
2020-05-28 0.236665
2020-05-29 0.183886
}
...}
"""
pred_all_ics = {}
for k, pred in pred_dict_all.items():
pred_all_ics[k] = DelayedDict(["ic", "ric"], delayed(calc_ic)(pred, label, date_col=date_col, dropna=dropna))
pred_all_ics = complex_parallel(Parallel(n_jobs=n_jobs, verbose=10), pred_all_ics)
return pred_all_ics
|
calc_all_ic.
Parameters
----------
pred_dict_all :
A dict like {<method_name>: <prediction>}
label:
A pd.Series of label values
Returns
-------
{'Q2+IND_z': {'ic': <ic series like>
2016-01-04 -0.057407
...
2020-05-28 0.183470
2020-05-29 0.171393
'ric': <rank ic series like>
2016-01-04 -0.040888
...
2020-05-28 0.236665
2020-05-29 0.183886
}
...}
|
calc_all_ic
|
python
|
microsoft/qlib
|
qlib/contrib/eva/alpha.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/eva/alpha.py
|
MIT
|
def setup(self, trainer=TrainerR, trainer_kwargs={}):
"""
after running this function `self.data_ic_df` will become set.
Each col represents a data.
Each row represents the Timestamp of performance of that data.
For example,
.. code-block:: python
2021-06-21 2021-06-04 2021-05-21 2021-05-07 2021-04-20 2021-04-06 2021-03-22 2021-03-08 ...
2021-07-02 2021-06-18 2021-06-03 2021-05-20 2021-05-06 2021-04-19 2021-04-02 2021-03-19 ...
datetime ...
2018-01-02 0.079782 0.115975 0.070866 0.028849 -0.081170 0.140380 0.063864 0.110987 ...
2018-01-03 0.123386 0.107789 0.071037 0.045278 -0.060782 0.167446 0.089779 0.124476 ...
2018-01-04 0.140775 0.097206 0.063702 0.042415 -0.078164 0.173218 0.098914 0.114389 ...
2018-01-05 0.030320 -0.037209 -0.044536 -0.047267 -0.081888 0.045648 0.059947 0.047652 ...
2018-01-08 0.107201 0.009219 -0.015995 -0.036594 -0.086633 0.108965 0.122164 0.108508 ...
... ... ... ... ... ... ... ... ... ...
"""
# 1) prepare the prediction of proxy models
perf_task_tpl = deepcopy(self.task_tpl) # this task is supposed to contain no complicated objects
# The only thing we want to save is the prediction
perf_task_tpl["record"] = ["qlib.workflow.record_temp.SignalRecord"]
trainer = auto_filter_kwargs(trainer)(experiment_name=self.exp_name, **trainer_kwargs)
# NOTE:
# The handler is initialized for only once.
if not trainer.has_worker():
self.dh = init_task_handler(perf_task_tpl)
self.dh.config(dump_all=False) # in some cases, the data handler is saved to disk with `dump_all=True`
else:
self.dh = init_instance_by_config(perf_task_tpl["dataset"]["kwargs"]["handler"])
assert self.dh.dump_all is False # otherwise, it will save all the detailed data
seg = perf_task_tpl["dataset"]["kwargs"]["segments"]
# We want to split the training time period into small segments.
perf_task_tpl["dataset"]["kwargs"]["segments"] = {
"train": (DatasetH.get_min_time(seg), DatasetH.get_max_time(seg)),
"test": (None, None),
}
# NOTE:
# we play a trick here
# treat the training segments as test to create the rolling tasks
rg = RollingGen(step=self.step, test_key="train", train_key=None, task_copy_func=deepcopy_basic_type)
gen_task = task_generator(perf_task_tpl, [rg])
recorders = R.list_recorders(experiment_name=self.exp_name)
if len(gen_task) == len(recorders):
get_module_logger("Internal Data").info("the data has been initialized")
else:
# train new models
assert 0 == len(recorders), "An empty experiment is required for setup `InternalData`"
trainer.train(gen_task)
# 2) extract the similarity matrix
label_df = self.dh.fetch(col_set="label")
# for
recorders = R.list_recorders(experiment_name=self.exp_name)
key_l = []
ic_l = []
for _, rec in tqdm(recorders.items(), desc="calc"):
pred = rec.load_object("pred.pkl")
task = rec.load_object("task")
data_key = task["dataset"]["kwargs"]["segments"]["train"]
key_l.append(data_key)
ic_l.append(delayed(self._calc_perf)(pred.iloc[:, 0], label_df.iloc[:, 0]))
ic_l = Parallel(n_jobs=-1)(ic_l)
self.data_ic_df = pd.DataFrame(dict(zip(key_l, ic_l)))
self.data_ic_df = self.data_ic_df.sort_index().sort_index(axis=1)
del self.dh # handler is not useful now
|
after running this function `self.data_ic_df` will become set.
Each col represents a data.
Each row represents the Timestamp of performance of that data.
For example,
.. code-block:: python
2021-06-21 2021-06-04 2021-05-21 2021-05-07 2021-04-20 2021-04-06 2021-03-22 2021-03-08 ...
2021-07-02 2021-06-18 2021-06-03 2021-05-20 2021-05-06 2021-04-19 2021-04-02 2021-03-19 ...
datetime ...
2018-01-02 0.079782 0.115975 0.070866 0.028849 -0.081170 0.140380 0.063864 0.110987 ...
2018-01-03 0.123386 0.107789 0.071037 0.045278 -0.060782 0.167446 0.089779 0.124476 ...
2018-01-04 0.140775 0.097206 0.063702 0.042415 -0.078164 0.173218 0.098914 0.114389 ...
2018-01-05 0.030320 -0.037209 -0.044536 -0.047267 -0.081888 0.045648 0.059947 0.047652 ...
2018-01-08 0.107201 0.009219 -0.015995 -0.036594 -0.086633 0.108965 0.122164 0.108508 ...
... ... ... ... ... ... ... ... ... ...
|
setup
|
python
|
microsoft/qlib
|
qlib/contrib/meta/data_selection/dataset.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/meta/data_selection/dataset.py
|
MIT
|
def update(self):
"""update the data for online trading"""
# TODO:
# when new data are totally(including label) available
# - update the prediction
# - update the data similarity map(if applied)
|
update the data for online trading
|
update
|
python
|
microsoft/qlib
|
qlib/contrib/meta/data_selection/dataset.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/meta/data_selection/dataset.py
|
MIT
|
def __init__(self, task: dict, meta_info: pd.DataFrame, mode: str = MetaTask.PROC_MODE_FULL, fill_method="max"):
"""
The description of the processed data
time_perf: A array with shape <hist_step_n * step, data pieces> -> data piece performance
time_belong: A array with shape <sample, data pieces> -> belong or not (1. or 0.)
array([[1., 0., 0., ..., 0., 0., 0.],
[1., 0., 0., ..., 0., 0., 0.],
[1., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 1.],
[0., 0., 0., ..., 0., 0., 1.],
[0., 0., 0., ..., 0., 0., 1.]])
Parameters
----------
meta_info: pd.DataFrame
please refer to the docs of _prepare_meta_ipt for detailed explanation.
"""
super().__init__(task, meta_info)
self.fill_method = fill_method
time_perf = self._get_processed_meta_info()
self.processed_meta_input = {"time_perf": time_perf}
# FIXME: memory issue in this step
if mode == MetaTask.PROC_MODE_FULL:
# process metainfo_
ds = self.get_dataset()
# these three lines occupied 70% of the time of initializing MetaTaskDS
d_train, d_test = ds.prepare(["train", "test"], col_set=["feature", "label"])
prev_size = d_test.shape[0]
d_train = d_train.dropna(axis=0)
d_test = d_test.dropna(axis=0)
if prev_size == 0 or d_test.shape[0] / prev_size <= 0.1:
raise ValueError(f"Most of samples are dropped. Please check this task: {task}")
assert (
d_test.groupby("datetime", group_keys=False).size().shape[0] >= 5
), "In this segment, this trading dates is less than 5, you'd better check the data."
sample_time_belong = np.zeros((d_train.shape[0], time_perf.shape[1]))
for i, col in enumerate(time_perf.columns):
# these two lines of code occupied 20% of the time of initializing MetaTaskDS
slc = slice(*d_train.index.slice_locs(start=col[0], end=col[1]))
sample_time_belong[slc, i] = 1.0
# If you want that last month also belongs to the last time_perf
# Assumptions: the latest data has similar performance like the last month
sample_time_belong[sample_time_belong.sum(axis=1) != 1, -1] = 1.0
self.processed_meta_input.update(
dict(
X=d_train["feature"],
y=d_train["label"].iloc[:, 0],
X_test=d_test["feature"],
y_test=d_test["label"].iloc[:, 0],
time_belong=sample_time_belong,
test_idx=d_test["label"].index,
)
)
# TODO: set device: I think this is not necessary to converting data format.
self.processed_meta_input = data_to_tensor(self.processed_meta_input)
|
The description of the processed data
time_perf: A array with shape <hist_step_n * step, data pieces> -> data piece performance
time_belong: A array with shape <sample, data pieces> -> belong or not (1. or 0.)
array([[1., 0., 0., ..., 0., 0., 0.],
[1., 0., 0., ..., 0., 0., 0.],
[1., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 1.],
[0., 0., 0., ..., 0., 0., 1.],
[0., 0., 0., ..., 0., 0., 1.]])
Parameters
----------
meta_info: pd.DataFrame
please refer to the docs of _prepare_meta_ipt for detailed explanation.
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/meta/data_selection/dataset.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/meta/data_selection/dataset.py
|
MIT
|
def __init__(
self,
*,
task_tpl: Union[dict, list],
step: int,
trunc_days: int = None,
rolling_ext_days: int = 0,
exp_name: Union[str, InternalData],
segments: Union[Dict[Text, Tuple], float, str],
hist_step_n: int = 10,
task_mode: str = MetaTask.PROC_MODE_FULL,
fill_method: str = "max",
):
"""
A dataset for meta model.
Parameters
----------
task_tpl : Union[dict, list]
Decide what tasks are used.
- dict : the task template, the prepared task is generated with `step`, `trunc_days` and `RollingGen`
- list : when list, use the list of tasks directly
the list is supposed to be sorted according timeline
step : int
the rolling step
trunc_days: int
days to be truncated based on the test start
rolling_ext_days: int
sometimes users want to train meta models for a longer test period but with smaller rolling steps for more task samples.
the total length of test periods will be `step + rolling_ext_days`
exp_name : Union[str, InternalData]
Decide what meta_info are used for prediction.
- str: the name of the experiment to store the performance of data
- InternalData: a prepared internal data
segments: Union[Dict[Text, Tuple], float]
if the segment is a Dict
the segments to divide data
both left and right are included
if segments is a float:
the float represents the percentage of data for training
if segments is a string:
it will try its best to put its data in training and ensure that the date `segments` is in the test set
hist_step_n: int
length of historical steps for the meta information,
i.e. the number of steps of the data similarity information
task_mode : str
Please refer to the docs of MetaTask
"""
super().__init__(segments=segments)
if isinstance(exp_name, InternalData):
self.internal_data = exp_name
else:
self.internal_data = InternalData(task_tpl, step=step, exp_name=exp_name)
self.internal_data.setup()
self.task_tpl = deepcopy(task_tpl) # FIXME: if the handler is shared, how to avoid the explosion of the memory.
self.trunc_days = trunc_days
self.hist_step_n = hist_step_n
self.step = step
if isinstance(task_tpl, dict):
rg = RollingGen(
step=step, trunc_days=trunc_days, task_copy_func=deepcopy_basic_type
) # NOTE: trunc_days is very important !!!!
task_iter = rg(task_tpl)
if rolling_ext_days > 0:
self.ta = TimeAdjuster(future=True)
for t in task_iter:
t["dataset"]["kwargs"]["segments"]["test"] = self.ta.shift(
t["dataset"]["kwargs"]["segments"]["test"], step=rolling_ext_days, rtype=RollingGen.ROLL_EX
)
if task_mode == MetaTask.PROC_MODE_FULL:
# Only pre initializing the task when full task is req
# initializing handler and share it.
init_task_handler(task_tpl)
else:
assert isinstance(task_tpl, list)
task_iter = task_tpl
self.task_list = []
self.meta_task_l = []
logger = get_module_logger("MetaDatasetDS")
logger.info(f"Example task for training meta model: {task_iter[0]}")
for t in tqdm(task_iter, desc="creating meta tasks"):
try:
self.meta_task_l.append(
MetaTaskDS(t, meta_info=self._prepare_meta_ipt(t), mode=task_mode, fill_method=fill_method)
)
self.task_list.append(t)
except ValueError as e:
logger.warning(f"ValueError: {e}")
assert len(self.meta_task_l) > 0, "No meta tasks found. Please check the data and setting"
|
A dataset for meta model.
Parameters
----------
task_tpl : Union[dict, list]
Decide what tasks are used.
- dict : the task template, the prepared task is generated with `step`, `trunc_days` and `RollingGen`
- list : when list, use the list of tasks directly
the list is supposed to be sorted according timeline
step : int
the rolling step
trunc_days: int
days to be truncated based on the test start
rolling_ext_days: int
sometimes users want to train meta models for a longer test period but with smaller rolling steps for more task samples.
the total length of test periods will be `step + rolling_ext_days`
exp_name : Union[str, InternalData]
Decide what meta_info are used for prediction.
- str: the name of the experiment to store the performance of data
- InternalData: a prepared internal data
segments: Union[Dict[Text, Tuple], float]
if the segment is a Dict
the segments to divide data
both left and right are included
if segments is a float:
the float represents the percentage of data for training
if segments is a string:
it will try its best to put its data in training and ensure that the date `segments` is in the test set
hist_step_n: int
length of historical steps for the meta information,
i.e. the number of steps of the data similarity information
task_mode : str
Please refer to the docs of MetaTask
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/meta/data_selection/dataset.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/meta/data_selection/dataset.py
|
MIT
|
def _prepare_meta_ipt(self, task) -> pd.DataFrame:
"""
Please refer to `self.internal_data.setup` for detailed information about `self.internal_data.data_ic_df`
Indices with format below can be successfully sliced by `ic_df.loc[:end, pd.IndexSlice[:, :end]]`
2021-06-21 2021-06-04 .. 2021-03-22 2021-03-08
2021-07-02 2021-06-18 .. 2021-04-02 None
Returns
-------
a pd.DataFrame with similar content below.
- each column corresponds to a trained model named by the training data range
- each row corresponds to a day of data tested by the models of the columns
- The row cells that overlap with the data used by the columns are masked
2009-01-05 2009-02-09 ... 2011-04-27 2011-05-26
2009-02-06 2009-03-06 ... 2011-05-25 2011-06-23
datetime ...
2009-01-13 NaN 0.310639 ... -0.169057 0.137792
2009-01-14 NaN 0.261086 ... -0.143567 0.082581
... ... ... ... ... ...
2011-06-30 -0.054907 -0.020219 ... -0.023226 NaN
2011-07-01 -0.075762 -0.026626 ... -0.003167 NaN
"""
ic_df = self.internal_data.data_ic_df
segs = task["dataset"]["kwargs"]["segments"]
end = max(segs[k][1] for k in ("train", "valid") if k in segs)
ic_df_avail = ic_df.loc[:end, pd.IndexSlice[:, :end]]
# meta data set focus on the **information** instead of preprocess
# 1) filter the overlap info
def mask_overlap(s):
"""
mask overlap information
data after the segment end that contains future info within `self.trunc_days` is also considered overlap info
Approximately the diagonal plus the horizon length of data is masked.
"""
start, end = s.name
end = get_date_by_shift(trading_date=end, shift=self.trunc_days - 1, future=True)
return s.mask((s.index >= start) & (s.index <= end))
ic_df_avail = ic_df_avail.apply(mask_overlap) # apply to each col
# 2) filter the info with too long periods
total_len = self.step * self.hist_step_n
if ic_df_avail.shape[0] >= total_len:
return ic_df_avail.iloc[-total_len:]
else:
raise ValueError("the history of distribution data is not long enough.")
|
Please refer to `self.internal_data.setup` for detailed information about `self.internal_data.data_ic_df`
Indices with format below can be successfully sliced by `ic_df.loc[:end, pd.IndexSlice[:, :end]]`
2021-06-21 2021-06-04 .. 2021-03-22 2021-03-08
2021-07-02 2021-06-18 .. 2021-04-02 None
Returns
-------
a pd.DataFrame with similar content below.
- each column corresponds to a trained model named by the training data range
- each row corresponds to a day of data tested by the models of the columns
- The row cells that overlap with the data used by the columns are masked
2009-01-05 2009-02-09 ... 2011-04-27 2011-05-26
2009-02-06 2009-03-06 ... 2011-05-25 2011-06-23
datetime ...
2009-01-13 NaN 0.310639 ... -0.169057 0.137792
2009-01-14 NaN 0.261086 ... -0.143567 0.082581
... ... ... ... ... ...
2011-06-30 -0.054907 -0.020219 ... -0.023226 NaN
2011-07-01 -0.075762 -0.026626 ... -0.003167 NaN
|
_prepare_meta_ipt
|
python
|
microsoft/qlib
|
qlib/contrib/meta/data_selection/dataset.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/meta/data_selection/dataset.py
|
MIT
|
def mask_overlap(s):
"""
mask overlap information
data after the segment end that contains future info within `self.trunc_days` is also considered overlap info
Approximately the diagonal plus the horizon length of data is masked.
"""
start, end = s.name
end = get_date_by_shift(trading_date=end, shift=self.trunc_days - 1, future=True)
return s.mask((s.index >= start) & (s.index <= end))
|
mask overlap information
data after the segment end that contains future info within `self.trunc_days` is also considered overlap info
Approximately the diagonal plus the horizon length of data is masked.
|
mask_overlap
|
python
|
microsoft/qlib
|
qlib/contrib/meta/data_selection/dataset.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/meta/data_selection/dataset.py
|
MIT
|
def __init__(
self,
step,
hist_step_n,
clip_method="tanh",
clip_weight=2.0,
criterion="ic_loss",
lr=0.0001,
max_epoch=100,
seed=43,
alpha=0.0,
loss_skip_thresh=50,
):
"""
loss_skip_thresh: int
The threshold of sample count below which the loss calculation for a day is skipped.
"""
self.step = step
self.hist_step_n = hist_step_n
self.clip_method = clip_method
self.clip_weight = clip_weight
self.criterion = criterion
self.lr = lr
self.max_epoch = max_epoch
self.fitted = False
self.alpha = alpha
self.loss_skip_thresh = loss_skip_thresh
torch.manual_seed(seed)
|
loss_skip_thresh : int
If a day has fewer valid samples than this threshold, the loss calculation for that day is skipped.
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/meta/data_selection/model.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/meta/data_selection/model.py
|
MIT
|
def fit(self, meta_dataset: MetaDatasetDS):
"""
The meta-learning-based data selection interacts directly with the meta-dataset due to the closed-form proxy measurement.
Parameters
----------
meta_dataset : MetaDatasetDS
The meta-model takes the meta-dataset for its training process.
"""
if not self.fitted:
for k in set(["lr", "step", "hist_step_n", "clip_method", "clip_weight", "criterion", "max_epoch"]):
R.log_params(**{k: getattr(self, k)})
# FIXME: get test tasks for just checking the performance
phases = ["train", "test"]
meta_tasks_l = meta_dataset.prepare_tasks(phases)
if len(meta_tasks_l[1]):
R.log_params(
**dict(proxy_test_begin=meta_tasks_l[1][0].task["dataset"]["kwargs"]["segments"]["test"])
) # debug: record when the test phase starts
self.tn = PredNet(
step=self.step,
hist_step_n=self.hist_step_n,
clip_weight=self.clip_weight,
clip_method=self.clip_method,
alpha=self.alpha,
)
opt = optim.Adam(self.tn.parameters(), lr=self.lr)
# evaluate before training: once ignoring the sample weights, and once with the initial weights
for phase, task_list in zip(phases, meta_tasks_l):
self.run_epoch(f"{phase}_noweight", task_list, 0, opt, {}, ignore_weight=True)
self.run_epoch(f"{phase}_init", task_list, 0, opt, {})
# run training
loss_l = {}
for epoch in tqdm(range(self.max_epoch), desc="epoch"):
for phase, task_list in zip(phases, meta_tasks_l):
self.run_epoch(phase, task_list, epoch, opt, loss_l)
R.save_objects(**{"model.pkl": self.tn})
self.fitted = True
|
The meta-learning-based data selection interacts directly with the meta-dataset due to the closed-form proxy measurement.
Parameters
----------
meta_dataset : MetaDatasetDS
The meta-model takes the meta-dataset for its training process.
|
fit
|
python
|
microsoft/qlib
|
qlib/contrib/meta/data_selection/model.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/meta/data_selection/model.py
|
MIT
|
def __init__(self, step, hist_step_n, clip_weight=None, clip_method="tanh", alpha: float = 0.0):
"""
Parameters
----------
alpha : float
the regularization for the sub-model (useful when aligning the meta-model with a linear sub-model)
"""
super().__init__()
self.step = step
self.twm = TimeWeightMeta(hist_step_n=hist_step_n, clip_weight=clip_weight, clip_method=clip_method)
self.init_paramters(hist_step_n)
self.alpha = alpha
|
Parameters
----------
alpha : float
the regularization for the sub-model (useful when aligning the meta-model with a linear sub-model)
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/meta/data_selection/net.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/meta/data_selection/net.py
|
MIT
|
def forward(self, X, y, time_perf, time_belong, X_test, ignore_weight=False):
"""Please refer to the docs of MetaTaskDS for the description of the variables"""
weights = self.get_sample_weights(X, time_perf, time_belong, ignore_weight=ignore_weight)
X_w = X.T * weights.view(1, -1)
theta = torch.inverse(X_w @ X + self.alpha * torch.eye(X_w.shape[0])) @ X_w @ y
return X_test @ theta, weights
|
Please refer to the docs of MetaTaskDS for the description of the variables
|
forward
|
python
|
microsoft/qlib
|
qlib/contrib/meta/data_selection/net.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/meta/data_selection/net.py
|
MIT
|
def forward(self, pred, y, idx):
"""forward.
FIXME:
- Sometimes it will be slightly different from the result of `pandas.corr()`
- It may be caused by the floating-point precision of the model;
:param pred:
:param y:
:param idx: Assume the level of the idx is (date, inst), and it is sorted
"""
prev = None
diff_point = []
for i, (date, inst) in enumerate(idx):
if date != prev:
diff_point.append(i)
prev = date
diff_point.append(None)
# len(diff_point) is one larger than the number of days
ic_all = 0.0
skip_n = 0
for start_i, end_i in zip(diff_point, diff_point[1:]):
pred_focus = pred[start_i:end_i] # TODO: just for fake
if pred_focus.shape[0] < self.skip_size:
# skip some days which have very small amount of stock.
skip_n += 1
continue
y_focus = y[start_i:end_i]
if pred_focus.std() < EPS or y_focus.std() < EPS:
# These cases often happen at the end of the test data.
# Usually caused by fillna(0.)
skip_n += 1
continue
ic_day = torch.dot(
(pred_focus - pred_focus.mean()) / np.sqrt(pred_focus.shape[0]) / pred_focus.std(),
(y_focus - y_focus.mean()) / np.sqrt(y_focus.shape[0]) / y_focus.std(),
)
ic_all += ic_day
if len(diff_point) - 1 - skip_n <= 0:
raise ValueError("Not enough data for calculating IC")
if skip_n > 0:
get_module_logger("ICLoss").info(
f"{skip_n} days are skipped due to zero std or small scale of valid samples."
)
ic_mean = ic_all / (len(diff_point) - 1 - skip_n)
return -ic_mean # ic loss
|
forward.
FIXME:
- Sometimes it will be slightly different from the result of `pandas.corr()`
- It may be caused by the floating-point precision of the model;
:param pred:
:param y:
:param idx: Assume the level of the idx is (date, inst), and it is sorted
|
forward
|
python
|
microsoft/qlib
|
qlib/contrib/meta/data_selection/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/meta/data_selection/utils.py
|
MIT
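A small self-contained check (not from the source) of the per-day IC formula used above; it illustrates the FIXME remark, since torch.std() is the unbiased (n-1) estimator the value is close to, but not exactly, the Pearson correlation.
import numpy as np
import torch
torch.manual_seed(0)
pred = torch.randn(500)
label = 0.3 * pred + torch.randn(500)
ic = torch.dot(
    (pred - pred.mean()) / np.sqrt(pred.shape[0]) / pred.std(),
    (label - label.mean()) / np.sqrt(label.shape[0]) / label.std(),
)
print(float(ic), np.corrcoef(pred.numpy(), label.numpy())[0, 1])  # differs by roughly a factor of (n-1)/n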
|
def preds_to_weight_with_clamp(preds, clip_weight=None, clip_method="tanh"):
"""
Clip the weights.
Parameters
----------
clip_weight: float
The clip threshold.
clip_method: str
The clip method. Currently available: "clamp", "tanh", and "sigmoid".
"""
if clip_weight is not None:
if clip_method == "clamp":
weights = torch.exp(preds)
weights = weights.clamp(1.0 / clip_weight, clip_weight)
elif clip_method == "tanh":
weights = torch.exp(torch.tanh(preds) * np.log(clip_weight))
elif clip_method == "sigmoid":
# intuitively assume its sum is 1
if clip_weight == 0.0:
weights = torch.ones_like(preds)
else:
sm = nn.Sigmoid()
weights = sm(preds) * clip_weight # TODO: The clip_weight is useless here.
weights = weights / torch.sum(weights) * weights.numel()
else:
raise ValueError("Unknown clip_method")
else:
weights = torch.exp(preds)
return weights
|
Clip the weights.
Parameters
----------
clip_weight: float
The clip threshold.
clip_method: str
The clip method. Currently available: "clamp", "tanh", and "sigmoid".
|
preds_to_weight_with_clamp
|
python
|
microsoft/qlib
|
qlib/contrib/meta/data_selection/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/meta/data_selection/utils.py
|
MIT
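A minimal usage sketch, assuming qlib is installed so the function can be imported from the path recorded above; it shows how the three clip methods bound the weights for clip_weight=2.
import torch
from qlib.contrib.meta.data_selection.utils import preds_to_weight_with_clamp
preds = torch.tensor([-3.0, -1.0, 0.0, 1.0, 3.0])
print("clamp", preds_to_weight_with_clamp(preds, clip_weight=2.0, clip_method="clamp"))
print("tanh", preds_to_weight_with_clamp(preds, clip_weight=2.0, clip_method="tanh"))
print("sigmoid", preds_to_weight_with_clamp(preds, clip_weight=2.0, clip_method="sigmoid"))
# "clamp" hard-clips exp(preds) into [1/2, 2]; "tanh" maps smoothly into the same range;
# "sigmoid" rescales the weights so that they average to 1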
|
def get_feature_importance(self, *args, **kwargs) -> pd.Series:
"""get feature importance
Notes
-----
parameters references:
https://catboost.ai/docs/concepts/python-reference_catboost_get_feature_importance.html#python-reference_catboost_get_feature_importance
"""
return pd.Series(
data=self.model.get_feature_importance(*args, **kwargs), index=self.model.feature_names_
).sort_values(ascending=False)
|
get feature importance
Notes
-----
parameters references:
https://catboost.ai/docs/concepts/python-reference_catboost_get_feature_importance.html#python-reference_catboost_get_feature_importance
|
get_feature_importance
|
python
|
microsoft/qlib
|
qlib/contrib/model/catboost_model.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/catboost_model.py
|
MIT
|
def sample_reweight(self, loss_curve, loss_values, k_th):
"""
the SR module of Double Ensemble
:param loss_curve: the shape is NxT
the loss curve for the previous sub-model, where the element (i, t) is the error on the i-th sample
after the t-th iteration in the training of the previous sub-model.
:param loss_values: the shape is N
the loss of the current ensemble on the i-th sample.
:param k_th: the index of the current sub-model, starting from 1
:return: weights
the weights for all the samples.
"""
# normalize loss_curve and loss_values with ranking
loss_curve_norm = loss_curve.rank(axis=0, pct=True)
loss_values_norm = (-loss_values).rank(pct=True)
# calculate l_start and l_end from loss_curve
N, T = loss_curve.shape
part = np.maximum(int(T * 0.1), 1)
l_start = loss_curve_norm.iloc[:, :part].mean(axis=1)
l_end = loss_curve_norm.iloc[:, -part:].mean(axis=1)
# calculate h-value for each sample
h1 = loss_values_norm
h2 = (l_end / l_start).rank(pct=True)
h = pd.DataFrame({"h_value": self.alpha1 * h1 + self.alpha2 * h2})
# calculate weights
h["bins"] = pd.cut(h["h_value"], self.bins_sr)
h_avg = h.groupby("bins", group_keys=False, observed=False)["h_value"].mean()
weights = pd.Series(np.zeros(N, dtype=float))
for b in h_avg.index:
weights[h["bins"] == b] = 1.0 / (self.decay**k_th * h_avg[b] + 0.1)
return weights
|
the SR module of Double Ensemble
:param loss_curve: the shape is NxT
the loss curve for the previous sub-model, where the element (i, t) is the error on the i-th sample
after the t-th iteration in the training of the previous sub-model.
:param loss_values: the shape is N
the loss of the current ensemble on the i-th sample.
:param k_th: the index of the current sub-model, starting from 1
:return: weights
the weights for all the samples.
|
sample_reweight
|
python
|
microsoft/qlib
|
qlib/contrib/model/double_ensemble.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/double_ensemble.py
|
MIT
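A toy, self-contained re-run of the SR reweighting logic above (not the qlib class itself) with hypothetical hyper-parameters alpha1, alpha2, bins_sr, decay and k_th, mainly to show the expected input shapes.
import numpy as np
import pandas as pd
rng = np.random.default_rng(0)
N, T = 200, 50
loss_curve = pd.DataFrame(rng.random((N, T)))  # per-sample loss after each boosting iteration
loss_values = pd.Series(rng.random(N))         # current ensemble loss per sample
alpha1, alpha2, bins_sr, decay, k_th = 1.0, 1.0, 10, 0.5, 2
loss_curve_norm = loss_curve.rank(axis=0, pct=True)
loss_values_norm = (-loss_values).rank(pct=True)
part = max(int(T * 0.1), 1)
l_start = loss_curve_norm.iloc[:, :part].mean(axis=1)
l_end = loss_curve_norm.iloc[:, -part:].mean(axis=1)
h = pd.DataFrame({"h_value": alpha1 * loss_values_norm + alpha2 * (l_end / l_start).rank(pct=True)})
h["bins"] = pd.cut(h["h_value"], bins_sr)
h_avg = h.groupby("bins", observed=False)["h_value"].mean()
weights = pd.Series(np.zeros(N))
for b in h_avg.index:
    weights[h["bins"] == b] = 1.0 / (decay**k_th * h_avg[b] + 0.1)
print(weights.describe())  # easy samples get smaller weights, hard/noisy ones larger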
|
def feature_selection(self, df_train, loss_values):
"""
the FS module of Double Ensemble
:param df_train: the shape is NxF
:param loss_values: the shape is N
the loss of the current ensemble on the i-th sample.
:return: res_feat: in the form of pandas.Index
"""
x_train, y_train = df_train["feature"], df_train["label"]
features = x_train.columns
N, F = x_train.shape
g = pd.DataFrame({"g_value": np.zeros(F, dtype=float)})
M = len(self.ensemble)
# shuffle specific columns and calculate g-value for each feature
x_train_tmp = x_train.copy()
for i_f, feat in enumerate(features):
x_train_tmp.loc[:, feat] = np.random.permutation(x_train_tmp.loc[:, feat].values)
pred = pd.Series(np.zeros(N), index=x_train_tmp.index)
for i_s, submodel in enumerate(self.ensemble):
pred += (
pd.Series(
submodel.predict(x_train_tmp.loc[:, self.sub_features[i_s]].values), index=x_train_tmp.index
)
/ M
)
loss_feat = self.get_loss(y_train.values.squeeze(), pred.values)
g.loc[i_f, "g_value"] = np.mean(loss_feat - loss_values) / (np.std(loss_feat - loss_values) + 1e-7)
x_train_tmp.loc[:, feat] = x_train.loc[:, feat].copy()
# handle the case where a column of the train features is all-NaN (i.e. g['g_value'].isna().any())
g["g_value"].replace(np.nan, 0, inplace=True)
# divide features into bins_fs bins
g["bins"] = pd.cut(g["g_value"], self.bins_fs)
# randomly sample features from bins to construct the new features
res_feat = []
sorted_bins = sorted(g["bins"].unique(), reverse=True)
for i_b, b in enumerate(sorted_bins):
b_feat = features[g["bins"] == b]
num_feat = int(np.ceil(self.sample_ratios[i_b] * len(b_feat)))
res_feat = res_feat + np.random.choice(b_feat, size=num_feat, replace=False).tolist()
return pd.Index(set(res_feat))
|
the FS module of Double Ensemble
:param df_train: the shape is NxF
:param loss_values: the shape is N
the loss of the current ensemble on the i-th sample.
:return: res_feat: in the form of pandas.Index
|
feature_selection
|
python
|
microsoft/qlib
|
qlib/contrib/model/double_ensemble.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/double_ensemble.py
|
MIT
|
def get_feature_importance(self, *args, **kwargs) -> pd.Series:
"""get feature importance
Notes
-----
parameters reference:
https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.Booster.html?highlight=feature_importance#lightgbm.Booster.feature_importance
"""
res = []
for _model, _weight in zip(self.ensemble, self.sub_weights):
res.append(pd.Series(_model.feature_importance(*args, **kwargs), index=_model.feature_name()) * _weight)
return pd.concat(res, axis=1, sort=False).sum(axis=1).sort_values(ascending=False)
|
get feature importance
Notes
-----
parameters reference:
https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.Booster.html?highlight=feature_importance#lightgbm.Booster.feature_importance
|
get_feature_importance
|
python
|
microsoft/qlib
|
qlib/contrib/model/double_ensemble.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/double_ensemble.py
|
MIT
|
def _prepare_data(self, dataset: DatasetH, reweighter=None) -> List[Tuple[lgb.Dataset, str]]:
"""
The motivation of current version is to make validation optional
- train segment is necessary;
"""
ds_l = []
assert "train" in dataset.segments
for key in ["train", "valid"]:
if key in dataset.segments:
df = dataset.prepare(key, col_set=["feature", "label"], data_key=DataHandlerLP.DK_L)
if df.empty:
raise ValueError("Empty data from dataset, please check your dataset config.")
x, y = df["feature"], df["label"]
# LightGBM needs a 1D array as its label
if y.values.ndim == 2 and y.values.shape[1] == 1:
y = np.squeeze(y.values)
else:
raise ValueError("LightGBM doesn't support multi-label training")
if reweighter is None:
w = None
elif isinstance(reweighter, Reweighter):
w = reweighter.reweight(df)
else:
raise ValueError("Unsupported reweighter type.")
ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))
return ds_l
|
The motivation of current version is to make validation optional
- train segment is necessary;
|
_prepare_data
|
python
|
microsoft/qlib
|
qlib/contrib/model/gbdt.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/gbdt.py
|
MIT
|
def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=20, reweighter=None):
"""
finetune model
Parameters
----------
dataset : DatasetH
dataset for finetuning
num_boost_round : int
number of round to finetune model
verbose_eval : int
verbose level
"""
# Based on existing model and finetune by train more rounds
dtrain, _ = self._prepare_data(dataset, reweighter) # pylint: disable=W0632
if dtrain.empty:
raise ValueError("Empty data from dataset, please check your dataset config.")
verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)
self.model = lgb.train(
self.params,
dtrain,
num_boost_round=num_boost_round,
init_model=self.model,
valid_sets=[dtrain],
valid_names=["train"],
callbacks=[verbose_eval_callback],
)
|
finetune model
Parameters
----------
dataset : DatasetH
dataset for finetuning
num_boost_round : int
number of round to finetune model
verbose_eval : int
verbose level
|
finetune
|
python
|
microsoft/qlib
|
qlib/contrib/model/gbdt.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/gbdt.py
|
MIT
|
def _cal_signal_metrics(self, y_test, l_cut, r_cut):
"""
Calculate the signal metrics at the daily level
"""
up_pre, down_pre = [], []
up_alpha_ll, down_alpha_ll = [], []
for date in y_test.index.get_level_values(0).unique():
df_res = y_test.loc[date].sort_values("pred")
if int(l_cut * len(df_res)) < 10:
warnings.warn("Warning: threhold is too low or instruments number is not enough")
continue
top = df_res.iloc[: int(l_cut * len(df_res))]
bottom = df_res.iloc[int(r_cut * len(df_res)) :]
down_precision = len(top[top[top.columns[0]] < 0]) / (len(top))
up_precision = len(bottom[bottom[top.columns[0]] > 0]) / (len(bottom))
down_alpha = top[top.columns[0]].mean()
up_alpha = bottom[bottom.columns[0]].mean()
up_pre.append(up_precision)
down_pre.append(down_precision)
up_alpha_ll.append(up_alpha)
down_alpha_ll.append(down_alpha)
return (
np.array(up_pre).mean(),
np.array(down_pre).mean(),
np.array(up_alpha_ll).mean(),
np.array(down_alpha_ll).mean(),
)
|
Calculate the signal metrics at the daily level
|
_cal_signal_metrics
|
python
|
microsoft/qlib
|
qlib/contrib/model/highfreq_gdbt_model.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/highfreq_gdbt_model.py
|
MIT
|
def hf_signal_test(self, dataset: DatasetH, threhold=0.2):
"""
Test the signal in high frequency test set
"""
if self.model is None:
raise ValueError("Model hasn't been trained yet")
df_test = dataset.prepare("test", col_set=["feature", "label"], data_key=DataHandlerLP.DK_I)
df_test.dropna(inplace=True)
x_test, y_test = df_test["feature"], df_test["label"]
# Convert label into alpha
y_test[y_test.columns[0]] = y_test[y_test.columns[0]] - y_test[y_test.columns[0]].mean(level=0)
res = pd.Series(self.model.predict(x_test.values), index=x_test.index)
y_test["pred"] = res
up_p, down_p, up_a, down_a = self._cal_signal_metrics(y_test, threhold, 1 - threhold)
print("===============================")
print("High frequency signal test")
print("===============================")
print("Test set precision: ")
print("Positive precision: {}, Negative precision: {}".format(up_p, down_p))
print("Test Alpha Average in test set: ")
print("Positive average alpha: {}, Negative average alpha: {}".format(up_a, down_a))
|
Test the signal in high frequency test set
|
hf_signal_test
|
python
|
microsoft/qlib
|
qlib/contrib/model/highfreq_gdbt_model.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/highfreq_gdbt_model.py
|
MIT
|
def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=20):
"""
finetune model
Parameters
----------
dataset : DatasetH
dataset for finetuning
num_boost_round : int
number of round to finetune model
verbose_eval : int
verbose level
"""
# Based on existing model and finetune by train more rounds
dtrain, _ = self._prepare_data(dataset)
verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)
self.model = lgb.train(
self.params,
dtrain,
num_boost_round=num_boost_round,
init_model=self.model,
valid_sets=[dtrain],
valid_names=["train"],
callbacks=[verbose_eval_callback],
)
|
finetune model
Parameters
----------
dataset : DatasetH
dataset for finetuning
num_boost_round : int
number of round to finetune model
verbose_eval : int
verbose level
|
finetune
|
python
|
microsoft/qlib
|
qlib/contrib/model/highfreq_gdbt_model.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/highfreq_gdbt_model.py
|
MIT
|
def __init__(self, estimator="ols", alpha=0.0, fit_intercept=False, include_valid: bool = False):
"""
Parameters
----------
estimator : str
which estimator to use for linear regression
alpha : float
l1 or l2 regularization parameter
fit_intercept : bool
whether fit intercept
include_valid: bool
Should the validation data be included for training?
The validation data should be included
"""
assert estimator in [self.OLS, self.NNLS, self.RIDGE, self.LASSO], f"unsupported estimator `{estimator}`"
self.estimator = estimator
assert alpha == 0 or estimator in [self.RIDGE, self.LASSO], f"alpha is only supported in `ridge`&`lasso`"
self.alpha = alpha
self.fit_intercept = fit_intercept
self.coef_ = None
self.include_valid = include_valid
|
Parameters
----------
estimator : str
which estimator to use for linear regression
alpha : float
l1 or l2 regularization parameter
fit_intercept : bool
whether fit intercept
include_valid: bool
Should the validation data be included for training?
The validation data should be included
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/model/linear.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/linear.py
|
MIT
|
def calc_all_metrics(pred):
"""pred is a pandas dataframe that has two attributes: score (pred) and label (real)"""
res = {}
ic = pred.groupby(level="datetime", group_keys=False).apply(lambda x: x.label.corr(x.score))
rank_ic = pred.groupby(level="datetime", group_keys=False).apply(
lambda x: x.label.corr(x.score, method="spearman")
)
res["ic"] = ic.mean()
res["icir"] = ic.mean() / ic.std()
res["ric"] = rank_ic.mean()
res["ricir"] = rank_ic.mean() / rank_ic.std()
res["mse"] = -(pred["label"] - pred["score"]).mean()
res["loss"] = res["mse"]
return res
|
pred is a pandas dataframe that has two columns: score (pred) and label (real)
|
calc_all_metrics
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_adarnn.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_adarnn.py
|
MIT
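A minimal usage sketch, assuming qlib (and torch, which the module imports) is installed; it builds a toy prediction frame with a (datetime, instrument) MultiIndex and prints the IC/ICIR/RankIC metrics.
import numpy as np
import pandas as pd
from qlib.contrib.model.pytorch_adarnn import calc_all_metrics
idx = pd.MultiIndex.from_product(
    [pd.date_range("2021-01-01", periods=5), [f"stock_{i}" for i in range(50)]],
    names=["datetime", "instrument"],
)
rng = np.random.default_rng(0)
label = rng.normal(size=len(idx))
pred = pd.DataFrame({"score": 0.5 * label + rng.normal(size=len(idx)), "label": label}, index=idx)
print(calc_all_metrics(pred))  # {'ic': ..., 'icir': ..., 'ric': ..., 'ricir': ..., 'mse': ..., 'loss': ...}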
|
def __init__(self, loss_type="cosine", input_dim=512, GPU=0):
"""
Supported loss_type: mmd(mmd_lin), mmd_rbf, coral, cosine, kl, js, mine, adv
"""
self.loss_type = loss_type
self.input_dim = input_dim
self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
|
Supported loss_type: mmd(mmd_lin), mmd_rbf, coral, cosine, kl, js, mine, adv
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_adarnn.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_adarnn.py
|
MIT
|
def compute(self, X, Y):
"""Compute adaptation loss
Arguments:
X {tensor} -- source matrix
Y {tensor} -- target matrix
Returns:
[tensor] -- transfer loss
"""
loss = None
if self.loss_type in ("mmd_lin", "mmd"):
mmdloss = MMD_loss(kernel_type="linear")
loss = mmdloss(X, Y)
elif self.loss_type == "coral":
loss = CORAL(X, Y, self.device)
elif self.loss_type in ("cosine", "cos"):
loss = 1 - cosine(X, Y)
elif self.loss_type == "kl":
loss = kl_div(X, Y)
elif self.loss_type == "js":
loss = js(X, Y)
elif self.loss_type == "mine":
mine_model = Mine_estimator(input_dim=self.input_dim, hidden_dim=60).to(self.device)
loss = mine_model(X, Y)
elif self.loss_type == "adv":
loss = adv(X, Y, self.device, input_dim=self.input_dim, hidden_dim=32)
elif self.loss_type == "mmd_rbf":
mmdloss = MMD_loss(kernel_type="rbf")
loss = mmdloss(X, Y)
elif self.loss_type == "pairwise":
pair_mat = pairwise_dist(X, Y)
loss = torch.norm(pair_mat)
return loss
|
Compute adaptation loss
Arguments:
X {tensor} -- source matrix
Y {tensor} -- target matrix
Returns:
[tensor] -- transfer loss
|
compute
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_adarnn.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_adarnn.py
|
MIT
|
def __init__(self, gamma=0.1, gamma_clip=0.4, *args, **kwargs):
"""
A gradient reversal layer.
This layer has no parameters, and simply reverses the gradient
in the backward pass.
"""
super().__init__(*args, **kwargs)
self.gamma = gamma
self.gamma_clip = torch.tensor(float(gamma_clip), requires_grad=False)
self._alpha = torch.tensor(0, requires_grad=False)
self._p = 0
|
A gradient reversal layer.
This layer has no parameters, and simply reverses the gradient
in the backward pass.
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_add.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_add.py
|
MIT
|
def _get_fl(self, data: torch.Tensor):
"""
get feature and label from data
- Handle the different data shape of time series and tabular data
Parameters
----------
data : torch.Tensor
input data which may be 3-dimensional or 2-dimensional
- 3dim: [batch_size, time_step, feature_dim]
- 2dim: [batch_size, feature_dim]
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
"""
if data.dim() == 3:
# it is a time series dataset
feature = data[:, :, 0:-1].to(self.device)
label = data[:, -1, -1].to(self.device)
elif data.dim() == 2:
# it is a tabular dataset
feature = data[:, 0:-1].to(self.device)
label = data[:, -1].to(self.device)
else:
raise ValueError("Unsupported data shape.")
return feature, label
|
get feature and label from data
- Handle the different data shape of time series and tabular data
Parameters
----------
data : torch.Tensor
input data which may be 3-dimensional or 2-dimensional
- 3dim: [batch_size, time_step, feature_dim]
- 2dim: [batch_size, feature_dim]
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
|
_get_fl
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_general_nn.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_general_nn.py
|
MIT
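A small sketch (not from the source) of the data layout `_get_fl` expects: the label occupies the last column, and for 3-dimensional time-series batches only the label of the last time step is used.
import torch
ts_batch = torch.randn(2, 3, 5)    # [batch_size, time_step, feature_dim + 1]
print(ts_batch[:, :, 0:-1].shape)  # torch.Size([2, 3, 4])  -> feature
print(ts_batch[:, -1, -1].shape)   # torch.Size([2])        -> label of the last time step
tab_batch = torch.randn(2, 5)      # [batch_size, feature_dim + 1]
print(tab_batch[:, 0:-1].shape, tab_batch[:, -1].shape)  # torch.Size([2, 4]) torch.Size([2])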
|
def __init__(self, input_dim, output_dim, kernel_size, device):
"""Build a basic CNN encoder
Parameters
----------
input_dim : int
The input dimension
output_dim : int
The output dimension
kernel_size : int
The size of convolutional kernels
"""
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.kernel_size = kernel_size
self.device = device
# set padding to ensure the same length
# it is correct only when kernel_size is odd, dilation is 1, stride is 1
self.conv = nn.Conv1d(input_dim, output_dim, kernel_size, padding=(kernel_size - 1) // 2)
|
Build a basic CNN encoder
Parameters
----------
input_dim : int
The input dimension
output_dim : int
The output dimension
kernel_size : int
The size of convolutional kernels
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_krnn.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_krnn.py
|
MIT
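A quick check (not part of the source) of the padding comment above: with an odd kernel size, stride 1 and dilation 1, padding=(kernel_size - 1) // 2 preserves the sequence length.
import torch
import torch.nn as nn
kernel_size = 5
conv = nn.Conv1d(in_channels=6, out_channels=8, kernel_size=kernel_size, padding=(kernel_size - 1) // 2)
x = torch.randn(4, 6, 30)  # [batch, input_dim, seq_len]
print(conv(x).shape)       # torch.Size([4, 8, 30]) -- seq_len is preserved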
|
def forward(self, x):
"""
Parameters
----------
x : torch.Tensor
input data
Returns
-------
torch.Tensor
Updated representations
"""
# input shape: [batch_size, seq_len*input_dim]
# output shape: [batch_size, seq_len, input_dim]
x = x.view(x.shape[0], -1, self.input_dim).permute(0, 2, 1).to(self.device)
y = self.conv(x) # [batch_size, output_dim, conved_seq_len]
y = y.permute(0, 2, 1) # [batch_size, conved_seq_len, output_dim]
return y
|
Parameters
----------
x : torch.Tensor
input data
Returns
-------
torch.Tensor
Updated representations
|
forward
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_krnn.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_krnn.py
|
MIT
|
def __init__(self, input_dim, output_dim, dup_num, rnn_layers, dropout, device):
"""Build K parallel RNNs
Parameters
----------
input_dim : int
The input dimension
output_dim : int
The output dimension
dup_num : int
The number of parallel RNNs
rnn_layers: int
The number of RNN layers
"""
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.dup_num = dup_num
self.rnn_layers = rnn_layers
self.dropout = dropout
self.device = device
self.rnn_modules = nn.ModuleList()
for _ in range(dup_num):
self.rnn_modules.append(nn.GRU(input_dim, output_dim, num_layers=self.rnn_layers, dropout=dropout))
|
Build K parallel RNNs
Parameters
----------
input_dim : int
The input dimension
output_dim : int
The output dimension
dup_num : int
The number of parallel RNNs
rnn_layers: int
The number of RNN layers
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_krnn.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_krnn.py
|
MIT
|
def forward(self, x):
"""
Parameters
----------
x : torch.Tensor
Input data
n_id : torch.Tensor
Node indices
Returns
-------
torch.Tensor
Updated representations
"""
# input shape: [batch_size, seq_len, input_dim]
# output shape: [batch_size, seq_len, output_dim]
# [seq_len, batch_size, input_dim]
batch_size, seq_len, input_dim = x.shape
x = x.permute(1, 0, 2).to(self.device)
hids = []
for rnn in self.rnn_modules:
h, _ = rnn(x) # [seq_len, batch_size, output_dim]
hids.append(h)
# [seq_len, batch_size, output_dim, num_dups]
hids = torch.stack(hids, dim=-1)
hids = hids.view(seq_len, batch_size, self.output_dim, self.dup_num)
hids = hids.mean(dim=3)
hids = hids.permute(1, 0, 2)
return hids
|
Parameters
----------
x : torch.Tensor
Input data
n_id : torch.Tensor
Node indices
Returns
-------
torch.Tensor
Updated representations
|
forward
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_krnn.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_krnn.py
|
MIT
|
def __init__(
self, cnn_input_dim, cnn_output_dim, cnn_kernel_size, rnn_output_dim, rnn_dup_num, rnn_layers, dropout, device
):
"""Build an encoder composed of CNN and KRNN
Parameters
----------
cnn_input_dim : int
The input dimension of CNN
cnn_output_dim : int
The output dimension of CNN
cnn_kernel_size : int
The size of convolutional kernels
rnn_output_dim : int
The output dimension of KRNN
rnn_dup_num : int
The number of parallel duplicates for KRNN
rnn_layers : int
The number of RNN layers
"""
super().__init__()
self.cnn_encoder = CNNEncoderBase(cnn_input_dim, cnn_output_dim, cnn_kernel_size, device)
self.krnn_encoder = KRNNEncoderBase(cnn_output_dim, rnn_output_dim, rnn_dup_num, rnn_layers, dropout, device)
|
Build an encoder composed of CNN and KRNN
Parameters
----------
cnn_input_dim : int
The input dimension of CNN
cnn_output_dim : int
The output dimension of CNN
cnn_kernel_size : int
The size of convolutional kernels
rnn_output_dim : int
The output dimension of KRNN
rnn_dup_num : int
The number of parallel duplicates for KRNN
rnn_layers : int
The number of RNN layers
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_krnn.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_krnn.py
|
MIT
|
def forward(self, x):
"""
Parameters
----------
x : torch.Tensor
Input data
n_id : torch.Tensor
Node indices
Returns
-------
torch.Tensor
Updated representations
"""
cnn_out = self.cnn_encoder(x)
krnn_out = self.krnn_encoder(cnn_out)
return krnn_out
|
Parameters
----------
x : torch.Tensor
Input data
n_id : torch.Tensor
Node indices
Returns
-------
torch.Tensor
Updated representations
|
forward
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_krnn.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_krnn.py
|
MIT
|
def __init__(self, fea_dim, cnn_dim, cnn_kernel_size, rnn_dim, rnn_dups, rnn_layers, dropout, device, **params):
"""Build a KRNN model
Parameters
----------
fea_dim : int
The feature dimension
cnn_dim : int
The hidden dimension of CNN
cnn_kernel_size : int
The size of convolutional kernels
rnn_dim : int
The hidden dimension of KRNN
rnn_dups : int
The number of parallel duplicates
rnn_layers: int
The number of RNN layers
"""
super().__init__()
self.encoder = CNNKRNNEncoder(
cnn_input_dim=fea_dim,
cnn_output_dim=cnn_dim,
cnn_kernel_size=cnn_kernel_size,
rnn_output_dim=rnn_dim,
rnn_dup_num=rnn_dups,
rnn_layers=rnn_layers,
dropout=dropout,
device=device,
)
self.out_fc = nn.Linear(rnn_dim, 1)
self.device = device
|
Build a KRNN model
Parameters
----------
fea_dim : int
The feature dimension
cnn_dim : int
The hidden dimension of CNN
cnn_kernel_size : int
The size of convolutional kernels
rnn_dim : int
The hidden dimension of KRNN
rnn_dups : int
The number of parallel duplicates
rnn_layers: int
The number of RNN layers
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_krnn.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_krnn.py
|
MIT
|
def _nn_predict(self, data, return_cpu=True):
"""Reusing predicting NN.
Scenarios
1) test inference (data may come from CPU and expect the output data is on CPU)
2) evaluation on training (data may come from GPU)
"""
if not isinstance(data, torch.Tensor):
if isinstance(data, pd.DataFrame):
data = data.values
data = torch.Tensor(data)
data = data.to(self.device)
preds = []
self.dnn_model.eval()
with torch.no_grad():
batch_size = 8096
for i in range(0, len(data), batch_size):
x = data[i : i + batch_size]
preds.append(self.dnn_model(x.to(self.device)).detach().reshape(-1))
if return_cpu:
preds = np.concatenate([pr.cpu().numpy() for pr in preds])
else:
preds = torch.cat(preds, axis=0)
return preds
|
Reuse the NN for prediction.
Scenarios
1) test inference (data may come from CPU and expect the output data is on CPU)
2) evaluation on training (data may come from GPU)
|
_nn_predict
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_nn.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_nn.py
|
MIT
|
def __init__(
self,
fea_dim,
cnn_dim_1,
cnn_dim_2,
cnn_kernel_size,
rnn_dim_1,
rnn_dim_2,
rnn_dups,
rnn_layers,
dropout,
device,
**params,
):
"""Build a Sandwich model
Parameters
----------
fea_dim : int
The feature dimension
cnn_dim_1 : int
The hidden dimension of the first CNN
cnn_dim_2 : int
The hidden dimension of the second CNN
cnn_kernel_size : int
The size of convolutional kernels
rnn_dim_1 : int
The hidden dimension of the first KRNN
rnn_dim_2 : int
The hidden dimension of the second KRNN
rnn_dups : int
The number of parallel duplicates
rnn_layers: int
The number of RNN layers
"""
super().__init__()
self.first_encoder = CNNKRNNEncoder(
cnn_input_dim=fea_dim,
cnn_output_dim=cnn_dim_1,
cnn_kernel_size=cnn_kernel_size,
rnn_output_dim=rnn_dim_1,
rnn_dup_num=rnn_dups,
rnn_layers=rnn_layers,
dropout=dropout,
device=device,
)
self.second_encoder = CNNKRNNEncoder(
cnn_input_dim=rnn_dim_1,
cnn_output_dim=cnn_dim_2,
cnn_kernel_size=cnn_kernel_size,
rnn_output_dim=rnn_dim_2,
rnn_dup_num=rnn_dups,
rnn_layers=rnn_layers,
dropout=dropout,
device=device,
)
self.out_fc = nn.Linear(rnn_dim_2, 1)
self.device = device
|
Build a Sandwich model
Parameters
----------
fea_dim : int
The feature dimension
cnn_dim_1 : int
The hidden dimension of the first CNN
cnn_dim_2 : int
The hidden dimension of the second CNN
cnn_kernel_size : int
The size of convolutional kernels
rnn_dim_1 : int
The hidden dimension of the first KRNN
rnn_dim_2 : int
The hidden dimension of the second KRNN
rnn_dups : int
The number of parallel duplicates
rnn_layers: int
The number of RNN layers
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_sandwich.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_sandwich.py
|
MIT
|
def __init__(
self,
d_feat=158,
out_dim=64,
final_out_dim=1,
batch_size=4096,
n_d=64,
n_a=64,
n_shared=2,
n_ind=2,
n_steps=5,
n_epochs=100,
pretrain_n_epochs=50,
relax=1.3,
vbs=2048,
seed=993,
optimizer="adam",
loss="mse",
metric="",
early_stop=20,
GPU=0,
pretrain_loss="custom",
ps=0.3,
lr=0.01,
pretrain=True,
pretrain_file=None,
):
"""
TabNet model for Qlib
Args:
ps: probability to generate the bernoulli mask
"""
# set hyper-parameters.
self.d_feat = d_feat
self.out_dim = out_dim
self.final_out_dim = final_out_dim
self.lr = lr
self.batch_size = batch_size
self.optimizer = optimizer.lower()
self.pretrain_loss = pretrain_loss
self.seed = seed
self.ps = ps
self.n_epochs = n_epochs
self.logger = get_module_logger("TabNet")
self.pretrain_n_epochs = pretrain_n_epochs
self.device = "cuda:%s" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu"
self.loss = loss
self.metric = metric
self.early_stop = early_stop
self.pretrain = pretrain
self.pretrain_file = get_or_create_path(pretrain_file)
self.logger.info(
"TabNet:"
"\nbatch_size : {}"
"\nvirtual bs : {}"
"\ndevice : {}"
"\npretrain: {}".format(self.batch_size, vbs, self.device, self.pretrain)
)
self.fitted = False
np.random.seed(self.seed)
torch.manual_seed(self.seed)
self.tabnet_model = TabNet(inp_dim=self.d_feat, out_dim=self.out_dim, vbs=vbs, relax=relax).to(self.device)
self.tabnet_decoder = TabNet_Decoder(self.out_dim, self.d_feat, n_shared, n_ind, vbs, n_steps).to(self.device)
self.logger.info("model:\n{:}\n{:}".format(self.tabnet_model, self.tabnet_decoder))
self.logger.info("model size: {:.4f} MB".format(count_parameters([self.tabnet_model, self.tabnet_decoder])))
if optimizer.lower() == "adam":
self.pretrain_optimizer = optim.Adam(
list(self.tabnet_model.parameters()) + list(self.tabnet_decoder.parameters()), lr=self.lr
)
self.train_optimizer = optim.Adam(self.tabnet_model.parameters(), lr=self.lr)
elif optimizer.lower() == "gd":
self.pretrain_optimizer = optim.SGD(
list(self.tabnet_model.parameters()) + list(self.tabnet_decoder.parameters()), lr=self.lr
)
self.train_optimizer = optim.SGD(self.tabnet_model.parameters(), lr=self.lr)
else:
raise NotImplementedError("optimizer {} is not supported!".format(optimizer))
|
TabNet model for Qlib
Args:
ps: probability to generate the bernoulli mask
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_tabnet.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_tabnet.py
|
MIT
|
def pretrain_loss_fn(self, f_hat, f, S):
"""
Pretrain loss function defined in the original paper, read "Tabular self-supervised learning" in https://arxiv.org/pdf/1908.07442.pdf
"""
down_mean = torch.mean(f, dim=0)
down = torch.sqrt(torch.sum(torch.square(f - down_mean), dim=0))
up = (f_hat - f) * S
return torch.sum(torch.square(up / down))
|
Pretrain loss function defined in the original paper, read "Tabular self-supervised learning" in https://arxiv.org/pdf/1908.07442.pdf
|
pretrain_loss_fn
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_tabnet.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_tabnet.py
|
MIT
|
def __init__(self, inp_dim, out_dim, n_shared, n_ind, vbs, n_steps):
"""
TabNet decoder that is used in pre-training
"""
super().__init__()
self.out_dim = out_dim
if n_shared > 0:
self.shared = nn.ModuleList()
self.shared.append(nn.Linear(inp_dim, 2 * out_dim))
for x in range(n_shared - 1):
self.shared.append(nn.Linear(out_dim, 2 * out_dim)) # preset the linear function we will use
else:
self.shared = None
self.n_steps = n_steps
self.steps = nn.ModuleList()
for x in range(n_steps):
self.steps.append(DecoderStep(inp_dim, out_dim, self.shared, n_ind, vbs))
|
TabNet decoder that is used in pre-training
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_tabnet.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_tabnet.py
|
MIT
|
def __init__(self, inp_dim=6, out_dim=6, n_d=64, n_a=64, n_shared=2, n_ind=2, n_steps=5, relax=1.2, vbs=1024):
"""
TabNet AKA the original encoder
Args:
n_d: dimension of the features used to calculate the final results
n_a: dimension of the features input to the attention transformer of the next step
n_shared: number of shared steps in the feature transformer (optional)
n_ind: number of independent steps in the feature transformer
n_steps: number of steps of the pass through TabNet
relax: relaxation coefficient
vbs: virtual batch size
"""
super().__init__()
# set the number of shared step in feature transformer
if n_shared > 0:
self.shared = nn.ModuleList()
self.shared.append(nn.Linear(inp_dim, 2 * (n_d + n_a)))
for x in range(n_shared - 1):
self.shared.append(nn.Linear(n_d + n_a, 2 * (n_d + n_a))) # preset the linear function we will use
else:
self.shared = None
self.first_step = FeatureTransformer(inp_dim, n_d + n_a, self.shared, n_ind, vbs)
self.steps = nn.ModuleList()
for x in range(n_steps - 1):
self.steps.append(DecisionStep(inp_dim, n_d, n_a, self.shared, n_ind, relax, vbs))
self.fc = nn.Linear(n_d, out_dim)
self.bn = nn.BatchNorm1d(inp_dim, momentum=0.01)
self.n_d = n_d
|
TabNet AKA the original encoder
Args:
n_d: dimension of the features used to calculate the final results
n_a: dimension of the features input to the attention transformer of the next step
n_shared: number of shared steps in the feature transformer (optional)
n_ind: number of independent steps in the feature transformer
n_steps: number of steps of the pass through TabNet
relax: relaxation coefficient
vbs: virtual batch size
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_tabnet.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_tabnet.py
|
MIT
|
def shoot_infs(inp_tensor):
"""Replaces inf by maximum of tensor"""
mask_inf = torch.isinf(inp_tensor)
ind_inf = torch.nonzero(mask_inf, as_tuple=False)
if len(ind_inf) > 0:
for ind in ind_inf:
if len(ind) == 2:
inp_tensor[ind[0], ind[1]] = 0
elif len(ind) == 1:
inp_tensor[ind[0]] = 0
m = torch.max(inp_tensor)
for ind in ind_inf:
if len(ind) == 2:
inp_tensor[ind[0], ind[1]] = m
elif len(ind) == 1:
inp_tensor[ind[0]] = m
return inp_tensor
|
Replaces inf by maximum of tensor
|
shoot_infs
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_tra.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_tra.py
|
MIT
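A minimal usage sketch, assuming qlib is installed so the helper can be imported from the module recorded above; note that it modifies the tensor in place and returns it.
import torch
from qlib.contrib.model.pytorch_tra import shoot_infs
t = torch.tensor([1.0, float("inf"), 3.0, float("inf")])
print(shoot_infs(t))  # tensor([1., 3., 3., 3.]) -- infs replaced by the max of the finite entries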
|
def transport_sample(all_preds, label, choice, prob, hist_loss, count, transport_method, alpha, training=False):
"""
sample-wise transport
Args:
all_preds (torch.Tensor): predictions from all predictors, [sample x states]
label (torch.Tensor): label, [sample]
choice (torch.Tensor): gumbel softmax choice, [sample x states]
prob (torch.Tensor): router predicted probability, [sample x states]
hist_loss (torch.Tensor): history loss matrix, [sample x states]
count (list): sample counts for each day, empty list for sample-wise transport
transport_method (str): transportation method
alpha (float): fusion parameter for calculating transport loss matrix
training (bool): indicate training or inference
"""
assert all_preds.shape == choice.shape
assert len(all_preds) == len(label)
assert transport_method in ["oracle", "router"]
all_loss = torch.zeros_like(all_preds)
mask = ~torch.isnan(label)
all_loss[mask] = (all_preds[mask] - label[mask, None]).pow(2) # [sample x states]
L = minmax_norm(all_loss.detach())
Lh = L * alpha + minmax_norm(hist_loss) * (1 - alpha) # add hist loss for transport
Lh = minmax_norm(Lh)
P = sinkhorn(-Lh)
del Lh
if transport_method == "router":
if training:
pred = (all_preds * choice).sum(dim=1) # gumbel softmax
else:
pred = all_preds[range(len(all_preds)), prob.argmax(dim=-1)] # argmax
else:
pred = (all_preds * P).sum(dim=1)
if transport_method == "router":
loss = loss_fn(pred, label)
else:
loss = (all_loss * P).sum(dim=1).mean()
return loss, pred, L, P
|
sample-wise transport
Args:
all_preds (torch.Tensor): predictions from all predictors, [sample x states]
label (torch.Tensor): label, [sample]
choice (torch.Tensor): gumbel softmax choice, [sample x states]
prob (torch.Tensor): router predicted probability, [sample x states]
hist_loss (torch.Tensor): history loss matrix, [sample x states]
count (list): sample counts for each day, empty list for sample-wise transport
transport_method (str): transportation method
alpha (float): fusion parameter for calculating transport loss matrix
training (bool): indicate training or inference
|
transport_sample
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_tra.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_tra.py
|
MIT
|
def transport_daily(all_preds, label, choice, prob, hist_loss, count, transport_method, alpha, training=False):
"""
daily transport
Args:
all_preds (torch.Tensor): predictions from all predictors, [sample x states]
label (torch.Tensor): label, [sample]
choice (torch.Tensor): gumbel softmax choice, [days x states]
prob (torch.Tensor): router predicted probability, [days x states]
hist_loss (torch.Tensor): history loss matrix, [days x states]
count (list): sample counts for each day, [days]
transport_method (str): transportation method
alpha (float): fusion parameter for calculating transport loss matrix
training (bool): indicate training or inference
"""
assert len(prob) == len(count)
assert len(all_preds) == sum(count)
assert transport_method in ["oracle", "router"]
all_loss = [] # loss of all predictions
start = 0
for i, cnt in enumerate(count):
slc = slice(start, start + cnt) # samples from the i-th day
start += cnt
tloss = loss_fn(all_preds[slc], label[slc]) # loss of the i-th day
all_loss.append(tloss)
all_loss = torch.stack(all_loss, dim=0) # [days x states]
L = minmax_norm(all_loss.detach())
Lh = L * alpha + minmax_norm(hist_loss) * (1 - alpha) # add hist loss for transport
Lh = minmax_norm(Lh)
P = sinkhorn(-Lh)
del Lh
pred = []
start = 0
for i, cnt in enumerate(count):
slc = slice(start, start + cnt) # samples from the i-th day
start += cnt
if transport_method == "router":
if training:
tpred = all_preds[slc] @ choice[i] # gumbel softmax
else:
tpred = all_preds[slc][:, prob[i].argmax(dim=-1)] # argmax
else:
tpred = all_preds[slc] @ P[i]
pred.append(tpred)
pred = torch.cat(pred, dim=0) # [samples]
if transport_method == "router":
loss = loss_fn(pred, label)
else:
loss = (all_loss * P).sum(dim=1).mean()
return loss, pred, L, P
|
daily transport
Args:
all_preds (torch.Tensor): predictions from all predictors, [sample x states]
label (torch.Tensor): label, [sample]
choice (torch.Tensor): gumbel softmax choice, [days x states]
prob (torch.Tensor): router predicted probability, [days x states]
hist_loss (torch.Tensor): history loss matrix, [days x states]
count (list): sample counts for each day, [days]
transport_method (str): transportation method
alpha (float): fusion parameter for calculating transport loss matrix
training (bool): indicate training or inference
|
transport_daily
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_tra.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_tra.py
|
MIT
|
def load_state_dict_unsafe(model, state_dict):
"""
Load state dict to the provided model while ignoring exceptions.
"""
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
load(model)
load = None # break load->load reference cycle
return {"unexpected_keys": unexpected_keys, "missing_keys": missing_keys, "error_msgs": error_msgs}
|
Load state dict to the provided model while ignoring exceptions.
|
load_state_dict_unsafe
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_tra.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_tra.py
|
MIT
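A minimal usage sketch, assuming qlib is installed; it loads a partially matching state dict and inspects what was missing or unexpected instead of letting nn.Module.load_state_dict raise.
import torch.nn as nn
from qlib.contrib.model.pytorch_tra import load_state_dict_unsafe
src = nn.Sequential(nn.Linear(4, 8))
dst = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 1))
report = load_state_dict_unsafe(dst, src.state_dict())
print(report["missing_keys"])     # ['1.weight', '1.bias']
print(report["unexpected_keys"])  # []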
|
def count_parameters(models_or_parameters, unit="m"):
"""
This function obtains the number of parameters of one (or multiple) models, expressed in the given storage unit.
Parameters
----------
models_or_parameters : PyTorch model(s) or a list of parameters.
unit : the storage size unit.
Returns
-------
The number of parameters of the given model(s) or parameters.
"""
if isinstance(models_or_parameters, nn.Module):
counts = sum(v.numel() for v in models_or_parameters.parameters())
elif isinstance(models_or_parameters, nn.Parameter):
counts = models_or_parameters.numel()
elif isinstance(models_or_parameters, (list, tuple)):
return sum(count_parameters(x, unit) for x in models_or_parameters)
else:
counts = sum(v.numel() for v in models_or_parameters)
unit = unit.lower()
if unit in ("kb", "k"):
counts /= 2**10
elif unit in ("mb", "m"):
counts /= 2**20
elif unit in ("gb", "g"):
counts /= 2**30
elif unit is not None:
raise ValueError("Unknown unit: {:}".format(unit))
return counts
|
This function obtains the number of parameters of one (or multiple) models, expressed in the given storage unit.
Parameters
----------
models_or_parameters : PyTorch model(s) or a list of parameters.
unit : the storage size unit.
Returns
-------
The number of parameters of the given model(s) or parameters.
|
count_parameters
|
python
|
microsoft/qlib
|
qlib/contrib/model/pytorch_utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/model/pytorch_utils.py
|
MIT
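A minimal usage sketch, assuming qlib is installed; the count is divided by 2**10 / 2**20 / 2**30 for the "k" / "m" / "g" units (note that, despite the `unit is not None` branch, passing unit=None would fail earlier at unit.lower()).
import torch.nn as nn
from qlib.contrib.model.pytorch_utils import count_parameters
net = nn.Linear(128, 64)                # 128 * 64 weights + 64 biases = 8256 parameters
print(count_parameters(net, unit="k"))  # 8256 / 2**10 ~= 8.06
print(count_parameters([net, nn.Linear(64, 1)], unit="m"))  # (8256 + 65) / 2**20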
|
def __init__(self, user_data_path, save_report=True):
"""
This module is designed to manage the users in the online system
all users' data are assumed to be saved in user_data_path
Parameter
user_data_path : string
data path that all users' data are saved in
variables:
data_path : string
data path that all users' data are saved in
users_file : string
path of the file that records the add_date of users
save_report : bool
whether to save report after each trading process
users : dict{}
[user_id]->User()
a python dict saving an instance of User() for each user_id
user_record : pd.Dataframe
user_id(string), add_date(string)
indicates the add_date for each user
"""
self.data_path = pathlib.Path(user_data_path)
self.users_file = self.data_path / "users.csv"
self.save_report = save_report
self.users = {}
self.user_record = None
|
This module is designed to manage the users in the online system
all users' data are assumed to be saved in user_data_path
Parameter
user_data_path : string
data path that all users' data are saved in
variables:
data_path : string
data path that all users' data are saved in
users_file : string
path of the file that records the add_date of users
save_report : bool
whether to save report after each trading process
users : dict{}
[user_id]->User()
a python dict saving an instance of User() for each user_id
user_record : pd.Dataframe
user_id(string), add_date(string)
indicates the add_date for each user
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/manager.py
|
MIT
|
def load_users(self):
"""
load all users' data into manager
"""
self.users = {}
self.user_record = pd.read_csv(self.users_file, index_col=0)
for user_id in self.user_record.index:
self.users[user_id] = self.load_user(user_id)
|
load all users' data into manager
|
load_users
|
python
|
microsoft/qlib
|
qlib/contrib/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/manager.py
|
MIT
|
def load_user(self, user_id):
"""
return an instance of User() representing a user to be processed
Parameter
user_id : string
:return
user : User()
"""
account_path = self.data_path / user_id
strategy_file = self.data_path / user_id / "strategy_{}.pickle".format(user_id)
model_file = self.data_path / user_id / "model_{}.pickle".format(user_id)
cur_user_list = list(self.users)
if user_id in cur_user_list:
raise ValueError("User {} has been loaded".format(user_id))
else:
trade_account = Account(0)
trade_account.load_account(account_path)
strategy = load_instance(strategy_file)
model = load_instance(model_file)
user = User(account=trade_account, strategy=strategy, model=model)
return user
|
return an instance of User() representing a user to be processed
Parameter
user_id : string
:return
user : User()
|
load_user
|
python
|
microsoft/qlib
|
qlib/contrib/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/manager.py
|
MIT
|
def save_user_data(self, user_id):
"""
save an instance of User() to the user data path
Parameter
user_id : string
"""
if user_id not in self.users:
raise ValueError("Cannot find user {}".format(user_id))
self.users[user_id].account.save_account(self.data_path / user_id)
save_instance(
self.users[user_id].strategy,
self.data_path / user_id / "strategy_{}.pickle".format(user_id),
)
save_instance(
self.users[user_id].model,
self.data_path / user_id / "model_{}.pickle".format(user_id),
)
|
save an instance of User() to the user data path
Parameter
user_id : string
|
save_user_data
|
python
|
microsoft/qlib
|
qlib/contrib/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/manager.py
|
MIT
|
def add_user(self, user_id, config_file, add_date):
"""
add the new user {user_id} into user data
will create a new folder named "{user_id}" in user data path
Parameter
user_id : string
init_cash : int
config_file : str/pathlib.Path()
path of config file
"""
config_file = pathlib.Path(config_file)
if not config_file.exists():
raise ValueError("Cannot find config file {}".format(config_file))
user_path = self.data_path / user_id
if user_path.exists():
raise ValueError("User data for {} already exists".format(user_id))
with config_file.open("r") as fp:
yaml = YAML(typ="safe", pure=True)
config = yaml.load(fp)
# load model
model = init_instance_by_config(config["model"])
# load strategy
strategy = init_instance_by_config(config["strategy"])
init_args = strategy.get_init_args_from_model(model, add_date)
strategy.init(**init_args)
# init Account
trade_account = Account(init_cash=config["init_cash"])
# save user
user_path.mkdir()
save_instance(model, self.data_path / user_id / "model_{}.pickle".format(user_id))
save_instance(strategy, self.data_path / user_id / "strategy_{}.pickle".format(user_id))
trade_account.save_account(self.data_path / user_id)
user_record = pd.read_csv(self.users_file, index_col=0)
user_record.loc[user_id] = [add_date]
user_record.to_csv(self.users_file)
|
add the new user {user_id} into user data
will create a new folder named "{user_id}" in user data path
Parameter
user_id : string
init_cash : int
config_file : str/pathlib.Path()
path of config file
|
add_user
|
python
|
microsoft/qlib
|
qlib/contrib/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/manager.py
|
MIT
|
def remove_user(self, user_id):
"""
remove user {user_id} in current user dataset
will delete the folder "{user_id}" in user data path
:param
user_id : string
"""
user_path = self.data_path / user_id
if not user_path.exists():
raise ValueError("Cannot find user data {}".format(user_id))
shutil.rmtree(user_path)
user_record = pd.read_csv(self.users_file, index_col=0)
user_record.drop([user_id], inplace=True)
user_record.to_csv(self.users_file)
|
remove user {user_id} in current user dataset
will delete the folder "{user_id}" in user data path
:param
user_id : string
|
remove_user
|
python
|
microsoft/qlib
|
qlib/contrib/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/manager.py
|
MIT
|
def __init__(self, client: str):
"""
Parameters
----------
client: str
The qlib client config file(.yaml)
"""
self.logger = get_module_logger("online operator", level=logging.INFO)
self.client = client
|
Parameters
----------
client: str
The qlib client config file(.yaml)
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/online/operator.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/operator.py
|
MIT
|
def init(client, path, date=None):
"""Initial UserManager(), get predict date and trade date
Parameters
----------
client: str
The qlib client config file(.yaml)
path : str
Path to save user account.
date : str (YYYY-MM-DD)
Trade date, when the generated order list will be traded.
Return
----------
um: UserManager()
pred_date: pd.Timestamp
trade_date: pd.Timestamp
"""
qlib.init_from_yaml_conf(client)
um = UserManager(user_data_path=pathlib.Path(path))
um.load_users()
if not date:
trade_date, pred_date = None, None
else:
trade_date = pd.Timestamp(date)
if not is_tradable_date(trade_date):
raise ValueError("trade date is not tradable date".format(trade_date.date()))
pred_date = get_pre_trading_date(trade_date, future=True)
return um, pred_date, trade_date
|
Initialize UserManager(), get predict date and trade date
Parameters
----------
client: str
The qlib client config file(.yaml)
path : str
Path to save user account.
date : str (YYYY-MM-DD)
Trade date, when the generated order list will be traded.
Return
----------
um: UserManager()
pred_date: pd.Timestamp
trade_date: pd.Timestamp
|
init
|
python
|
microsoft/qlib
|
qlib/contrib/online/operator.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/operator.py
|
MIT
|
def add_user(self, id, config, path, date):
"""Add a new user into the a folder to run 'online' module.
Parameters
----------
id : str
User id, should be unique.
config : str
The file path (yaml) of user config
path : str
Path to save user account.
date : str (YYYY-MM-DD)
The date that user account was added.
"""
create_user_folder(path)
qlib.init_from_yaml_conf(self.client)
um = UserManager(user_data_path=path)
add_date = D.calendar(end_time=date)[-1]
if not is_tradable_date(add_date):
raise ValueError("add date is not tradable date".format(add_date.date()))
um.add_user(user_id=id, config_file=config, add_date=add_date)
|
Add a new user into the folder to run the 'online' module.
Parameters
----------
id : str
User id, should be unique.
config : str
The file path (yaml) of user config
path : str
Path to save user account.
date : str (YYYY-MM-DD)
The date that user account was added.
|
add_user
|
python
|
microsoft/qlib
|
qlib/contrib/online/operator.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/operator.py
|
MIT
|
def generate(self, date, path):
"""Generate order list that will be traded at 'date'.
Parameters
----------
date : str (YYYY-MM-DD)
Trade date, when the generated order list will be traded.
path : str
Path to save user account.
"""
um, pred_date, trade_date = self.init(self.client, path, date)
for user_id, user in um.users.items():
dates, trade_exchange = prepare(um, pred_date, user_id)
# get and save the score at predict date
input_data = user.model.get_data_with_date(pred_date)
score_series = user.model.predict(input_data)
save_score_series(score_series, (pathlib.Path(path) / user_id), trade_date)
# update strategy (and model)
user.strategy.update(score_series, pred_date, trade_date)
# generate and save order list
order_list = user.strategy.generate_trade_decision(
score_series=score_series,
current=user.account.current_position,
trade_exchange=trade_exchange,
trade_date=trade_date,
)
save_order_list(
order_list=order_list,
user_path=(pathlib.Path(path) / user_id),
trade_date=trade_date,
)
self.logger.info("Generate order list at {} for {}".format(trade_date, user_id))
um.save_user_data(user_id)
|
Generate order list that will be traded at 'date'.
Parameters
----------
date : str (YYYY-MM-DD)
Trade date, when the generated order list will be traded.
path : str
Path to save user account.
|
generate
|
python
|
microsoft/qlib
|
qlib/contrib/online/operator.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/operator.py
|
MIT
|
def execute(self, date, exchange_config, path):
"""Execute the orderlist at 'date'.
Parameters
----------
date : str (YYYY-MM-DD)
Trade date, that the generated order list will be traded.
exchange_config: str
The file path (yaml) of exchange config
path : str
Path to save user account.
"""
um, pred_date, trade_date = self.init(self.client, path, date)
for user_id, user in um.users.items():
dates, trade_exchange = prepare(um, trade_date, user_id, exchange_config)
executor = SimulatorExecutor(trade_exchange=trade_exchange)
if str(dates[0].date()) != str(pred_date.date()):
raise ValueError(
"The account data is not newest! last trading date {}, today {}".format(
dates[0].date(), trade_date.date()
)
)
# load and execute the order list
# will not modify the trade_account after executing
order_list = load_order_list(user_path=(pathlib.Path(path) / user_id), trade_date=trade_date)
trade_info = executor.execute(order_list=order_list, trade_account=user.account, trade_date=trade_date)
executor.save_executed_file_from_trade_info(
trade_info=trade_info,
user_path=(pathlib.Path(path) / user_id),
trade_date=trade_date,
)
self.logger.info("execute order list at {} for {}".format(trade_date.date(), user_id))
|
Execute the order list at 'date'.
Parameters
----------
date : str (YYYY-MM-DD)
Trade date, when the generated order list will be traded.
exchange_config: str
The file path (yaml) of exchange config
path : str
Path to save user account.
|
execute
|
python
|
microsoft/qlib
|
qlib/contrib/online/operator.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/operator.py
|
MIT
|
def update(self, date, path, type="SIM"):
"""Update account at 'date'.
Parameters
----------
date : str (YYYY-MM-DD)
Trade date, when the generated order list will be traded.
path : str
Path to save user account.
type : str
which executor was used to execute the order list
'SIM': SimulatorExecutor()
"""
if type not in ["SIM", "YC"]:
raise ValueError("type is invalid, {}".format(type))
um, pred_date, trade_date = self.init(self.client, path, date)
for user_id, user in um.users.items():
dates, trade_exchange = prepare(um, trade_date, user_id)
if type == "SIM":
executor = SimulatorExecutor(trade_exchange=trade_exchange)
else:
raise ValueError("not found executor")
# dates[0] is the last_trading_date
if str(dates[0].date()) > str(pred_date.date()):
raise ValueError(
"The account data is not newest! last trading date {}, today {}".format(
dates[0].date(), trade_date.date()
)
)
# load trade info and update account
trade_info = executor.load_trade_info_from_executed_file(
user_path=(pathlib.Path(path) / user_id), trade_date=trade_date
)
score_series = load_score_series((pathlib.Path(path) / user_id), trade_date)
update_account(user.account, trade_info, trade_exchange, trade_date)
portfolio_metrics = user.account.portfolio_metrics.generate_portfolio_metrics_dataframe()
self.logger.info(portfolio_metrics)
um.save_user_data(user_id)
self.logger.info("Update account state {} for {}".format(trade_date, user_id))
|
Update account at 'date'.
Parameters
----------
date : str (YYYY-MM-DD)
Trade date, when the generated order list will be traded.
path : str
Path to save user account.
type : str
which executor was used to execute the order list
'SIM': SimulatorExecutor()
|
update
|
python
|
microsoft/qlib
|
qlib/contrib/online/operator.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/operator.py
|
MIT
|
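A rough sketch of one trading day of the online workflow built from the three methods above (generate, then execute, then update). The Operator instance `op` and all file names carry over the same assumptions as in the add_user sketch; this is illustrative, not a script from the repository.

trade_date = "2020-01-03"
account_path = "./online_accounts"

op.generate(date=trade_date, path=account_path)            # predict, save score and order list
op.execute(date=trade_date,                                # trade the saved order list
           exchange_config="exchange.yaml",
           path=account_path)
op.update(date=trade_date, path=account_path, type="SIM")  # update the account with trade info
|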
def simulate(self, id, config, exchange_config, start, end, path, bench="SH000905"):
"""Run the ( generate_trade_decision -> execute_order_list -> update_account) process everyday
from start date to end date.
Parameters
----------
id : str
user id, needs to be unique
config : str
The file path (yaml) of user config
exchange_config: str
The file path (yaml) of exchange config
start : str "YYYY-MM-DD"
The start date to run the online simulate
end : str "YYYY-MM-DD"
The end date to run the online simulate
path : str
Path to save user account.
bench : str
The benchmark that our result is compared with.
'SH000905' for csi500, 'SH000300' for csi300
"""
# Clear the current user if exists, then add a new user.
create_user_folder(path)
um = self.init(self.client, path, None)[0]
start_date, end_date = pd.Timestamp(start), pd.Timestamp(end)
try:
um.remove_user(user_id=id)
except BaseException:
pass
um.add_user(user_id=id, config_file=config, add_date=pd.Timestamp(start_date))
# Do the online simulate
um.load_users()
user = um.users[id]
dates, trade_exchange = prepare(um, end_date, id, exchange_config)
executor = SimulatorExecutor(trade_exchange=trade_exchange)
for pred_date, trade_date in zip(dates[:-2], dates[1:-1]):
user_path = pathlib.Path(path) / id
# 1. load and save score_series
input_data = user.model.get_data_with_date(pred_date)
score_series = user.model.predict(input_data)
save_score_series(score_series, (pathlib.Path(path) / id), trade_date)
# 2. update strategy (and model)
user.strategy.update(score_series, pred_date, trade_date)
# 3. generate and save order list
order_list = user.strategy.generate_trade_decision(
score_series=score_series,
current=user.account.current_position,
trade_exchange=trade_exchange,
trade_date=trade_date,
)
save_order_list(order_list=order_list, user_path=user_path, trade_date=trade_date)
# 4. auto execute order list
order_list = load_order_list(user_path=user_path, trade_date=trade_date)
trade_info = executor.execute(trade_account=user.account, order_list=order_list, trade_date=trade_date)
executor.save_executed_file_from_trade_info(
trade_info=trade_info, user_path=user_path, trade_date=trade_date
)
# 5. update account state
trade_info = executor.load_trade_info_from_executed_file(user_path=user_path, trade_date=trade_date)
update_account(user.account, trade_info, trade_exchange, trade_date)
portfolio_metrics = user.account.portfolio_metrics.generate_portfolio_metrics_dataframe()
self.logger.info(portfolio_metrics)
um.save_user_data(id)
self.show(id, path, bench)
|
Run the (generate_trade_decision -> execute_order_list -> update_account) process every day
from start date to end date.
Parameters
----------
id : str
user id, needs to be unique
config : str
The file path (yaml) of user config
exchange_config: str
The file path (yaml) of exchange config
start : str "YYYY-MM-DD"
The start date to run the online simulate
end : str "YYYY-MM-DD"
The end date to run the online simulate
path : str
Path to save user account.
bench : str
The benchmark that our result is compared with.
'SH000905' for csi500, 'SH000300' for csi300
|
simulate
|
python
|
microsoft/qlib
|
qlib/contrib/online/operator.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/operator.py
|
MIT
|
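A hedged sketch of running the whole loop over a date range in one call, compared against CSI300. All configs and paths are placeholders, and `op` is the assumed Operator instance from the earlier sketches.

op.simulate(
    id="demo_user",
    config="user_config.yaml",
    exchange_config="exchange.yaml",
    start="2020-01-02",
    end="2020-06-30",
    path="./online_sim_accounts",
    bench="SH000300",   # 'SH000905' (csi500) is the documented default
)
|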
def show(self, id, path, bench="SH000905"):
"""show the newly report (mean, std, information_ratio, annualized_return)
Parameters
----------
id : str
user id, needs to be unique
path : str
Path to save user account.
bench : str
The benchmark that our result is compared with.
'SH000905' for csi500, 'SH000300' for csi300
"""
um = self.init(self.client, path, None)[0]
if id not in um.users:
raise ValueError("Cannot find user ".format(id))
bench = D.features([bench], ["$change"]).loc[bench, "$change"]
portfolio_metrics = um.users[id].account.portfolio_metrics.generate_portfolio_metrics_dataframe()
portfolio_metrics["bench"] = bench
analysis_result = {}
r = (portfolio_metrics["return"] - portfolio_metrics["bench"]).dropna()
analysis_result["excess_return_without_cost"] = risk_analysis(r)
r = (portfolio_metrics["return"] - portfolio_metrics["bench"] - portfolio_metrics["cost"]).dropna()
analysis_result["excess_return_with_cost"] = risk_analysis(r)
print("Result:")
print("excess_return_without_cost:")
print(analysis_result["excess_return_without_cost"])
print("excess_return_with_cost:")
print(analysis_result["excess_return_with_cost"])
|
show the latest report (mean, std, information_ratio, annualized_return)
Parameters
----------
id : str
user id, needs to be unique
path : str
Path to save user account.
bench : str
The benchmark that our result is compared with.
'SH000905' for csi500, 'SH000300' for csi300
|
show
|
python
|
microsoft/qlib
|
qlib/contrib/online/operator.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/operator.py
|
MIT
|
def __init__(self, account, strategy, model, verbose=False):
"""
A user in the online system, which contains three modules: account, strategy and model.
Parameter
account : Account()
strategy :
a strategy instance
model :
a model instance
report_save_path : string
the path to save report. Will not save report if None
verbose : bool
Whether to print the info during the process
"""
self.logger = get_module_logger("User", level=logging.INFO)
self.account = account
self.strategy = strategy
self.model = model
self.verbose = verbose
|
A user in the online system, which contains three modules: account, strategy and model.
Parameter
account : Account()
strategy :
a strategy instance
model :
a model instance
report_save_path : string
the path to save report. Will not save report if None
verbose : bool
Whether to print the info during the process
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/online/user.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/user.py
|
MIT
|
def init_state(self, date):
"""
init state when each trading date begins
Parameter
date : pd.Timestamp
"""
self.account.init_state(today=date)
self.strategy.init_state(trade_date=date, model=self.model, account=self.account)
return
|
init state when each trading date begins
Parameter
date : pd.Timestamp
|
init_state
|
python
|
microsoft/qlib
|
qlib/contrib/online/user.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/user.py
|
MIT
|
def get_latest_trading_date(self):
"""
return the latest trading date for user {user_id}
Parameter
user_id : string
:return
date : string (e.g. '2018-10-08')
"""
if not self.account.last_trade_date:
return None
return str(self.account.last_trade_date.date())
|
return the latest trading date for user {user_id}
Parameter
user_id : string
:return
date : string (e.g. '2018-10-08')
|
get_latest_trading_date
|
python
|
microsoft/qlib
|
qlib/contrib/online/user.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/user.py
|
MIT
|
def showReport(self, benchmark="SH000905"):
"""
show the latest report (mean, std, information_ratio, annualized_return)
Parameter
benchmark : string
benchmark to be compared with, 'SH000905' for csi500
"""
bench = D.features([benchmark], ["$change"], disk_cache=True).loc[benchmark, "$change"]
portfolio_metrics = self.account.portfolio_metrics.generate_portfolio_metrics_dataframe()
portfolio_metrics["bench"] = bench
analysis_result = {"pred": {}, "excess_return_without_cost": {}, "excess_return_with_cost": {}}
r = (portfolio_metrics["return"] - portfolio_metrics["bench"]).dropna()
analysis_result["excess_return_without_cost"][0] = risk_analysis(r)
r = (portfolio_metrics["return"] - portfolio_metrics["bench"] - portfolio_metrics["cost"]).dropna()
analysis_result["excess_return_with_cost"][0] = risk_analysis(r)
self.logger.info("Result of porfolio:")
self.logger.info("excess_return_without_cost:")
self.logger.info(analysis_result["excess_return_without_cost"][0])
self.logger.info("excess_return_with_cost:")
self.logger.info(analysis_result["excess_return_with_cost"][0])
return portfolio_metrics
|
show the latest report (mean, std, information_ratio, annualized_return)
Parameter
benchmark : string
benchmark to be compared with, 'SH000905' for csi500
|
showReport
|
python
|
microsoft/qlib
|
qlib/contrib/online/user.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/user.py
|
MIT
|
def load_instance(file_path):
"""
load a pickle file
Parameter
file_path : string / pathlib.Path()
path of file to be loaded
:return
An instance loaded from file
"""
file_path = pathlib.Path(file_path)
if not file_path.exists():
raise ValueError("Cannot find file {}".format(file_path))
with file_path.open("rb") as fr:
instance = pickle.load(fr)
return instance
|
load a pickle file
Parameter
file_path : string / pathlib.Path()
path of file to be loaded
:return
An instance loaded from file
|
load_instance
|
python
|
microsoft/qlib
|
qlib/contrib/online/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/utils.py
|
MIT
|
def save_instance(instance, file_path):
"""
save(dump) an instance to a pickle file
Parameter
instance :
data to be dumped
file_path : string / pathlib.Path()
path of file to be dumped
"""
file_path = pathlib.Path(file_path)
with file_path.open("wb") as fr:
pickle.dump(instance, fr, C.dump_protocol_version)
|
save(dump) an instance to a pickle file
Parameter
instance :
data to be dumped
file_path : string / pathlib.Path()
path of file to be dumped
|
save_instance
|
python
|
microsoft/qlib
|
qlib/contrib/online/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/utils.py
|
MIT
|
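A small round trip with the two pickle helpers above; the import path simply follows the module shown in these records, and the file name is a placeholder.

from qlib.contrib.online.utils import load_instance, save_instance

state = {"user_id": "demo_user", "last_trade_date": "2020-01-03"}
save_instance(state, "./demo_state.pkl")       # dump to disk
restored = load_instance("./demo_state.pkl")   # raises ValueError if the file is missing
assert restored == state
|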
def prepare(um, today, user_id, exchange_config=None):
"""
1. Get the dates on which trading needs to be done up to today for user {user_id}
dates[0] indicates the latest trading date of User{user_id};
if User{user_id} hasn't traded before, then dates[0] represents the init date of User{user_id}.
2. Set up the exchange with the exchange_config file
Parameter
um : UserManager()
today : pd.Timestamp()
user_id : str
:return
dates : list of pd.Timestamp
trade_exchange : Exchange()
"""
# get latest trading date for {user_id}
# if it is None, the user hasn't traded yet, so the last trading date is the init date of {user_id}
latest_trading_date = um.users[user_id].get_latest_trading_date()
if not latest_trading_date:
latest_trading_date = um.user_record.loc[user_id][0]
if str(today.date()) < latest_trading_date:
log.warning("user_id:{}, last trading date {} after today {}".format(user_id, latest_trading_date, today))
return [pd.Timestamp(latest_trading_date)], None
dates = D.calendar(
start_time=pd.Timestamp(latest_trading_date),
end_time=pd.Timestamp(today),
future=True,
)
dates = list(dates)
dates.append(get_next_trading_date(dates[-1], future=True))
if exchange_config:
with pathlib.Path(exchange_config).open("r") as fp:
yaml = YAML(typ="safe", pure=True)
exchange_paras = yaml.load(fp)
else:
exchange_paras = {}
trade_exchange = Exchange(trade_dates=dates, **exchange_paras)
return dates, trade_exchange
|
1. Get the dates on which trading needs to be done up to today for user {user_id}
dates[0] indicates the latest trading date of User{user_id};
if User{user_id} hasn't traded before, then dates[0] represents the init date of User{user_id}.
2. Set up the exchange with the exchange_config file
Parameter
um : UserManager()
today : pd.Timestamp()
user_id : str
:return
dates : list of pd.Timestamp
trade_exchange : Exchange()
|
prepare
|
python
|
microsoft/qlib
|
qlib/contrib/online/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/online/utils.py
|
MIT
|
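A hedged sketch of calling `prepare` for one user. It assumes qlib has been initialized with data, `um` is a UserManager whose users have already been loaded, and `exchange.yaml` exists; the user id is a placeholder.

import pandas as pd
from qlib.contrib.online.utils import prepare

dates, trade_exchange = prepare(
    um,                               # a loaded UserManager instance (assumed to exist)
    today=pd.Timestamp("2020-01-03"),
    user_id="demo_user",
    exchange_config="exchange.yaml",  # optional; if omitted, Exchange() gets default kwargs
)
# dates[0] is the user's last trading date (or its init date if it never traded);
# dates[-1] is the next trading date after `today`.
print(dates[0], dates[-1])
|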
def get_calendar_day(freq="1min", future=False):
"""
Load High-Freq Calendar Date Using Memcache.
!!!NOTE: Loading the calendar is quite slow. So loading the calendar before starting multiprocessing will make it faster.
Parameters
----------
freq : str
frequency of the calendar file to read.
future : bool
whether to include future trading days.
Returns
-------
_calendar:
array of date.
"""
flag = f"{freq}_future_{future}_day"
if flag in H["c"]:
_calendar = H["c"][flag]
else:
_calendar = np.array(list(map(lambda x: x.date(), Cal.load_calendar(freq, future))))
H["c"][flag] = _calendar
return _calendar
|
Load High-Freq Calendar Date Using Memcache.
!!!NOTE: Loading the calendar is quite slow. So loading the calendar before starting multiprocessing will make it faster.
Parameters
----------
freq : str
frequency of the calendar file to read.
future : bool
whether to include future trading days.
Returns
-------
_calendar:
array of date.
|
get_calendar_day
|
python
|
microsoft/qlib
|
qlib/contrib/ops/high_freq.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/ops/high_freq.py
|
MIT
|
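A short sketch of the memcache behaviour described above: the first call loads and caches the calendar, and repeated calls with the same (freq, future) pair reuse the cached array. It assumes qlib.init(...) has already been called with minute-level data available.

from qlib.contrib.ops.high_freq import get_calendar_day

cal = get_calendar_day(freq="1min", future=False)        # slow path: load and cache
cal_again = get_calendar_day(freq="1min", future=False)  # fast path: served from H["c"]
assert cal is cal_again
print(cal[:3])   # first few calendar dates
|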
def get_calendar_minute(freq="day", future=False):
"""Load High-Freq Calendar Minute Using Memcache"""
flag = f"{freq}_future_{future}_day"
if flag in H["c"]:
_calendar = H["c"][flag]
else:
_calendar = np.array(list(map(lambda x: x.minute // 30, Cal.load_calendar(freq, future))))
H["c"][flag] = _calendar
return _calendar
|
Load High-Freq Calendar Minute Using Memcache
|
get_calendar_minute
|
python
|
microsoft/qlib
|
qlib/contrib/ops/high_freq.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/ops/high_freq.py
|
MIT
|
def __init__(
self, df: pd.DataFrame = None, layout: dict = None, graph_kwargs: dict = None, name_dict: dict = None, **kwargs
):
"""
:param df:
:param layout:
:param graph_kwargs:
:param name_dict:
:param kwargs:
layout: dict
go.Layout parameters
graph_kwargs: dict
Graph parameters, eg: go.Bar(**graph_kwargs)
"""
self._df = df
self._layout = dict() if layout is None else layout
self._graph_kwargs = dict() if graph_kwargs is None else graph_kwargs
self._name_dict = name_dict
self.data = None
self._init_parameters(**kwargs)
self._init_data()
|
:param df:
:param layout:
:param graph_kwargs:
:param name_dict:
:param kwargs:
layout: dict
go.Layout parameters
graph_kwargs: dict
Graph parameters, eg: go.Bar(**graph_kwargs)
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/report/graph.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/graph.py
|
MIT
|
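A minimal sketch of how a concrete subclass of this base graph is typically constructed. `ScatterGraph` is the subclass referenced by the default kind_map later in this file; its exact rendering behaviour is assumed here, and the data is made up.

import pandas as pd
from qlib.contrib.report.graph import ScatterGraph

df = pd.DataFrame(
    {"strategy": [0.00, 0.01, 0.03], "bench": [0.00, 0.005, 0.02]},
    index=pd.date_range("2020-01-02", periods=3),
)
fig = ScatterGraph(
    df,
    layout=dict(title="Cumulative return", xaxis=dict(tickangle=45)),
    graph_kwargs=dict(mode="lines+markers"),   # forwarded to go.Scatter(**graph_kwargs)
).figure
# fig is a plotly.graph_objs.Figure; call fig.show() to render it.
|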
def __init__(
self,
df: pd.DataFrame = None,
kind_map: dict = None,
layout: dict = None,
sub_graph_layout: dict = None,
sub_graph_data: list = None,
subplots_kwargs: dict = None,
**kwargs,
):
"""
:param df: pd.DataFrame
:param kind_map: dict, subplots graph kind and kwargs
eg: dict(kind='ScatterGraph', kwargs=dict())
:param layout: `go.Layout` parameters
:param sub_graph_layout: Layout of each graphic, similar to 'layout'
:param sub_graph_data: Instantiation parameters for each sub-graphic
eg: [(column_name, instance_parameters), ]
column_name: str or go.Figure
Instance_parameters:
- row: int, the row where the graph is located
- col: int, the col where the graph is located
- name: str, show name, default column_name in 'df'
- kind: str, graph kind, default `kind` param, eg: bar, scatter, ...
- graph_kwargs: dict, graph kwargs, default {}, used in `go.Bar(**graph_kwargs)`
:param subplots_kwargs: `plotly.tools.make_subplots` original parameters
- shared_xaxes: bool, default False
- shared_yaxes: bool, default False
- vertical_spacing: float, default 0.3 / rows
- subplot_titles: list, default []
If `sub_graph_data` is None, will generate 'subplot_titles' according to `df.columns`,
this field will be discarded
- specs: list, see `make_subplots` docs
- rows: int, Number of rows in the subplot grid, default 1
If `sub_graph_data` is None, will generate 'rows' according to `df`, this field will be discarded
- cols: int, Number of cols in the subplot grid, default 1
If `sub_graph_data` is None, will generate 'cols' according to `df`, this field will be discarded
:param kwargs:
"""
self._df = df
self._layout = layout
self._sub_graph_layout = sub_graph_layout
self._kind_map = kind_map
if self._kind_map is None:
self._kind_map = dict(kind="ScatterGraph", kwargs=dict())
self._subplots_kwargs = subplots_kwargs
if self._subplots_kwargs is None:
self._init_subplots_kwargs()
self.__cols = self._subplots_kwargs.get("cols", 2) # pylint: disable=W0238
self.__rows = self._subplots_kwargs.get( # pylint: disable=W0238
"rows", math.ceil(len(self._df.columns) / self.__cols)
)
self._sub_graph_data = sub_graph_data
if self._sub_graph_data is None:
self._init_sub_graph_data()
self._init_figure()
|
:param df: pd.DataFrame
:param kind_map: dict, subplots graph kind and kwargs
eg: dict(kind='ScatterGraph', kwargs=dict())
:param layout: `go.Layout` parameters
:param sub_graph_layout: Layout of each graphic, similar to 'layout'
:param sub_graph_data: Instantiation parameters for each sub-graphic
eg: [(column_name, instance_parameters), ]
column_name: str or go.Figure
Instance_parameters:
- row: int, the row where the graph is located
- col: int, the col where the graph is located
- name: str, show name, default column_name in 'df'
- kind: str, graph kind, default `kind` param, eg: bar, scatter, ...
- graph_kwargs: dict, graph kwargs, default {}, used in `go.Bar(**graph_kwargs)`
:param subplots_kwargs: `plotly.tools.make_subplots` original parameters
- shared_xaxes: bool, default False
- shared_yaxes: bool, default False
- vertical_spacing: float, default 0.3 / rows
- subplot_titles: list, default []
If `sub_graph_data` is None, will generate 'subplot_titles' according to `df.columns`,
this field will be discarded
- specs: list, see `make_subplots` docs
- rows: int, Number of rows in the subplot grid, default 1
If `sub_graph_data` is None, will generate 'rows' according to `df`, this field will be discarded
- cols: int, Number of cols in the subplot grid, default 1
If `sub_graph_data` is None, will generate 'cols' according to `df`, this field will be discarded
:param kwargs:
|
__init__
|
python
|
microsoft/qlib
|
qlib/contrib/report/graph.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/graph.py
|
MIT
|
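A hedged sketch of the simplest SubplotsGraph usage: pass only a DataFrame and a grid size, and let the class derive one scatter sub-graph per column from the default kind_map described above. The data is synthetic.

import numpy as np
import pandas as pd
from qlib.contrib.report.graph import SubplotsGraph

df = pd.DataFrame(
    np.random.randn(30, 4).cumsum(axis=0),
    index=pd.date_range("2020-01-02", periods=30),
    columns=["group1", "group2", "group3", "group4"],
)
fig = SubplotsGraph(
    df,
    subplots_kwargs=dict(rows=2, cols=2, print_grid=False),
    layout=dict(height=500, title="Per-group cumulative value"),
).figure
|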
def guess_plotly_rangebreaks(dt_index: pd.DatetimeIndex):
"""
This function `guesses` the rangebreaks required to remove gaps in datetime index.
It basically calculates the difference between a `continuous` datetime index and the given index.
For more details on `rangebreaks` params in plotly, see
https://plotly.com/python/reference/layout/xaxis/#layout-xaxis-rangebreaks
Parameters
----------
dt_index: pd.DatetimeIndex
The datetimes of the data.
Returns
-------
the `rangebreaks` to be passed into plotly axis.
"""
dt_idx = dt_index.sort_values()
gaps = dt_idx[1:] - dt_idx[:-1]
min_gap = gaps.min()
gaps_to_break = {}
for gap, d in zip(gaps, dt_idx[:-1]):
if gap > min_gap:
gaps_to_break.setdefault(gap - min_gap, []).append(d + min_gap)
return [dict(values=v, dvalue=int(k.total_seconds() * 1000)) for k, v in gaps_to_break.items()]
|
This function `guesses` the rangebreaks required to remove gaps in datetime index.
It basically calculates the difference between a `continuous` datetime index and the given index.
For more details on `rangebreaks` params in plotly, see
https://plotly.com/python/reference/layout/xaxis/#layout-xaxis-rangebreaks
Parameters
----------
dt_index: pd.DatetimeIndex
The datetimes of the data.
Returns
-------
the `rangebreaks` to be passed into plotly axis.
|
guess_plotly_rangebreaks
|
python
|
microsoft/qlib
|
qlib/contrib/report/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/utils.py
|
MIT
|
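A small sketch of feeding the guessed rangebreaks into a plotly x-axis so that weekends and holidays are not drawn as empty gaps; only public plotly APIs are used, and the data is synthetic.

import pandas as pd
import plotly.graph_objs as go
from qlib.contrib.report.utils import guess_plotly_rangebreaks

idx = pd.to_datetime(["2020-01-02", "2020-01-03", "2020-01-06", "2020-01-07"])  # skips a weekend
series = pd.Series([1.00, 1.02, 0.99, 1.01], index=idx)

fig = go.Figure(go.Scatter(x=series.index, y=series.values, mode="lines+markers"))
fig.update_xaxes(rangebreaks=guess_plotly_rangebreaks(series.index))
# without the rangebreaks, plotly would leave a visible gap for Jan 4-5
|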
def _pred_ic(
pred_label: pd.DataFrame = None, methods: Sequence[Literal["IC", "Rank IC"]] = ("IC", "Rank IC"), **kwargs
) -> tuple:
"""
:param pred_label: pd.DataFrame
must contain one column of realized return with name `label` and one column of predicted score named `score`.
:param methods: Sequence[Literal["IC", "Rank IC"]]
IC series to plot.
IC is the cross-sectional Pearson correlation between label and score.
Rank IC is the Spearman correlation between label and score.
For the monthly IC, the IC histogram and the IC Q-Q plot, only the first type of IC will be plotted.
:return:
"""
_methods_mapping = {"IC": "pearson", "Rank IC": "spearman"}
def _corr_series(x, method):
return x["label"].corr(x["score"], method=method)
ic_df = pd.concat(
[
pred_label.groupby(level="datetime", group_keys=False)
.apply(partial(_corr_series, method=_methods_mapping[m]))
.rename(m)
for m in methods
],
axis=1,
)
_ic = ic_df.iloc(axis=1)[0]
_index = _ic.index.get_level_values(0).astype("str").str.replace("-", "").str.slice(0, 6)
_monthly_ic = _ic.groupby(_index, group_keys=False).mean()
_monthly_ic.index = pd.MultiIndex.from_arrays(
[_monthly_ic.index.str.slice(0, 4), _monthly_ic.index.str.slice(4, 6)],
names=["year", "month"],
)
# fill month
_month_list = pd.date_range(
start=pd.Timestamp(f"{_index.min()[:4]}0101"),
end=pd.Timestamp(f"{_index.max()[:4]}1231"),
freq="1M",
)
_years = []
_month = []
for _date in _month_list:
_date = _date.strftime("%Y%m%d")
_years.append(_date[:4])
_month.append(_date[4:6])
fill_index = pd.MultiIndex.from_arrays([_years, _month], names=["year", "month"])
_monthly_ic = _monthly_ic.reindex(fill_index)
ic_bar_figure = ic_figure(ic_df, kwargs.get("show_nature_day", False))
ic_heatmap_figure = HeatmapGraph(
_monthly_ic.unstack(),
layout=dict(title="Monthly IC", xaxis=dict(dtick=1), yaxis=dict(tickformat="04d", dtick=1)),
graph_kwargs=dict(xtype="array", ytype="array"),
).figure
dist = stats.norm
_qqplot_fig = _plot_qq(_ic, dist)
if isinstance(dist, stats.norm.__class__):
dist_name = "Normal"
else:
dist_name = "Unknown"
_ic_df = _ic.to_frame("IC")
_bin_size = ((_ic_df.max() - _ic_df.min()) / 20).min()
_sub_graph_data = [
(
"IC",
dict(
row=1,
col=1,
name="",
kind="DistplotGraph",
graph_kwargs=dict(bin_size=_bin_size),
),
),
(_qqplot_fig, dict(row=1, col=2)),
]
ic_hist_figure = SubplotsGraph(
_ic_df.dropna(),
kind_map=dict(kind="HistogramGraph", kwargs=dict()),
subplots_kwargs=dict(
rows=1,
cols=2,
print_grid=False,
subplot_titles=["IC", "IC %s Dist. Q-Q" % dist_name],
),
sub_graph_data=_sub_graph_data,
layout=dict(
yaxis2=dict(title="Observed Quantile"),
xaxis2=dict(title=f"{dist_name} Distribution Quantile"),
),
).figure
return ic_bar_figure, ic_heatmap_figure, ic_hist_figure
|
:param pred_label: pd.DataFrame
must contain one column of realized return with name `label` and one column of predicted score named `score`.
:param methods: Sequence[Literal["IC", "Rank IC"]]
IC series to plot.
IC is the cross-sectional Pearson correlation between label and score.
Rank IC is the Spearman correlation between label and score.
For the monthly IC, the IC histogram and the IC Q-Q plot, only the first type of IC will be plotted.
:return:
|
_pred_ic
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_model/analysis_model_performance.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_model/analysis_model_performance.py
|
MIT
|
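A tiny, self-contained pandas illustration of the two IC definitions this helper relies on: a per-datetime Pearson correlation for "IC" and a Spearman correlation for "Rank IC" between `label` and `score`. The data is random and purely illustrative.

import numpy as np
import pandas as pd

idx = pd.MultiIndex.from_product(
    [["SH600000", "SH600004", "SH600009"], pd.date_range("2020-01-02", periods=2)],
    names=["instrument", "datetime"],
)
pred_label = pd.DataFrame(
    {"score": np.random.randn(len(idx)), "label": np.random.randn(len(idx))}, index=idx
)

ic = pred_label.groupby(level="datetime").apply(lambda x: x["label"].corr(x["score"]))
rank_ic = pred_label.groupby(level="datetime").apply(
    lambda x: x["label"].corr(x["score"], method="spearman")
)
print(pd.concat([ic.rename("IC"), rank_ic.rename("Rank IC")], axis=1))
|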
def ic_figure(ic_df: pd.DataFrame, show_nature_day=True, **kwargs) -> go.Figure:
r"""IC figure
:param ic_df: ic DataFrame
:param show_nature_day: whether to display non-trading days on the x-axis
:param \*\*kwargs: contains some parameters to control plot style in plotly. Currently, supports
- `rangebreaks`: https://plotly.com/python/time-series/#Hiding-Weekends-and-Holidays
:return: plotly.graph_objs.Figure
"""
if show_nature_day:
date_index = pd.date_range(ic_df.index.min(), ic_df.index.max())
ic_df = ic_df.reindex(date_index)
ic_bar_figure = BarGraph(
ic_df,
layout=dict(
title="Information Coefficient (IC)",
xaxis=dict(tickangle=45, rangebreaks=kwargs.get("rangebreaks", guess_plotly_rangebreaks(ic_df.index))),
),
).figure
return ic_bar_figure
|
IC figure
:param ic_df: ic DataFrame
:param show_nature_day: whether to display non-trading days on the x-axis
:param \*\*kwargs: contains some parameters to control plot style in plotly. Currently, supports
- `rangebreaks`: https://plotly.com/python/time-series/#Hiding-Weekends-and-Holidays
:return: plotly.graph_objs.Figure
|
ic_figure
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_model/analysis_model_performance.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_model/analysis_model_performance.py
|
MIT
|
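A minimal sketch of calling ic_figure directly with a daily IC DataFrame (datetime index, one column per IC type); the import path follows the file shown in these records and the data is synthetic.

import numpy as np
import pandas as pd
from qlib.contrib.report.analysis_model.analysis_model_performance import ic_figure

ic_df = pd.DataFrame(
    {"IC": np.random.randn(20) * 0.05, "Rank IC": np.random.randn(20) * 0.05},
    index=pd.date_range("2020-01-02", periods=20, freq="B"),
)
fig = ic_figure(ic_df, show_nature_day=False)
# fig.show()  # renders the IC bar chart
|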
def model_performance_graph(
pred_label: pd.DataFrame,
lag: int = 1,
N: int = 5,
reverse=False,
rank=False,
graph_names: list = ["group_return", "pred_ic", "pred_autocorr"],
show_notebook: bool = True,
show_nature_day: bool = False,
**kwargs,
) -> [list, tuple]:
r"""Model performance
:param pred_label: index is **pd.MultiIndex**, index name is **[instrument, datetime]**; columns names is **[score, label]**.
It is usually the same as the label used in model training (e.g. "Ref($close, -2)/Ref($close, -1) - 1").
.. code-block:: python
instrument datetime score label
SH600004 2017-12-11 -0.013502 -0.013502
2017-12-12 -0.072367 -0.072367
2017-12-13 -0.068605 -0.068605
2017-12-14 0.012440 0.012440
2017-12-15 -0.102778 -0.102778
:param lag: `pred.groupby(level='instrument', group_keys=False)['score'].shift(lag)`. It will be only used in the auto-correlation computing.
:param N: group number, default 5.
:param reverse: if `True`, `pred['score'] *= -1`.
:param rank: if **True**, calculate rank ic.
:param graph_names: graph names; default ['group_return', 'pred_ic', 'pred_autocorr'] (matching the signature).
:param show_notebook: whether to display graphics in notebook, the default is `True`.
:param show_nature_day: whether to display non-trading days on the x-axis.
:param \*\*kwargs: contains some parameters to control plot style in plotly. Currently, supports
- `rangebreaks`: https://plotly.com/python/time-series/#Hiding-Weekends-and-Holidays
:return: if show_notebook is True, display in notebook; else return `plotly.graph_objs.Figure` list.
"""
figure_list = []
for graph_name in graph_names:
fun_res = eval(f"_{graph_name}")(
pred_label=pred_label, lag=lag, N=N, reverse=reverse, rank=rank, show_nature_day=show_nature_day, **kwargs
)
figure_list += fun_res
if show_notebook:
BarGraph.show_graph_in_notebook(figure_list)
else:
return figure_list
|
Model performance
:param pred_label: index is **pd.MultiIndex**, index name is **[instrument, datetime]**; columns names is **[score, label]**.
It is usually the same as the label used in model training (e.g. "Ref($close, -2)/Ref($close, -1) - 1").
.. code-block:: python
instrument datetime score label
SH600004 2017-12-11 -0.013502 -0.013502
2017-12-12 -0.072367 -0.072367
2017-12-13 -0.068605 -0.068605
2017-12-14 0.012440 0.012440
2017-12-15 -0.102778 -0.102778
:param lag: `pred.groupby(level='instrument', group_keys=False)['score'].shift(lag)`. It will be only used in the auto-correlation computing.
:param N: group number, default 5.
:param reverse: if `True`, `pred['score'] *= -1`.
:param rank: if **True**, calculate rank ic.
:param graph_names: graph names; default ['group_return', 'pred_ic', 'pred_autocorr'] (matching the signature).
:param show_notebook: whether to display graphics in notebook, the default is `True`.
:param show_nature_day: whether to display non-trading days on the x-axis.
:param \*\*kwargs: contains some parameters to control plot style in plotly. Currently, supports
- `rangebreaks`: https://plotly.com/python/time-series/#Hiding-Weekends-and-Holidays
:return: if show_notebook is True, display in notebook; else return `plotly.graph_objs.Figure` list.
|
model_performance_graph
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_model/analysis_model_performance.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_model/analysis_model_performance.py
|
MIT
|
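A hedged end-to-end sketch: build a toy `pred_label` frame with the required MultiIndex and columns, then request the figure list instead of notebook rendering. In real usage `score` would come from a trained model and `label` from the dataset handler; everything below is synthetic.

import numpy as np
import pandas as pd
from qlib.contrib.report.analysis_model.analysis_model_performance import model_performance_graph

instruments = [f"SH60000{i}" for i in range(5)]
dates = pd.date_range("2020-01-02", periods=60, freq="B")
idx = pd.MultiIndex.from_product([instruments, dates], names=["instrument", "datetime"])
label = pd.Series(np.random.randn(len(idx)) * 0.02, index=idx)
pred_label = pd.DataFrame({"score": label + np.random.randn(len(idx)) * 0.02, "label": label})

figures = model_performance_graph(pred_label, N=5, show_notebook=False)
# each element is a plotly.graph_objs.Figure, e.g. figures[0].show() in a notebook
|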
def _get_cum_return_data_with_position(
position: dict,
report_normal: pd.DataFrame,
label_data: pd.DataFrame,
start_date=None,
end_date=None,
):
"""
:param position:
:param report_normal:
:param label_data:
:param start_date:
:param end_date:
:return:
"""
_cumulative_return_df = get_position_data(
position=position,
report_normal=report_normal,
label_data=label_data,
start_date=start_date,
end_date=end_date,
).copy()
_cumulative_return_df["label"] = _cumulative_return_df["label"] - _cumulative_return_df["bench"]
_cumulative_return_df = _cumulative_return_df.dropna()
df_gp = _cumulative_return_df.groupby(level="datetime", group_keys=False)
result_list = []
for gp in df_gp:
date = gp[0]
day_df = gp[1]
_hold_df = day_df[day_df["status"] == 0]
_buy_df = day_df[day_df["status"] == 1]
_sell_df = day_df[day_df["status"] == -1]
hold_value = (_hold_df["label"] * _hold_df["weight"]).sum()
hold_weight = _hold_df["weight"].sum()
hold_mean = (hold_value / hold_weight) if hold_weight else 0
sell_value = (_sell_df["label"] * _sell_df["weight"]).sum()
sell_weight = _sell_df["weight"].sum()
sell_mean = (sell_value / sell_weight) if sell_weight else 0
buy_value = (_buy_df["label"] * _buy_df["weight"]).sum()
buy_weight = _buy_df["weight"].sum()
buy_mean = (buy_value / buy_weight) if buy_weight else 0
result_list.append(
dict(
hold_value=hold_value,
hold_mean=hold_mean,
hold_weight=hold_weight,
buy_value=buy_value,
buy_mean=buy_mean,
buy_weight=buy_weight,
sell_value=sell_value,
sell_mean=sell_mean,
sell_weight=sell_weight,
buy_minus_sell_value=buy_value - sell_value,
buy_minus_sell_mean=buy_mean - sell_mean,
buy_plus_sell_weight=buy_weight + sell_weight,
date=date,
)
)
r_df = pd.DataFrame(data=result_list)
r_df["cum_hold"] = r_df["hold_mean"].cumsum()
r_df["cum_buy"] = r_df["buy_mean"].cumsum()
r_df["cum_sell"] = r_df["sell_mean"].cumsum()
r_df["cum_buy_minus_sell"] = r_df["buy_minus_sell_mean"].cumsum()
return r_df
|
:param position:
:param report_normal:
:param label_data:
:param start_date:
:param end_date:
:return:
|
_get_cum_return_data_with_position
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/cumulative_return.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/cumulative_return.py
|
MIT
|
def _get_figure_with_position(
position: dict,
report_normal: pd.DataFrame,
label_data: pd.DataFrame,
start_date=None,
end_date=None,
) -> Iterable[go.Figure]:
"""Get average analysis figures
:param position: position
:param report_normal:
:param label_data:
:param start_date:
:param end_date:
:return:
"""
cum_return_df = _get_cum_return_data_with_position(position, report_normal, label_data, start_date, end_date)
cum_return_df = cum_return_df.set_index("date")
# FIXME: support HIGH-FREQ
cum_return_df.index = cum_return_df.index.strftime("%Y-%m-%d")
# Create figures
for _t_name in ["buy", "sell", "buy_minus_sell", "hold"]:
sub_graph_data = [
(
"cum_{}".format(_t_name),
dict(row=1, col=1, graph_kwargs={"mode": "lines+markers", "xaxis": "x3"}),
),
(
"{}_weight".format(_t_name.replace("minus", "plus") if "minus" in _t_name else _t_name),
dict(row=2, col=1),
),
(
"{}_value".format(_t_name),
dict(row=1, col=2, kind="HistogramGraph", graph_kwargs={}),
),
]
_default_xaxis = dict(showline=False, zeroline=True, tickangle=45)
_default_yaxis = dict(zeroline=True, showline=True, showticklabels=True)
sub_graph_layout = dict(
xaxis1=dict(**_default_xaxis, type="category", showticklabels=False),
xaxis3=dict(**_default_xaxis, type="category"),
xaxis2=_default_xaxis,
yaxis1=dict(**_default_yaxis, title=_t_name),
yaxis2=_default_yaxis,
yaxis3=_default_yaxis,
)
mean_value = cum_return_df["{}_value".format(_t_name)].mean()
layout = dict(
height=500,
title=f"{_t_name}(the red line in the histogram on the right represents the average)",
shapes=[
{
"type": "line",
"xref": "x2",
"yref": "paper",
"x0": mean_value,
"y0": 0,
"x1": mean_value,
"y1": 1,
# NOTE: 'fillcolor': '#d3d3d3', 'opacity': 0.3,
"line": {"color": "red", "width": 1},
},
],
)
kind_map = dict(kind="ScatterGraph", kwargs=dict(mode="lines+markers"))
specs = [
[{"rowspan": 1}, {"rowspan": 2}],
[{"rowspan": 1}, None],
]
subplots_kwargs = dict(
vertical_spacing=0.01,
rows=2,
cols=2,
row_width=[1, 2],
column_width=[3, 1],
print_grid=False,
specs=specs,
)
yield SubplotsGraph(
cum_return_df,
layout=layout,
kind_map=kind_map,
sub_graph_layout=sub_graph_layout,
sub_graph_data=sub_graph_data,
subplots_kwargs=subplots_kwargs,
).figure
|
Get average analysis figures
:param position: position
:param report_normal:
:param label_data:
:param start_date:
:param end_date:
:return:
|
_get_figure_with_position
|
python
|
microsoft/qlib
|
qlib/contrib/report/analysis_position/cumulative_return.py
|
https://github.com/microsoft/qlib/blob/master/qlib/contrib/report/analysis_position/cumulative_return.py
|
MIT
|