code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def remove_repeat_field(fields):
"""remove repeat field
:param fields: list; features fields
:return: list
"""
fields = copy.deepcopy(fields)
_fields = set(fields)
return sorted(_fields, key=fields.index)
|
remove repeated fields, preserving the original order
:param fields: list; feature fields
:return: list
|
remove_repeat_field
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
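A minimal usage sketch (assuming `copy` is imported at module level, as in the original file). Note that the set only deduplicates; `sorted(..., key=fields.index)` restores the first-seen order:

>>> remove_repeat_field(["$close", "$open", "$close", "$high"])
['$close', '$open', '$high']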
def remove_fields_space(fields: Union[list, str, tuple]):
"""remove spaces from fields
:param fields: features fields
:return: list or str
"""
if isinstance(fields, str):
return fields.replace(" ", "")
return [i.replace(" ", "") if isinstance(i, str) else str(i) for i in fields]
|
remove spaces from fields
:param fields: features fields
:return: list or str
|
remove_fields_space
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
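A quick usage sketch: strings are returned with spaces stripped, and non-string items are coerced to `str`:

>>> remove_fields_space(" $open + $close ")
'$open+$close'
>>> remove_fields_space([" $open ", 5])
['$open', '5']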
def normalize_cache_instruments(instruments):
"""normalize cache instruments
:return: list or dict
"""
if isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
instruments = sorted(list(instruments))
else:
# dict type stockpool
if "market" in instruments:
pass
else:
instruments = {k: sorted(v) for k, v in instruments.items()}
return instruments
|
normalize cache instruments
:param instruments: list/tuple/pd.Index/np.ndarray, or a dict-type stock pool config
:return: list or dict
|
normalize_cache_instruments
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
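A usage sketch (assuming `pandas as pd` and `numpy as np` are imported, as in the original module). List-like stock pools are sorted; dict stock pools without a "market" key have their values sorted:

>>> normalize_cache_instruments(["SH600004", "SH600000"])
['SH600000', 'SH600004']
>>> normalize_cache_instruments({"csi300": ["SH600004", "SH600000"]})
{'csi300': ['SH600000', 'SH600004']}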
def is_tradable_date(cur_date):
"""judgy whether date is a tradable date
----------
date : pandas.Timestamp
current date
"""
from ..data import D # pylint: disable=C0415
return str(cur_date.date()) == str(D.calendar(start_time=cur_date, future=True)[0].date())
|
judge whether a date is a tradable date
----------
cur_date : pandas.Timestamp
current date
|
is_tradable_date
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
def get_date_range(trading_date, left_shift=0, right_shift=0, future=False):
"""get trading date range by shift
Parameters
----------
trading_date: pd.Timestamp
left_shift: int
right_shift: int
future: bool
"""
from ..data import D # pylint: disable=C0415
start = get_date_by_shift(trading_date, left_shift, future=future)
end = get_date_by_shift(trading_date, right_shift, future=future)
calendar = D.calendar(start, end, future=future)
return calendar
|
get trading date range by shift
Parameters
----------
trading_date: pd.Timestamp
left_shift: int
right_shift: int
future: bool
|
get_date_range
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
def get_date_by_shift(
trading_date,
shift,
future=False,
clip_shift=True,
freq="day",
align: Optional[str] = None,
):
"""get trading date with shift bias will cur_date
e.g. : shift == 1, return next trading date
shift == -1, return previous trading date
----------
trading_date : pandas.Timestamp
current date
shift : int
clip_shift: bool
align : Optional[str]
When align is None, this function will raise ValueError if `trading_date` is not a trading date
when align is "left"/"right", it will try to align to left/right nearest trading date before shifting when `trading_date` is not a trading date
"""
from qlib.data import D # pylint: disable=C0415
cal = D.calendar(future=future, freq=freq)
trading_date = pd.to_datetime(trading_date)
if align is None:
if trading_date not in list(cal):
raise ValueError("{} is not trading day!".format(str(trading_date)))
_index = bisect.bisect_left(cal, trading_date)
elif align == "left":
_index = bisect.bisect_right(cal, trading_date) - 1
elif align == "right":
_index = bisect.bisect_left(cal, trading_date)
else:
raise ValueError(f"align with value `{align}` is not supported")
shift_index = _index + shift
if shift_index < 0 or shift_index >= len(cal):
if clip_shift:
shift_index = np.clip(shift_index, 0, len(cal) - 1)
else:
raise IndexError(f"The shift_index({shift_index}) of the trading day ({trading_date}) is out of range")
return cal[shift_index]
|
get the trading date with a shift bias relative to cur_date
e.g. : shift == 1, return next trading date
shift == -1, return previous trading date
----------
trading_date : pandas.Timestamp
current date
shift : int
clip_shift: bool
align : Optional[str]
When align is None, this function will raise ValueError if `trading_date` is not a trading date
when align is "left"/"right", it will try to align to left/right nearest trading date before shifting when `trading_date` is not a trading date
|
get_date_by_shift
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
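Since the full function needs a qlib calendar, here is a minimal sketch of just the `align` logic with a toy calendar (`cal` below is a hypothetical list of trading days, not qlib data):

import bisect
import pandas as pd

cal = [pd.Timestamp(d) for d in ["2024-01-02", "2024-01-03", "2024-01-05"]]
t = pd.Timestamp("2024-01-04")  # not a trading day
left = cal[bisect.bisect_right(cal, t) - 1]   # align="left"  -> 2024-01-03
right = cal[bisect.bisect_left(cal, t)]       # align="right" -> 2024-01-05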
def transform_end_date(end_date=None, freq="day"):
"""handle the end date with various format
If end_date is -1, None, or end_date is greater than the maximum trading day, the last trading date is returned.
Otherwise, returns the end_date
----------
end_date: str
end trading date
date : pandas.Timestamp
current date
"""
from ..data import D # pylint: disable=C0415
last_date = D.calendar(freq=freq)[-1]
if end_date is None or (str(end_date) == "-1") or (pd.Timestamp(last_date) < pd.Timestamp(end_date)):
log.warning(
"\nInfo: the end_date in the configuration file is {}, "
"so the default last date {} is used.".format(end_date, last_date)
)
end_date = last_date
return end_date
|
handle the end date with various formats
If end_date is -1, None, or end_date is greater than the maximum trading day, the last trading date is returned.
Otherwise, returns the end_date
----------
end_date: str
end trading date
date : pandas.Timestamp
current date
|
transform_end_date
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
def get_date_in_file_name(file_name):
"""Get the date(YYYY-MM-DD) written in file name
Parameter
file_name : str
:return
date : str
'YYYY-MM-DD'
"""
pattern = "[0-9]{4}-[0-9]{2}-[0-9]{2}"
date = re.search(pattern, str(file_name)).group()
return date
|
Get the date (YYYY-MM-DD) written in the file name
Parameters
file_name : str
:return
date : str
'YYYY-MM-DD'
|
get_date_in_file_name
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
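A usage sketch (assuming `re` is imported, as in the original module); note that `re.search(...).group()` raises AttributeError when no date is present:

>>> get_date_in_file_name("pred_2020-08-01.pkl")
'2020-08-01'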
def split_pred(pred, number=None, split_date=None):
"""split the score file into two part
Parameter
---------
pred : pd.DataFrame (index:<instrument, datetime>)
A score file of stocks
number: the number of dates for pred_left
split_date: the last date of the pred_left
Return
-------
pred_left : pd.DataFrame (index:<instrument, datetime>)
The first part of original score file
pred_right : pd.DataFrame (index:<instrument, datetime>)
The second part of original score file
"""
if number is None and split_date is None:
raise ValueError("`number` and `split_date` cannot both be None")
dates = sorted(pred.index.get_level_values("datetime").unique())
dates = list(map(pd.Timestamp, dates))
if split_date is None:
date_left_end = dates[number - 1]
date_right_begin = dates[number]
date_left_start = None
else:
split_date = pd.Timestamp(split_date)
date_left_end = split_date
date_right_begin = split_date + pd.Timedelta(days=1)
if number is None:
date_left_start = None
else:
end_idx = bisect.bisect_right(dates, split_date)
date_left_start = dates[end_idx - number]
pred_temp = pred.sort_index()
pred_left = pred_temp.loc(axis=0)[:, date_left_start:date_left_end]
pred_right = pred_temp.loc(axis=0)[:, date_right_begin:]
return pred_left, pred_right
|
split the score file into two parts
Parameters
---------
pred : pd.DataFrame (index:<instrument, datetime>)
A score file of stocks
number: the number of dates for pred_left
split_date: the last date of the pred_left
Return
-------
pred_left : pd.DataFrame (index:<instrument, datetime>)
The first part of original score file
pred_right : pd.DataFrame (index:<instrument, datetime>)
The second part of original score file
|
split_pred
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
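A self-contained sketch on a toy score frame (assuming pandas is imported and the frame uses the <instrument, datetime> MultiIndex the function expects):

import pandas as pd

idx = pd.MultiIndex.from_product(
    [["SH600000"], pd.to_datetime(["2020-01-01", "2020-01-02", "2020-01-03"])],
    names=["instrument", "datetime"],
)
pred = pd.DataFrame({"score": [0.1, 0.2, 0.3]}, index=idx)
left, right = split_pred(pred, number=2)
# left covers 2020-01-01..2020-01-02; right covers 2020-01-03 onward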
def time_to_slc_point(t: Union[None, str, pd.Timestamp]) -> Union[None, pd.Timestamp]:
"""
Time slicing is a frequently used action in Qlib and Pandas.
However, users often input all kinds of data formats to represent time.
This function helps users convert these inputs into a uniform format which is friendly to time slicing.
Parameters
----------
t : Union[None, str, pd.Timestamp]
original time
Returns
-------
Union[None, pd.Timestamp]:
"""
if t is None:
# None represents unbounded in Qlib or Pandas (e.g. df.loc[slice(None, "20210303")]).
return t
else:
return pd.Timestamp(t)
|
Time slicing is a frequently used action in Qlib and Pandas.
However, users often input all kinds of data formats to represent time.
This function helps users convert these inputs into a uniform format which is friendly to time slicing.
Parameters
----------
t : Union[None, str, pd.Timestamp]
original time
Returns
-------
Union[None, pd.Timestamp]:
|
time_to_slc_point
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
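Usage sketch (assuming pandas is imported as pd): `None` passes through as an unbounded slice endpoint, everything else becomes a `pd.Timestamp`:

>>> time_to_slc_point(None) is None
True
>>> time_to_slc_point("2021-03-03")
Timestamp('2021-03-03 00:00:00')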
def lazy_sort_index(df: pd.DataFrame, axis=0) -> pd.DataFrame:
"""
make the index of the df sorted
df.sort_index() can take a lot of time even when `df.is_lexsorted() == True`;
this function avoids the redundant sort in such cases
Parameters
----------
df : pd.DataFrame
Returns
-------
pd.DataFrame:
sorted dataframe
"""
idx = df.index if axis == 0 else df.columns
if (
not idx.is_monotonic_increasing
or not is_deprecated_lexsorted_pandas
and isinstance(idx, pd.MultiIndex)
and not idx.is_lexsorted()
): # this case is for the old version
return df.sort_index(axis=axis)
else:
return df
|
make the index of the df sorted
df.sort_index() can take a lot of time even when `df.is_lexsorted() == True`;
this function avoids the redundant sort in such cases
Parameters
----------
df : pd.DataFrame
Returns
-------
pd.DataFrame:
sorted dataframe
|
lazy_sort_index
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
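A usage sketch (`is_deprecated_lexsorted_pandas` is a module-level flag in the original file, so this assumes the function runs in that module's scope). When the index is already monotonic, the very same object is returned and no copy is made:

import pandas as pd

df = pd.DataFrame({"a": [1, 2]}, index=[0, 1])
assert lazy_sort_index(df) is df  # already sorted: returned as-is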
def flatten_dict(d, parent_key="", sep=".") -> dict:
"""
Flatten a nested dict.
>>> flatten_dict({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]})
>>> {'a': 1, 'c.a': 2, 'c.b.x': 5, 'd': [1, 2, 3], 'c.b.y': 10}
>>> flatten_dict({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]}, sep=FLATTEN_TUPLE)
>>> {'a': 1, ('c','a'): 2, ('c','b','x'): 5, 'd': [1, 2, 3], ('c','b','y'): 10}
Args:
d (dict): the dict to be flattened
parent_key (str, optional): the parent key, which will be a prefix in the new key. Defaults to "".
sep (str, optional): the separator for string concatenation; FLATTEN_TUPLE for tuple concatenation.
Returns:
dict: the flattened dict
"""
items = []
for k, v in d.items():
if sep == FLATTEN_TUPLE:
new_key = (parent_key, k) if parent_key else k
else:
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
|
Flatten a nested dict.
>>> flatten_dict({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]})
>>> {'a': 1, 'c.a': 2, 'c.b.x': 5, 'd': [1, 2, 3], 'c.b.y': 10}
>>> flatten_dict({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]}, sep=FLATTEN_TUPLE)
>>> {'a': 1, ('c','a'): 2, ('c','b','x'): 5, 'd': [1, 2, 3], ('c','b','y'): 10}
Args:
d (dict): the dict to be flattened
parent_key (str, optional): the parent key, which will be a prefix in the new key. Defaults to "".
sep (str, optional): the separator for string concatenation; FLATTEN_TUPLE for tuple concatenation.
Returns:
dict: the flattened dict
|
flatten_dict
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
def get_item_from_obj(config: dict, name_path: str) -> object:
"""
Follow the name_path to get values from config
For example:
If we follow the example in the Parameters section,
Timestamp('2008-01-02 00:00:00') will be returned
Parameters
----------
config : dict
e.g.
{'dataset': {'class': 'DatasetH',
'kwargs': {'handler': {'class': 'Alpha158',
'kwargs': {'end_time': '2020-08-01',
'fit_end_time': '<dataset.kwargs.segments.train.1>',
'fit_start_time': '<dataset.kwargs.segments.train.0>',
'instruments': 'csi100',
'start_time': '2008-01-01'},
'module_path': 'qlib.contrib.data.handler'},
'segments': {'test': (Timestamp('2017-01-03 00:00:00'),
Timestamp('2019-04-08 00:00:00')),
'train': (Timestamp('2008-01-02 00:00:00'),
Timestamp('2014-12-31 00:00:00')),
'valid': (Timestamp('2015-01-05 00:00:00'),
Timestamp('2016-12-30 00:00:00'))}}
}}
name_path : str
e.g.
"dataset.kwargs.segments.train.1"
Returns
-------
object
the retrieved object
"""
cur_cfg = config
for k in name_path.split("."):
if isinstance(cur_cfg, dict):
cur_cfg = cur_cfg[k] # may raise KeyError
elif k.isdigit():
cur_cfg = cur_cfg[int(k)] # may raise IndexError
else:
raise ValueError(f"Error when getting {k} from cur_cfg")
return cur_cfg
|
Follow the name_path to get values from config
For example:
If we follow the example in the Parameters section,
Timestamp('2008-01-02 00:00:00') will be returned
Parameters
----------
config : dict
e.g.
{'dataset': {'class': 'DatasetH',
'kwargs': {'handler': {'class': 'Alpha158',
'kwargs': {'end_time': '2020-08-01',
'fit_end_time': '<dataset.kwargs.segments.train.1>',
'fit_start_time': '<dataset.kwargs.segments.train.0>',
'instruments': 'csi100',
'start_time': '2008-01-01'},
'module_path': 'qlib.contrib.data.handler'},
'segments': {'test': (Timestamp('2017-01-03 00:00:00'),
Timestamp('2019-04-08 00:00:00')),
'train': (Timestamp('2008-01-02 00:00:00'),
Timestamp('2014-12-31 00:00:00')),
'valid': (Timestamp('2015-01-05 00:00:00'),
Timestamp('2016-12-30 00:00:00'))}}
}}
name_path : str
e.g.
"dataset.kwargs.segments.train.1"
Returns
-------
object
the retrieved object
|
get_item_from_obj
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
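A tiny runnable sketch of the dotted-path traversal (dict keys and list indices mix freely):

>>> get_item_from_obj({"a": {"b": [10, 20]}}, "a.b.1")
20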
def fill_placeholder(config: dict, config_extend: dict):
"""
Detect placeholder in config and fill them with config_extend.
The items of the dict must be single items (int, str, etc.), dicts, or lists. Tuples are not supported.
There are two types of variables:
- user-defined variables :
e.g. when config_extend is `{"<MODEL>": model, "<DATASET>": dataset}`, "<MODEL>" and "<DATASET>" in `config` will be replaced with `model` and `dataset`
- variables extracted from `config` :
e.g. the variables like "<dataset.kwargs.segments.train.0>" will be replaced with the values from `config`
Parameters
----------
config : dict
the parameter dict will be filled
config_extend : dict
the value of all placeholders
Returns
-------
dict
the parameter dict
"""
# check the format of config_extend
for placeholder in config_extend.keys():
assert re.match(r"<[^<>]+>", placeholder)
# bfs
top = 0
tail = 1
item_queue = [config]
def try_replace_placeholder(value):
if value in config_extend.keys():
value = config_extend[value]
else:
m = re.match(r"<(?P<name_path>[^<>]+)>", value)
if m is not None:
try:
value = get_item_from_obj(config, m.groupdict()["name_path"])
except (KeyError, ValueError, IndexError):
get_module_logger("fill_placeholder").info(
f"{value} lookes like a placeholder, but it can't match to any given values"
)
return value
item_keys = None
while top < tail:
now_item = item_queue[top]
top += 1
if isinstance(now_item, list):
item_keys = range(len(now_item))
elif isinstance(now_item, dict):
item_keys = now_item.keys()
for key in item_keys: # noqa
if isinstance(now_item[key], (list, dict)):
item_queue.append(now_item[key])
tail += 1
elif isinstance(now_item[key], str):
# If it is a string, try to replace it with placeholder
now_item[key] = try_replace_placeholder(now_item[key])
return config
|
Detect placeholder in config and fill them with config_extend.
The items of the dict must be single items (int, str, etc.), dicts, or lists. Tuples are not supported.
There are two types of variables:
- user-defined variables :
e.g. when config_extend is `{"<MODEL>": model, "<DATASET>": dataset}`, "<MODEL>" and "<DATASET>" in `config` will be replaced with `model` and `dataset`
- variables extracted from `config` :
e.g. the variables like "<dataset.kwargs.segments.train.0>" will be replaced with the values from `config`
Parameters
----------
config : dict
the parameter dict will be filled
config_extend : dict
the value of all placeholders
Returns
-------
dict
the parameter dict
|
fill_placeholder
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
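A hedged end-to-end sketch (assumes `re`, `get_item_from_obj`, and `get_module_logger` are in scope, as in the original module). Both placeholder flavors appear: "<MODEL>" comes from config_extend, while "<dates.0>" is looked up inside the config itself; note the function mutates `config` in place and returns it:

>>> cfg = {"model": "<MODEL>", "train": {"start": "<dates.0>"}, "dates": ["2008-01-01", "2020-08-01"]}
>>> fill_placeholder(cfg, {"<MODEL>": "LGBModel"})
{'model': 'LGBModel', 'train': {'start': '2008-01-01'}, 'dates': ['2008-01-01', '2020-08-01']}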
def auto_filter_kwargs(func: Callable, warning=True) -> Callable:
"""
this works like a decorator function
The decorated function will ignore a keyword argument and give a warning when the argument is not acceptable
For example, if you have a function `f` which may optionally consume the keyword argument `bar`,
then you can call it by `auto_filter_kwargs(f)(bar=3)`, which will automatically filter out
`bar` when f does not need bar
Parameters
----------
func : Callable
The original function
Returns
-------
Callable:
the new callable function
"""
def _func(*args, **kwargs):
spec = inspect.getfullargspec(func)
new_kwargs = {}
for k, v in kwargs.items():
# if `func` doesn't accept variable keyword arguments like `**kwargs` and has no corresponding named argument
if spec.varkw is None and k not in spec.args:
if warning:
log.warning(f"The parameter `{k}` with value `{v}` is ignored.")
else:
new_kwargs[k] = v
return func(*args, **new_kwargs)
return _func
|
this works like a decorator function
The decorated function will ignore a keyword argument and give a warning when the argument is not acceptable
For example, if you have a function `f` which may optionally consume the keyword argument `bar`,
then you can call it by `auto_filter_kwargs(f)(bar=3)`, which will automatically filter out
`bar` when f does not need bar
Parameters
----------
func : Callable
The original function
Returns
-------
Callable:
the new callable function
|
auto_filter_kwargs
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
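A self-contained sketch (assuming `inspect` and the module-level `log` are available, as in the original file):

def f(a, b=1):
    return a + b

auto_filter_kwargs(f)(a=1, b=2, c=3)  # warns that `c` is ignored, returns 3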
def register_wrapper(wrapper, cls_or_obj, module_path=None):
"""register_wrapper
:param wrapper: A wrapper.
:param cls_or_obj: A class or class name or object instance.
"""
if isinstance(cls_or_obj, str):
module = get_module_by_module_path(module_path)
cls_or_obj = getattr(module, cls_or_obj)
obj = cls_or_obj() if isinstance(cls_or_obj, type) else cls_or_obj
wrapper.register(obj)
|
register_wrapper
:param wrapper: A wrapper.
:param cls_or_obj: A class or class name or object instance.
|
register_wrapper
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
def load_dataset(path_or_obj, index_col=[0, 1]):
"""load dataset from multiple file formats"""
if isinstance(path_or_obj, pd.DataFrame):
return path_or_obj
if not os.path.exists(path_or_obj):
raise ValueError(f"file {path_or_obj} doesn't exist")
_, extension = os.path.splitext(path_or_obj)
if extension == ".h5":
return pd.read_hdf(path_or_obj)
elif extension == ".pkl":
return pd.read_pickle(path_or_obj)
elif extension == ".csv":
return pd.read_csv(path_or_obj, parse_dates=True, index_col=index_col)
raise ValueError(f"unsupported file type `{extension}`")
|
load dataset from multiple file formats
|
load_dataset
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
def code_to_fname(code: str):
"""stock code to file name
Parameters
----------
code: str
"""
# NOTE: In windows, the following name is I/O device, and the file with the corresponding name cannot be created
# reference: https://superuser.com/questions/86999/why-cant-i-name-a-folder-or-file-con-in-windows
replace_names = ["CON", "PRN", "AUX", "NUL"]
replace_names += [f"COM{i}" for i in range(10)]
replace_names += [f"LPT{i}" for i in range(10)]
prefix = "_qlib_"
if str(code).upper() in replace_names:
code = prefix + str(code)
return code
|
stock code to file name
Parameters
----------
code: str
|
code_to_fname
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
def fname_to_code(fname: str):
"""file name to stock code
Parameters
----------
fname: str
"""
prefix = "_qlib_"
if fname.startswith(prefix):
fname = fname[len(prefix) :]  # NOTE: not `str.lstrip(prefix)`, which strips a character set rather than the prefix string
return fname
|
file name to stock code
Parameters
----------
fname: str
|
fname_to_code
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
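A round-trip sketch showing how reserved Windows names are prefixed and then recovered. The slice fix above matters: `str.lstrip` treats its argument as a character set, so `"_qlib_beta".lstrip("_qlib_")` would return `'eta'`, eating leading characters of some lowercase codes:

>>> code_to_fname("PRN")
'_qlib_PRN'
>>> fname_to_code("_qlib_PRN")
'PRN'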
def sys_config(config, config_path):
"""
Configure the `sys` section
Parameters
----------
config : dict
configuration of the workflow.
config_path : str
path of the configuration
"""
sys_config = config.get("sys", {})
# abspath
for p in get_path_list(sys_config.get("path", [])):
sys.path.append(p)
# relative path to config path
for p in get_path_list(sys_config.get("rel_path", [])):
sys.path.append(str(Path(config_path).parent.resolve().absolute() / p))
|
Configure the `sys` section
Parameters
----------
config : dict
configuration of the workflow.
config_path : str
path of the configuration
|
sys_config
|
python
|
microsoft/qlib
|
qlib/workflow/cli.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/cli.py
|
MIT
|
def render_template(config_path: str) -> str:
"""
render the template based on the environment
Parameters
----------
config_path : str
configuration path
Returns
-------
str
the rendered content
"""
with open(config_path, "r") as f:
config = f.read()
# Set up the Jinja2 environment
template = Template(config)
# Parse the template to find undeclared variables
env = template.environment
parsed_content = env.parse(config)
variables = meta.find_undeclared_variables(parsed_content)
# Get context from os.environ according to the variables
context = {var: os.getenv(var, "") for var in variables if var in os.environ}
logger.info(f"Render the template with the context: {context}")
# Render the template with the context
rendered_content = template.render(context)
return rendered_content
|
render the template based on the environment
Parameters
----------
config_path : str
configuration path
Returns
-------
str
the rendered content
|
render_template
|
python
|
microsoft/qlib
|
qlib/workflow/cli.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/cli.py
|
MIT
|
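A self-contained sketch of the same Jinja2 pattern without a config file (assumes `jinja2` is installed; `MARKET` is a hypothetical environment variable):

import os
from jinja2 import Template, meta

src = "market: {{ MARKET }}"
template = Template(src)
parsed = template.environment.parse(src)
variables = meta.find_undeclared_variables(parsed)  # {'MARKET'}
context = {v: os.getenv(v, "") for v in variables if v in os.environ}
print(template.render(context))  # "market: csi300" if MARKET=csi300 is exported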
def workflow(config_path, experiment_name="workflow", uri_folder="mlruns"):
"""
This is a Qlib CLI entry point.
Users can run the whole quant research workflow defined by a config file
- the code is located here: ``qlib/workflow/cli.py``
Users can specify a base_config file in their workflow.yml file by adding "BASE_CONFIG_PATH".
Qlib will load the configuration in BASE_CONFIG_PATH first, and the user only needs to update the custom fields
in their own workflow.yml file.
For example:
qlib_init:
provider_uri: "~/.qlib/qlib_data/cn_data"
region: cn
BASE_CONFIG_PATH: "workflow_config_lightgbm_Alpha158_csi500.yaml"
market: csi300
"""
# Render the template
rendered_yaml = render_template(config_path)
yaml = YAML(typ="safe", pure=True)
config = yaml.load(rendered_yaml)
base_config_path = config.get("BASE_CONFIG_PATH", None)
if base_config_path:
logger.info(f"Use BASE_CONFIG_PATH: {base_config_path}")
base_config_path = Path(base_config_path)
# it will find config file in absolute path and relative path
if base_config_path.exists():
path = base_config_path
else:
logger.info(
f"Can't find BASE_CONFIG_PATH base on: {Path.cwd()}, "
f"try using relative path to config path: {Path(config_path).absolute()}"
)
relative_path = Path(config_path).absolute().parent.joinpath(base_config_path)
if relative_path.exists():
path = relative_path
else:
raise FileNotFoundError(f"Can't find the BASE_CONFIG file: {base_config_path}")
with open(path) as fp:
yaml = YAML(typ="safe", pure=True)
base_config = yaml.load(fp)
logger.info(f"Load BASE_CONFIG_PATH succeed: {path.resolve()}")
config = update_config(base_config, config)
# config the `sys` section
sys_config(config, config_path)
if "exp_manager" in config.get("qlib_init"):
qlib.init(**config.get("qlib_init"))
else:
exp_manager = C["exp_manager"]
exp_manager["kwargs"]["uri"] = "file:" + str(Path(os.getcwd()).resolve() / uri_folder)
qlib.init(**config.get("qlib_init"), exp_manager=exp_manager)
if "experiment_name" in config:
experiment_name = config["experiment_name"]
recorder = task_train(config.get("task"), experiment_name=experiment_name)
recorder.save_objects(config=config)
|
This is a Qlib CLI entry point.
Users can run the whole quant research workflow defined by a config file
- the code is located here: ``qlib/workflow/cli.py``
Users can specify a base_config file in their workflow.yml file by adding "BASE_CONFIG_PATH".
Qlib will load the configuration in BASE_CONFIG_PATH first, and the user only needs to update the custom fields
in their own workflow.yml file.
For example:
qlib_init:
provider_uri: "~/.qlib/qlib_data/cn_data"
region: cn
BASE_CONFIG_PATH: "workflow_config_lightgbm_Alpha158_csi500.yaml"
market: csi300
|
workflow
|
python
|
microsoft/qlib
|
qlib/workflow/cli.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/cli.py
|
MIT
|
def get_recorder(self, recorder_id=None, recorder_name=None, create: bool = True, start: bool = False) -> Recorder:
"""
Retrieve a Recorder for the user. When the user specifies a recorder id or name, the method will try to return the
specific recorder. When the user does not provide a recorder id or name, the method will try to return the current
active recorder. The `create` argument determines whether the method will automatically create a new recorder
according to the user's specification if the recorder hasn't been created before.
* If `create` is True:
* If `active recorder` exists:
* no id or name specified, return the active recorder.
* if id or name is specified, return the specified recorder. If no such recorder is found, create a new recorder with the given id or name. If `start` is set to True, the recorder is set to be active.
* If `active recorder` does not exist:
* no id or name specified, create a new recorder.
* if id or name is specified, return the specified recorder. If no such recorder is found, create a new recorder with the given id or name. If `start` is set to True, the recorder is set to be active.
* Else if `create` is False:
* If `active recorder` exists:
* no id or name specified, return the active recorder.
* if id or name is specified, return the specified recorder. If no such recorder is found, raise an error.
* If `active recorder` does not exist:
* no id or name specified, raise an error.
* if id or name is specified, return the specified recorder. If no such recorder is found, raise an error.
Parameters
----------
recorder_id : str
the id of the recorder to be retrieved.
recorder_name : str
the name of the recorder to be retrieved.
create : boolean
create the recorder if it hasn't been created before.
start : boolean
start the new recorder if one is **created**.
Returns
-------
A recorder object.
"""
# special case of getting the recorder
if recorder_id is None and recorder_name is None:
if self.active_recorder is not None:
return self.active_recorder
recorder_name = self._default_rec_name
if create:
recorder, is_new = self._get_or_create_rec(recorder_id=recorder_id, recorder_name=recorder_name)
else:
recorder, is_new = (
self._get_recorder(recorder_id=recorder_id, recorder_name=recorder_name),
False,
)
if is_new and start:
self.active_recorder = recorder
# start the recorder
self.active_recorder.start_run()
return recorder
|
Retrieve a Recorder for the user. When the user specifies a recorder id or name, the method will try to return the
specific recorder. When the user does not provide a recorder id or name, the method will try to return the current
active recorder. The `create` argument determines whether the method will automatically create a new recorder
according to the user's specification if the recorder hasn't been created before.
* If `create` is True:
* If `active recorder` exists:
* no id or name specified, return the active recorder.
* if id or name is specified, return the specified recorder. If no such recorder is found, create a new recorder with the given id or name. If `start` is set to True, the recorder is set to be active.
* If `active recorder` does not exist:
* no id or name specified, create a new recorder.
* if id or name is specified, return the specified recorder. If no such recorder is found, create a new recorder with the given id or name. If `start` is set to True, the recorder is set to be active.
* Else if `create` is False:
* If `active recorder` exists:
* no id or name specified, return the active recorder.
* if id or name is specified, return the specified recorder. If no such recorder is found, raise an error.
* If `active recorder` does not exist:
* no id or name specified, raise an error.
* if id or name is specified, return the specified recorder. If no such recorder is found, raise an error.
Parameters
----------
recorder_id : str
the id of the recorder to be retrieved.
recorder_name : str
the name of the recorder to be retrieved.
create : boolean
create the recorder if it hasn't been created before.
start : boolean
start the new recorder if one is **created**.
Returns
-------
A recorder object.
|
get_recorder
|
python
|
microsoft/qlib
|
qlib/workflow/exp.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/exp.py
|
MIT
|
def _get_or_create_rec(self, recorder_id=None, recorder_name=None) -> (object, bool):
"""
Method for getting or creating a recorder. It will first try to get a valid recorder; if an exception occurs,
it will automatically create a new recorder based on the given id and name.
"""
try:
if recorder_id is None and recorder_name is None:
recorder_name = self._default_rec_name
return (
self._get_recorder(recorder_id=recorder_id, recorder_name=recorder_name),
False,
)
except ValueError:
if recorder_name is None:
recorder_name = self._default_rec_name
logger.info(f"No valid recorder found. Create a new recorder with name {recorder_name}.")
return self.create_recorder(recorder_name), True
|
Method for getting or creating a recorder. It will first try to get a valid recorder; if an exception occurs,
it will automatically create a new recorder based on the given id and name.
|
_get_or_create_rec
|
python
|
microsoft/qlib
|
qlib/workflow/exp.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/exp.py
|
MIT
|
def list_recorders(
self, rtype: Literal["dict", "list"] = RT_D, **flt_kwargs
) -> Union[List[Recorder], Dict[str, Recorder]]:
"""
List all the existing recorders of this experiment. Please get the experiment instance before calling this method.
If users want to use the method `R.list_recorders()`, please refer to the related API document in `QlibRecorder`.
flt_kwargs : dict
filter recorders by conditions
e.g. list_recorders(status=Recorder.STATUS_FI)
Returns
-------
The return type depends on `rtype`
if `rtype` == "dict":
A dictionary (id -> recorder) of the recorder information being stored.
elif `rtype` == "list":
A list of Recorder.
"""
raise NotImplementedError("Please implement the `list_recorders` method.")
|
List all the existing recorders of this experiment. Please get the experiment instance before calling this method.
If users want to use the method `R.list_recorders()`, please refer to the related API document in `QlibRecorder`.
flt_kwargs : dict
filter recorders by conditions
e.g. list_recorders(status=Recorder.STATUS_FI)
Returns
-------
The return type depends on `rtype`
if `rtype` == "dict":
A dictionary (id -> recorder) of the recorder information being stored.
elif `rtype` == "list":
A list of Recorder.
|
list_recorders
|
python
|
microsoft/qlib
|
qlib/workflow/exp.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/exp.py
|
MIT
|
def _get_recorder(self, recorder_id=None, recorder_name=None):
"""
Method for getting a recorder (not creating one). It will try to get a valid recorder; if an exception occurs, it will
raise errors.
Quoting docs of search_runs from MLflow
> The default ordering is to sort by start_time DESC, then run_id.
"""
assert (
recorder_id is not None or recorder_name is not None
), "Please input at least one of recorder id or name before retrieving recorder."
if recorder_id is not None:
try:
run = self._client.get_run(recorder_id)
recorder = MLflowRecorder(self.id, self._uri, mlflow_run=run)
return recorder
except MlflowException as mlflow_exp:
raise ValueError(
"No valid recorder has been found, please make sure the input recorder id is correct."
) from mlflow_exp
elif recorder_name is not None:
logger.warning(
f"Please make sure the recorder name {recorder_name} is unique, we will only return the latest recorder if there exist several matched the given name."
)
recorders = self.list_recorders()
for rid in recorders:
if recorders[rid].name == recorder_name:
return recorders[rid]
raise ValueError("No valid recorder has been found, please make sure the input recorder name is correct.")
|
Method for getting a recorder (not creating one). It will try to get a valid recorder; if an exception occurs, it will
raise errors.
Quoting docs of search_runs from MLflow
> The default ordering is to sort by start_time DESC, then run_id.
|
_get_recorder
|
python
|
microsoft/qlib
|
qlib/workflow/exp.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/exp.py
|
MIT
|
def start_exp(
self,
*,
experiment_id: Optional[Text] = None,
experiment_name: Optional[Text] = None,
recorder_id: Optional[Text] = None,
recorder_name: Optional[Text] = None,
uri: Optional[Text] = None,
resume: bool = False,
**kwargs,
) -> Experiment:
"""
Start an experiment. This method includes first get_or_create an experiment, and then
set it to be active.
Maintaining `_active_exp_uri` is included in start_exp; the remaining implementation should be included in `_start_exp` in the subclass.
Parameters
----------
experiment_id : str
id of the active experiment.
experiment_name : str
name of the active experiment.
recorder_id : str
id of the recorder to be started.
recorder_name : str
name of the recorder to be started.
uri : str
the current tracking URI.
resume : boolean
whether to resume the experiment and recorder.
Returns
-------
An active experiment.
"""
self._active_exp_uri = uri
# The subclass may set the underlying uri back.
# So setting `_active_exp_uri` comes before `_start_exp`.
return self._start_exp(
experiment_id=experiment_id,
experiment_name=experiment_name,
recorder_id=recorder_id,
recorder_name=recorder_name,
resume=resume,
**kwargs,
)
|
Start an experiment. This method includes first get_or_create an experiment, and then
set it to be active.
Maintaining `_active_exp_uri` is included in start_exp; the remaining implementation should be included in `_start_exp` in the subclass.
Parameters
----------
experiment_id : str
id of the active experiment.
experiment_name : str
name of the active experiment.
recorder_id : str
id of the recorder to be started.
recorder_name : str
name of the recorder to be started.
uri : str
the current tracking URI.
resume : boolean
whether to resume the experiment and recorder.
Returns
-------
An active experiment.
|
start_exp
|
python
|
microsoft/qlib
|
qlib/workflow/expm.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/expm.py
|
MIT
|
def end_exp(self, recorder_status: Text = Recorder.STATUS_S, **kwargs):
"""
End an active experiment.
Maintaining `_active_exp_uri` is included in end_exp; the remaining implementation should be included in `_end_exp` in the subclass.
Parameters
----------
experiment_name : str
name of the active experiment.
recorder_status : str
the status of the active recorder of the experiment.
"""
self._active_exp_uri = None
# The subclass may set the underlying uri back.
# So setting `_active_exp_uri` comes before `_end_exp`.
self._end_exp(recorder_status=recorder_status, **kwargs)
|
End an active experiment.
Maintaining `_active_exp_uri` is included in end_exp; the remaining implementation should be included in `_end_exp` in the subclass.
Parameters
----------
experiment_name : str
name of the active experiment.
recorder_status : str
the status of the active recorder of the experiment.
|
end_exp
|
python
|
microsoft/qlib
|
qlib/workflow/expm.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/expm.py
|
MIT
|
def get_exp(self, *, experiment_id=None, experiment_name=None, create: bool = True, start: bool = False):
"""
Retrieve an experiment. This method covers getting the active experiment as well as getting or creating a specific experiment.
When the user specifies an experiment id or name, the method will try to return the specific experiment.
When the user does not provide an experiment id or name, the method will try to return the current active experiment.
The `create` argument determines whether the method will automatically create a new experiment according
to the user's specification if the experiment hasn't been created before.
* If `create` is True:
* If `active experiment` exists:
* no id or name specified, return the active experiment.
* if id or name is specified, return the specified experiment. If no such experiment is found, create a new experiment with the given id or name. If `start` is set to True, the experiment is set to be active.
* If `active experiment` does not exist:
* no id or name specified, create a default experiment.
* if id or name is specified, return the specified experiment. If no such experiment is found, create a new experiment with the given id or name. If `start` is set to True, the experiment is set to be active.
* Else if `create` is False:
* If `active experiment` exists:
* no id or name specified, return the active experiment.
* if id or name is specified, return the specified experiment. If no such experiment is found, raise an error.
* If `active experiment` does not exist:
* no id or name specified. If the default experiment exists, return it; otherwise, raise an error.
* if id or name is specified, return the specified experiment. If no such experiment is found, raise an error.
Parameters
----------
experiment_id : str
id of the experiment to return.
experiment_name : str
name of the experiment to return.
create : boolean
create the experiment if it hasn't been created before.
start : boolean
start the new experiment if one is created.
Returns
-------
An experiment object.
"""
# special case of getting experiment
if experiment_id is None and experiment_name is None:
if self.active_experiment is not None:
return self.active_experiment
# The user doesn't want the active experiment; fall back to the default experiment name.
experiment_name = self._default_exp_name
if create:
exp, _ = self._get_or_create_exp(experiment_id=experiment_id, experiment_name=experiment_name)
else:
exp = self._get_exp(experiment_id=experiment_id, experiment_name=experiment_name)
if self.active_experiment is None and start:
self.active_experiment = exp
# start the recorder
self.active_experiment.start()
return exp
|
Retrieve an experiment. This method covers getting the active experiment as well as getting or creating a specific experiment.
When the user specifies an experiment id or name, the method will try to return the specific experiment.
When the user does not provide an experiment id or name, the method will try to return the current active experiment.
The `create` argument determines whether the method will automatically create a new experiment according
to the user's specification if the experiment hasn't been created before.
* If `create` is True:
* If `active experiment` exists:
* no id or name specified, return the active experiment.
* if id or name is specified, return the specified experiment. If no such experiment is found, create a new experiment with the given id or name. If `start` is set to True, the experiment is set to be active.
* If `active experiment` does not exist:
* no id or name specified, create a default experiment.
* if id or name is specified, return the specified experiment. If no such experiment is found, create a new experiment with the given id or name. If `start` is set to True, the experiment is set to be active.
* Else if `create` is False:
* If `active experiment` exists:
* no id or name specified, return the active experiment.
* if id or name is specified, return the specified experiment. If no such experiment is found, raise an error.
* If `active experiment` does not exist:
* no id or name specified. If the default experiment exists, return it; otherwise, raise an error.
* if id or name is specified, return the specified experiment. If no such experiment is found, raise an error.
Parameters
----------
experiment_id : str
id of the experiment to return.
experiment_name : str
name of the experiment to return.
create : boolean
create the experiment if it hasn't been created before.
start : boolean
start the new experiment if one is created.
Returns
-------
An experiment object.
|
get_exp
|
python
|
microsoft/qlib
|
qlib/workflow/expm.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/expm.py
|
MIT
|
def _get_or_create_exp(self, experiment_id=None, experiment_name=None) -> (object, bool):
"""
Method for getting or creating an experiment. It will first try to get a valid experiment; if an exception occurs,
it will automatically create a new experiment based on the given id and name.
"""
try:
return (
self._get_exp(experiment_id=experiment_id, experiment_name=experiment_name),
False,
)
except ValueError:
if experiment_name is None:
experiment_name = self._default_exp_name
logger.warning(f"No valid experiment found. Create a new experiment with name {experiment_name}.")
# NOTE: mlflow doesn't consider the lock for recording multiple runs
# So we supported it in the interface wrapper
pr = urlparse(self.uri)
if pr.scheme == "file":
with FileLock(Path(os.path.join(pr.netloc, pr.path.lstrip("/"), "filelock"))): # pylint: disable=E0110
return self.create_exp(experiment_name), True
# NOTE: for other schemes like http, we double check to avoid create exp conflicts
try:
return self.create_exp(experiment_name), True
except ExpAlreadyExistError:
return (
self._get_exp(experiment_id=experiment_id, experiment_name=experiment_name),
False,
)
|
Method for getting or creating an experiment. It will first try to get a valid experiment; if an exception occurs,
it will automatically create a new experiment based on the given id and name.
|
_get_or_create_exp
|
python
|
microsoft/qlib
|
qlib/workflow/expm.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/expm.py
|
MIT
|
def default_uri(self):
"""
Get the default tracking URI from qlib.config.C
"""
if "kwargs" not in C.exp_manager or "uri" not in C.exp_manager["kwargs"]:
raise ValueError("The default URI is not set in qlib.config.C")
return C.exp_manager["kwargs"]["uri"]
|
Get the default tracking URI from qlib.config.C
|
default_uri
|
python
|
microsoft/qlib
|
qlib/workflow/expm.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/expm.py
|
MIT
|
def _get_exp(self, experiment_id=None, experiment_name=None):
"""
Method for getting an experiment (not creating one). It will try to get a valid experiment; if an exception occurs, it will
raise errors.
"""
assert (
experiment_id is not None or experiment_name is not None
), "Please input at least one of experiment/recorder id or name before retrieving experiment/recorder."
if experiment_id is not None:
try:
# NOTE: the mlflow's experiment_id must be str type...
# https://www.mlflow.org/docs/latest/python_api/mlflow.tracking.html#mlflow.tracking.MlflowClient.get_experiment
exp = self.client.get_experiment(experiment_id)
if exp.lifecycle_stage.upper() == "DELETED":
raise MlflowException("No valid experiment has been found.")
experiment = MLflowExperiment(exp.experiment_id, exp.name, self.uri)
return experiment
except MlflowException as e:
raise ValueError(
"No valid experiment has been found, please make sure the input experiment id is correct."
) from e
elif experiment_name is not None:
try:
exp = self.client.get_experiment_by_name(experiment_name)
if exp is None or exp.lifecycle_stage.upper() == "DELETED":
raise MlflowException("No valid experiment has been found.")
experiment = MLflowExperiment(exp.experiment_id, experiment_name, self.uri)
return experiment
except MlflowException as e:
raise ValueError(
"No valid experiment has been found, please make sure the input experiment name is correct."
) from e
|
Method for getting an experiment (not creating one). It will try to get a valid experiment; if an exception occurs, it will
raise errors.
|
_get_exp
|
python
|
microsoft/qlib
|
qlib/workflow/expm.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/expm.py
|
MIT
|
def get_local_dir(self):
"""
This function will return the directory path of this recorder.
"""
if self.artifact_uri is not None:
if platform.system() == "Windows":
local_dir_path = Path(self.artifact_uri.lstrip("file:").lstrip("/")).parent
else:
local_dir_path = Path(self.artifact_uri.lstrip("file:")).parent
local_dir_path = str(local_dir_path.resolve())
if os.path.isdir(local_dir_path):
return local_dir_path
else:
raise RuntimeError("This recorder is not saved in the local file system.")
else:
raise ValueError(
"Please make sure the recorder has been created and started properly before getting artifact uri."
)
|
This function will return the directory path of this recorder.
|
get_local_dir
|
python
|
microsoft/qlib
|
qlib/workflow/recorder.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/recorder.py
|
MIT
|
def load_object(self, name, unpickler=pickle.Unpickler):
"""
Load object such as prediction file or model checkpoint in mlflow.
Args:
name (str): the object name
unpickler: Supporting using custom unpickler
Raises:
LoadObjectError: if raise some exceptions when load the object
Returns:
object: the saved object in mlflow.
"""
assert self.uri is not None, "Please start the experiment and recorder first before using recorder directly."
path = None
try:
path = self.client.download_artifacts(self.id, name)
with Path(path).open("rb") as f:
data = unpickler(f).load()
return data
except Exception as e:
raise LoadObjectError(str(e)) from e
finally:
ar = self.client._tracking_client._get_artifact_repo(self.id)
if isinstance(ar, AzureBlobArtifactRepository) and path is not None:
# for saving disk space
# For safety, only remove redundant file for specific ArtifactRepository
shutil.rmtree(Path(path).absolute().parent)
|
Load object such as prediction file or model checkpoint in mlflow.
Args:
name (str): the object name
unpickler: Supporting using custom unpickler
Raises:
LoadObjectError: if raise some exceptions when load the object
Returns:
object: the saved object in mlflow.
|
load_object
|
python
|
microsoft/qlib
|
qlib/workflow/recorder.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/recorder.py
|
MIT
|
def save(self, **kwargs):
"""
It behaves the same as self.recorder.save_objects.
But it is an easier interface because users don't have to care about `get_path` and `artifact_path`
"""
art_path = self.get_path()
if art_path == "":
art_path = None
self.recorder.save_objects(artifact_path=art_path, **kwargs)
|
It behaves the same as self.recorder.save_objects.
But it is an easier interface because users don't have to care about `get_path` and `artifact_path`
|
save
|
python
|
microsoft/qlib
|
qlib/workflow/record_temp.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/record_temp.py
|
MIT
|
def load(self, name: str, parents: bool = True):
"""
It behaves the same as self.recorder.load_object.
But it is an easier interface because users don't have to care about `get_path` and `artifact_path`
Parameters
----------
name : str
the name of the file to be loaded.
parents : bool
Each recorder has a different `artifact_path`,
so `parents` recursively searches the path in the parent classes.
Subclasses have higher priority.
Return
------
The stored records.
"""
try:
return self.recorder.load_object(self.get_path(name))
except LoadObjectError as e:
if parents:
if self.depend_cls is not None:
with class_casting(self, self.depend_cls):
return self.load(name, parents=True)
raise e
|
It behaves the same as self.recorder.load_object.
But it is an easier interface because users don't have to care about `get_path` and `artifact_path`
Parameters
----------
name : str
the name of the file to be loaded.
parents : bool
Each recorder has a different `artifact_path`,
so `parents` recursively searches the path in the parent classes.
Subclasses have higher priority.
Return
------
The stored records.
|
load
|
python
|
microsoft/qlib
|
qlib/workflow/record_temp.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/record_temp.py
|
MIT
|
def check(self, include_self: bool = False, parents: bool = True):
"""
Check if the records are properly generated and saved.
It is useful in the following cases:
- checking whether the dependent files are complete before generating new things.
- checking whether the final files are complete.
Parameters
----------
include_self : bool
whether the files generated by self are included.
parents : bool
whether to check the parents.
Raise
------
FileNotFoundError
raised when the records are not stored properly.
"""
if include_self:
# Some mlflow backends will not list the directory recursively,
# so we force listing the directory.
artifacts = {}
def _get_arts(dirn):
if dirn not in artifacts:
artifacts[dirn] = self.recorder.list_artifacts(dirn)
return artifacts[dirn]
for item in self.list():
ps = self.get_path(item).split("/")
dirn = "/".join(ps[:-1])
if self.get_path(item) not in _get_arts(dirn):
raise FileNotFoundError
if parents:
if self.depend_cls is not None:
with class_casting(self, self.depend_cls):
self.check(include_self=True)
|
Check if the records are properly generated and saved.
It is useful in the following cases:
- checking whether the dependent files are complete before generating new things.
- checking whether the final files are complete.
Parameters
----------
include_self : bool
whether the files generated by self are included.
parents : bool
whether to check the parents.
Raise
------
FileNotFoundError
raised when the records are not stored properly.
|
check
|
python
|
microsoft/qlib
|
qlib/workflow/record_temp.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/record_temp.py
|
MIT
|
def generate(self, *args, **kwargs):
"""automatically checking the files and then run the concrete generating task"""
if self.skip_existing:
try:
self.check(include_self=True, parents=False)
except FileNotFoundError:
pass # continue to generating metrics
else:
logger.info("The results has previously generated, Generation skipped.")
return
try:
self.check()
except FileNotFoundError:
logger.warning("The dependent data does not exists. Generation skipped.")
return
artifact_dict = self._generate(*args, **kwargs)
if isinstance(artifact_dict, dict):
self.save(**artifact_dict)
return artifact_dict
|
automatically checking the files and then run the concrete generating task
|
generate
|
python
|
microsoft/qlib
|
qlib/workflow/record_temp.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/record_temp.py
|
MIT
|
def _generate(self, label: Optional[pd.DataFrame] = None, **kwargs):
"""
Parameters
----------
label : Optional[pd.DataFrame]
Label should be a dataframe.
"""
pred = self.load("pred.pkl")
if label is None:
label = self.load("label.pkl")
if label is None or not isinstance(label, pd.DataFrame) or label.empty:
logger.warning(f"Empty label.")
return
ic, ric = calc_ic(pred.iloc[:, 0], label.iloc[:, self.label_col])
metrics = {
"IC": ic.mean(),
"ICIR": ic.mean() / ic.std(),
"Rank IC": ric.mean(),
"Rank ICIR": ric.mean() / ric.std(),
}
objects = {"ic.pkl": ic, "ric.pkl": ric}
if self.ana_long_short:
long_short_r, long_avg_r = calc_long_short_return(pred.iloc[:, 0], label.iloc[:, self.label_col])
metrics.update(
{
"Long-Short Ann Return": long_short_r.mean() * self.ann_scaler,
"Long-Short Ann Sharpe": long_short_r.mean() / long_short_r.std() * self.ann_scaler**0.5,
"Long-Avg Ann Return": long_avg_r.mean() * self.ann_scaler,
"Long-Avg Ann Sharpe": long_avg_r.mean() / long_avg_r.std() * self.ann_scaler**0.5,
}
)
objects.update(
{
"long_short_r.pkl": long_short_r,
"long_avg_r.pkl": long_avg_r,
}
)
self.recorder.log_metrics(**metrics)
pprint(metrics)
return objects
|
Parameters
----------
label : Optional[pd.DataFrame]
Label should be a dataframe.
|
_generate
|
python
|
microsoft/qlib
|
qlib/workflow/record_temp.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/record_temp.py
|
MIT
|
def __init__(
self,
recorder,
config=None,
risk_analysis_freq: Union[List, str] = None,
indicator_analysis_freq: Union[List, str] = None,
indicator_analysis_method=None,
skip_existing=False,
**kwargs,
):
"""
config["strategy"] : dict
define the strategy class as well as the kwargs.
config["executor"] : dict
define the executor class as well as the kwargs.
config["backtest"] : dict
define the backtest kwargs.
risk_analysis_freq : str|List[str]
risk analysis freq of report
indicator_analysis_freq : str|List[str]
indicator analysis freq of report
indicator_analysis_method : str, optional, default None
the candidate values include 'mean', 'amount_weighted', 'value_weighted'
"""
super().__init__(recorder=recorder, skip_existing=skip_existing, **kwargs)
if config is None:
config = { # Default config for daily trading
"strategy": {
"class": "TopkDropoutStrategy",
"module_path": "qlib.contrib.strategy",
"kwargs": {"signal": "<PRED>", "topk": 50, "n_drop": 5},
},
"backtest": {
"start_time": None,
"end_time": None,
"account": 100000000,
"benchmark": "SH000300",
"exchange_kwargs": {
"limit_threshold": 0.095,
"deal_price": "close",
"open_cost": 0.0005,
"close_cost": 0.0015,
"min_cost": 5,
},
},
}
# We only deepcopy_basic_type because
# - We don't want to affect the config outside.
# - We don't want to deepcopy complex object to avoid overhead
config = deepcopy_basic_type(config)
self.strategy_config = config["strategy"]
_default_executor_config = {
"class": "SimulatorExecutor",
"module_path": "qlib.backtest.executor",
"kwargs": {
"time_per_step": "day",
"generate_portfolio_metrics": True,
},
}
self.executor_config = config.get("executor", _default_executor_config)
self.backtest_config = config["backtest"]
self.all_freq = self._get_report_freq(self.executor_config)
if risk_analysis_freq is None:
risk_analysis_freq = [self.all_freq[0]]
if indicator_analysis_freq is None:
indicator_analysis_freq = [self.all_freq[0]]
if isinstance(risk_analysis_freq, str):
risk_analysis_freq = [risk_analysis_freq]
if isinstance(indicator_analysis_freq, str):
indicator_analysis_freq = [indicator_analysis_freq]
self.risk_analysis_freq = [
"{0}{1}".format(*Freq.parse(_analysis_freq)) for _analysis_freq in risk_analysis_freq
]
self.indicator_analysis_freq = [
"{0}{1}".format(*Freq.parse(_analysis_freq)) for _analysis_freq in indicator_analysis_freq
]
self.indicator_analysis_method = indicator_analysis_method
|
config["strategy"] : dict
define the strategy class as well as the kwargs.
config["executor"] : dict
define the executor class as well as the kwargs.
config["backtest"] : dict
define the backtest kwargs.
risk_analysis_freq : str|List[str]
risk analysis freq of report
indicator_analysis_freq : str|List[str]
indicator analysis freq of report
indicator_analysis_method : str, optional, default None
the candidate values include 'mean', 'amount_weighted', 'value_weighted'
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/record_temp.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/record_temp.py
|
MIT
|
def __init__(self, recorder, pass_num=10, shuffle_init_score=True, **kwargs):
"""
Parameters
----------
recorder : Recorder
The recorder used to save the backtest results.
pass_num : int
The number of backtest passes.
shuffle_init_score : bool
Whether to shuffle the prediction score of the first backtest date.
"""
self.pass_num = pass_num
self.shuffle_init_score = shuffle_init_score
super().__init__(recorder, **kwargs)
# Save original strategy so that pred df can be replaced in next generate
self.original_strategy = deepcopy_basic_type(self.strategy_config)
if not isinstance(self.original_strategy, dict):
raise QlibException("MultiPassPortAnaRecord require the passed in strategy to be a dict")
if "signal" not in self.original_strategy.get("kwargs", {}):
raise QlibException("MultiPassPortAnaRecord require the passed in strategy to have signal as a parameter")
|
Parameters
----------
recorder : Recorder
The recorder used to save the backtest results.
pass_num : int
The number of backtest passes.
shuffle_init_score : bool
Whether to shuffle the prediction score of the first backtest date.
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/record_temp.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/record_temp.py
|
MIT
|
def experiment_exception_hook(exc_type, value, tb):
"""
End an experiment with the status "FAILED". This hook tries to catch uncaught exceptions
and end the experiment automatically.
Parameters
exc_type: Exception type
value: Exception's value
tb: Exception's traceback
"""
logger.error(f"An exception has been raised[{exc_type.__name__}: {value}].")
# Same as original format
traceback.print_tb(tb)
print(f"{exc_type.__name__}: {value}")
R.end_exp(recorder_status=Recorder.STATUS_FA)
|
End an experiment with the status "FAILED". This hook tries to catch uncaught exceptions
and end the experiment automatically.
Parameters
exc_type: Exception type
value: Exception's value
tb: Exception's traceback
|
experiment_exception_hook
|
python
|
microsoft/qlib
|
qlib/workflow/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/utils.py
|
MIT
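A brief sketch of how such a hook is typically installed (an assumption for illustration; the record itself does not show the wiring): Python routes uncaught exceptions through sys.excepthook.
import sys
from qlib.workflow.utils import experiment_exception_hook
# Any uncaught exception now ends the active experiment with status
# "FAILED" instead of leaving the recorder dangling.
sys.excepthook = experiment_exception_hook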
|
def start(
self,
*,
experiment_id: Optional[Text] = None,
experiment_name: Optional[Text] = None,
recorder_id: Optional[Text] = None,
recorder_name: Optional[Text] = None,
uri: Optional[Text] = None,
resume: bool = False,
):
"""
Method to start an experiment. This method can only be called within a Python's `with` statement. Here is the example code:
.. code-block:: Python
# start new experiment and recorder
with R.start(experiment_name='test', recorder_name='recorder_1'):
model.fit(dataset)
R.log...
... # further operations
# resume previous experiment and recorder
with R.start(experiment_name='test', recorder_name='recorder_1', resume=True): # if users want to resume recorder, they have to specify the exact same name for experiment and recorder.
... # further operations
Parameters
----------
experiment_id : str
id of the experiment one wants to start.
experiment_name : str
name of the experiment one wants to start.
recorder_id : str
id of the recorder under the experiment one wants to start.
recorder_name : str
name of the recorder under the experiment one wants to start.
uri : str
The tracking uri of the experiment, where all the artifacts/metrics etc. will be stored.
The default uri is set in the qlib.config. Note that this uri argument will not change the one defined in the config file.
Therefore, the next time users call this function in the same experiment,
they have to also specify this argument with the same value. Otherwise, inconsistent uri may occur.
resume : bool
whether to resume the specific recorder with given name under the given experiment.
"""
run = self.start_exp(
experiment_id=experiment_id,
experiment_name=experiment_name,
recorder_id=recorder_id,
recorder_name=recorder_name,
uri=uri,
resume=resume,
)
try:
yield run
except Exception as e:
self.end_exp(Recorder.STATUS_FA) # end the experiment if something went wrong
raise e
self.end_exp(Recorder.STATUS_FI)
|
Method to start an experiment. This method can only be called within a Python's `with` statement. Here is the example code:
.. code-block:: Python
# start new experiment and recorder
with R.start(experiment_name='test', recorder_name='recorder_1'):
model.fit(dataset)
R.log...
... # further operations
# resume previous experiment and recorder
with R.start(experiment_name='test', recorder_name='recorder_1', resume=True): # if users want to resume recorder, they have to specify the exact same name for experiment and recorder.
... # further operations
Parameters
----------
experiment_id : str
id of the experiment one wants to start.
experiment_name : str
name of the experiment one wants to start.
recorder_id : str
id of the recorder under the experiment one wants to start.
recorder_name : str
name of the recorder under the experiment one wants to start.
uri : str
The tracking uri of the experiment, where all the artifacts/metrics etc. will be stored.
The default uri is set in the qlib.config. Note that this uri argument will not change the one defined in the config file.
Therefore, the next time users call this function in the same experiment,
they have to also specify this argument with the same value. Otherwise, inconsistent uri may occur.
resume : bool
whether to resume the specific recorder with given name under the given experiment.
|
start
|
python
|
microsoft/qlib
|
qlib/workflow/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/__init__.py
|
MIT
|
def start_exp(
self,
*,
experiment_id=None,
experiment_name=None,
recorder_id=None,
recorder_name=None,
uri=None,
resume=False,
):
"""
Lower-level method for starting an experiment. When using this method, one should end the experiment manually
and the status of the recorder may not be handled properly. Here is the example code:
.. code-block:: Python
R.start_exp(experiment_name='test', recorder_name='recorder_1')
... # further operations
R.end_exp('FINISHED') or R.end_exp(Recorder.STATUS_S)
Parameters
----------
experiment_id : str
id of the experiment one wants to start.
experiment_name : str
the name of the experiment to be started
recorder_id : str
id of the recorder under the experiment one wants to start.
recorder_name : str
name of the recorder under the experiment one wants to start.
uri : str
the tracking uri of the experiment, where all the artifacts/metrics etc. will be stored.
The default uri is set in the qlib.config.
resume : bool
whether to resume the specific recorder with given name under the given experiment.
Returns
-------
An experiment instance being started.
"""
return self.exp_manager.start_exp(
experiment_id=experiment_id,
experiment_name=experiment_name,
recorder_id=recorder_id,
recorder_name=recorder_name,
uri=uri,
resume=resume,
)
|
Lower-level method for starting an experiment. When using this method, one should end the experiment manually
and the status of the recorder may not be handled properly. Here is the example code:
.. code-block:: Python
R.start_exp(experiment_name='test', recorder_name='recorder_1')
... # further operations
R.end_exp('FINISHED') or R.end_exp(Recorder.STATUS_S)
Parameters
----------
experiment_id : str
id of the experiment one wants to start.
experiment_name : str
the name of the experiment to be started
recorder_id : str
id of the recorder under the experiment one wants to start.
recorder_name : str
name of the recorder under the experiment one wants to start.
uri : str
the tracking uri of the experiment, where all the artifacts/metrics etc. will be stored.
The default uri is set in the qlib.config.
resume : bool
whether to resume the specific recorder with given name under the given experiment.
Returns
-------
An experiment instance being started.
|
start_exp
|
python
|
microsoft/qlib
|
qlib/workflow/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/__init__.py
|
MIT
|
def get_exp(
self, *, experiment_id=None, experiment_name=None, create: bool = True, start: bool = False
) -> Experiment:
"""
Method for retrieving an experiment with given id or name. Once the `create` argument is set to
True, if no valid experiment is found, this method will create one for you. Otherwise, it will
only retrieve a specific experiment or raise an Error.
- If '`create`' is True:
- If `active experiment` exists:
- no id or name specified, return the active experiment.
- if id or name is specified, return the specified experiment. If no such exp found, create a new experiment with given id or name.
- If `active experiment` does not exist:
- no id or name specified, create a default experiment, and the experiment is set to be active.
- if id or name is specified, return the specified experiment. If no such exp found, create a new experiment with given name or the default experiment.
- Else If '`create`' is False:
- If `active experiment` exists:
- no id or name specified, return the active experiment.
- if id or name is specified, return the specified experiment. If no such exp found, raise Error.
- If `active experiment` does not exist:
- no id or name specified. If the default experiment exists, return it, otherwise, raise Error.
- if id or name is specified, return the specified experiment. If no such exp found, raise Error.
Here are some use cases:
.. code-block:: Python
# Case 1
with R.start('test'):
exp = R.get_exp()
recorders = exp.list_recorders()
# Case 2
with R.start('test'):
exp = R.get_exp(experiment_name='test1')
# Case 3
exp = R.get_exp() -> a default experiment.
# Case 4
exp = R.get_exp(experiment_name='test')
# Case 5
exp = R.get_exp(create=False) -> the default experiment if exists.
Parameters
----------
experiment_id : str
id of the experiment.
experiment_name : str
name of the experiment.
create : boolean
an argument that determines whether the method will automatically create a new experiment
according to the user's specification if the experiment hasn't been created before.
start : bool
when start is True,
if the experiment has not started (not activated), it will start.
It is designed for R.log_params to auto-start experiments.
Returns
-------
An experiment instance with given id or name.
"""
return self.exp_manager.get_exp(
experiment_id=experiment_id,
experiment_name=experiment_name,
create=create,
start=start,
)
|
Method for retrieving an experiment with given id or name. Once the `create` argument is set to
True, if no valid experiment is found, this method will create one for you. Otherwise, it will
only retrieve a specific experiment or raise an Error.
- If '`create`' is True:
- If `active experiment` exists:
- no id or name specified, return the active experiment.
- if id or name is specified, return the specified experiment. If no such exp found, create a new experiment with given id or name.
- If `active experiment` does not exist:
- no id or name specified, create a default experiment, and the experiment is set to be active.
- if id or name is specified, return the specified experiment. If no such exp found, create a new experiment with given name or the default experiment.
- Else If '`create`' is False:
- If `active experiment` exists:
- no id or name specified, return the active experiment.
- if id or name is specified, return the specified experiment. If no such exp found, raise Error.
- If `active experiment` does not exist:
- no id or name specified. If the default experiment exists, return it, otherwise, raise Error.
- if id or name is specified, return the specified experiment. If no such exp found, raise Error.
Here are some use cases:
.. code-block:: Python
# Case 1
with R.start('test'):
exp = R.get_exp()
recorders = exp.list_recorders()
# Case 2
with R.start('test'):
exp = R.get_exp(experiment_name='test1')
# Case 3
exp = R.get_exp() -> a default experiment.
# Case 4
exp = R.get_exp(experiment_name='test')
# Case 5
exp = R.get_exp(create=False) -> the default experiment if exists.
Parameters
----------
experiment_id : str
id of the experiment.
experiment_name : str
name of the experiment.
create : boolean
an argument that determines whether the method will automatically create a new experiment
according to the user's specification if the experiment hasn't been created before.
start : bool
when start is True,
if the experiment has not started (not activated), it will start.
It is designed for R.log_params to auto-start experiments.
Returns
-------
An experiment instance with given id or name.
|
get_exp
|
python
|
microsoft/qlib
|
qlib/workflow/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/__init__.py
|
MIT
|
def uri_context(self, uri: Text):
"""
Temporarily set the exp_manager's **default_uri** to uri
NOTE:
- Please refer to the NOTE in the `set_uri`
Parameters
----------
uri : Text
the temporal uri
"""
prev_uri = self.exp_manager.default_uri
self.exp_manager.default_uri = uri
try:
yield
finally:
self.exp_manager.default_uri = prev_uri
|
Temporarily set the exp_manager's **default_uri** to uri
NOTE:
- Please refer to the NOTE in the `set_uri`
Parameters
----------
uri : Text
the temporal uri
|
uri_context
|
python
|
microsoft/qlib
|
qlib/workflow/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/__init__.py
|
MIT
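A minimal usage sketch; the URI below is a hypothetical local path.
from qlib.workflow import R
# Temporarily redirect tracking to a scratch directory; the previous
# default_uri is restored when the block exits, even on error.
with R.uri_context("file:///tmp/mlruns_scratch"):  # hypothetical uri
    exp = R.get_exp(experiment_name="scratch_test")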
|
def get_recorder(
self,
*,
recorder_id=None,
recorder_name=None,
experiment_id=None,
experiment_name=None,
) -> Recorder:
"""
Method for retrieving a recorder.
- If `active recorder` exists:
- no id or name specified, return the active recorder.
- if id or name is specified, return the specified recorder.
- If `active recorder` does not exist:
- no id or name specified, raise Error.
- if id or name is specified, the corresponding experiment_name must also be given to return the specified recorder. Otherwise, raise Error.
The recorder can be used for further process such as `save_object`, `load_object`, `log_params`,
`log_metrics`, etc.
Here are some use cases:
.. code-block:: Python
# Case 1
with R.start(experiment_name='test'):
recorder = R.get_recorder()
# Case 2
with R.start(experiment_name='test'):
recorder = R.get_recorder(recorder_id='2e7a4efd66574fa49039e00ffaefa99d')
# Case 3
recorder = R.get_recorder() -> Error
# Case 4
recorder = R.get_recorder(recorder_id='2e7a4efd66574fa49039e00ffaefa99d') -> Error
# Case 5
recorder = R.get_recorder(recorder_id='2e7a4efd66574fa49039e00ffaefa99d', experiment_name='test')
Here are some questions users may have:
- Q: Which recorder will be returned if multiple recorders meet the query (e.g. a query with experiment_name)?
- A: If the mlflow backend is used, the recorder with the latest `start_time` will be returned, because MLflow's `search_runs` function guarantees it.
Parameters
----------
recorder_id : str
id of the recorder.
recorder_name : str
name of the recorder.
experiment_name : str
name of the experiment.
Returns
-------
A recorder instance.
"""
return self.get_exp(experiment_name=experiment_name, experiment_id=experiment_id, create=False).get_recorder(
recorder_id, recorder_name, create=False, start=False
)
|
Method for retrieving a recorder.
- If `active recorder` exists:
- no id or name specified, return the active recorder.
- if id or name is specified, return the specified recorder.
- If `active recorder` does not exist:
- no id or name specified, raise Error.
- if id or name is specified, the corresponding experiment_name must also be given to return the specified recorder. Otherwise, raise Error.
The recorder can be used for further process such as `save_object`, `load_object`, `log_params`,
`log_metrics`, etc.
Here are some use cases:
.. code-block:: Python
# Case 1
with R.start(experiment_name='test'):
recorder = R.get_recorder()
# Case 2
with R.start(experiment_name='test'):
recorder = R.get_recorder(recorder_id='2e7a4efd66574fa49039e00ffaefa99d')
# Case 3
recorder = R.get_recorder() -> Error
# Case 4
recorder = R.get_recorder(recorder_id='2e7a4efd66574fa49039e00ffaefa99d') -> Error
# Case 5
recorder = R.get_recorder(recorder_id='2e7a4efd66574fa49039e00ffaefa99d', experiment_name='test')
Here are some questions users may have:
- Q: Which recorder will be returned if multiple recorders meet the query (e.g. a query with experiment_name)?
- A: If the mlflow backend is used, the recorder with the latest `start_time` will be returned, because MLflow's `search_runs` function guarantees it.
Parameters
----------
recorder_id : str
id of the recorder.
recorder_name : str
name of the recorder.
experiment_name : str
name of the experiment.
Returns
-------
A recorder instance.
|
get_recorder
|
python
|
microsoft/qlib
|
qlib/workflow/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/__init__.py
|
MIT
|
def save_objects(self, local_path=None, artifact_path=None, **kwargs: Dict[Text, Any]):
"""
Method for saving objects as artifacts in the experiment to the uri. It supports either saving
from a local file/directory, or directly saving objects. User can use valid python's keywords arguments
to specify the object to be saved as well as its name (name: value).
In summary, this API is designed for saving **objects** to **the experiments management backend path**,
1. Qlib provides two methods to specify **objects**
- Passing in the object directly by passing with `**kwargs` (e.g. R.save_objects(trained_model=model))
- Passing in the local path to the object, i.e. `local_path` parameter.
2. `artifact_path` represents the **the experiments management backend path**
- If `active recorder` exists: it will save the objects through the active recorder.
- If `active recorder` not exists: the system will create a default experiment, and a new recorder and save objects under it.
.. note::
If one wants to save objects with a specific recorder, it is recommended to first get the specific recorder through the `get_recorder` API and use that recorder to save objects. The supported arguments are the same as this method.
Here are some use cases:
.. code-block:: Python
# Case 1
with R.start(experiment_name='test'):
pred = model.predict(dataset)
R.save_objects(**{"pred.pkl": pred}, artifact_path='prediction')
rid = R.get_recorder().id
...
R.get_recorder(recorder_id=rid).load_object("prediction/pred.pkl") # after saving objects, you can load the previous object with this api
# Case 2
with R.start(experiment_name='test'):
R.save_objects(local_path='results/pred.pkl', artifact_path="prediction")
rid = R.get_recorder().id
...
R.get_recorder(recorder_id=rid).load_object("prediction/pred.pkl") # after saving objects, you can load the previous object with this api
Parameters
----------
local_path : str
if provided, then save the file or directory to the artifact URI.
artifact_path : str
the relative path for the artifact to be stored in the URI.
**kwargs: Dict[Text, Any]
the object to be saved.
For example, `{"pred.pkl": pred}`
"""
if local_path is not None and len(kwargs) > 0:
raise ValueError(
"You can choose only one of `local_path`(save the files in a path) or `kwargs`(pass in the objects directly)"
)
self.get_exp().get_recorder(start=True).save_objects(local_path, artifact_path, **kwargs)
|
Method for saving objects as artifacts in the experiment to the uri. It supports either saving
from a local file/directory, or directly saving objects. User can use valid python's keywords arguments
to specify the object to be saved as well as its name (name: value).
In summary, this API is designed for saving **objects** to **the experiments management backend path**,
1. Qlib provides two methods to specify **objects**
- Passing in the object directly by passing with `**kwargs` (e.g. R.save_objects(trained_model=model))
- Passing in the local path to the object, i.e. `local_path` parameter.
2. `artifact_path` represents the **the experiments management backend path**
- If `active recorder` exists: it will save the objects through the active recorder.
- If `active recorder` not exists: the system will create a default experiment, and a new recorder and save objects under it.
.. note::
If one wants to save objects with a specific recorder, it is recommended to first get the specific recorder through the `get_recorder` API and use that recorder to save objects. The supported arguments are the same as this method.
Here are some use cases:
.. code-block:: Python
# Case 1
with R.start(experiment_name='test'):
pred = model.predict(dataset)
R.save_objects(**{"pred.pkl": pred}, artifact_path='prediction')
rid = R.get_recorder().id
...
R.get_recorder(recorder_id=rid).load_object("prediction/pred.pkl") # after saving objects, you can load the previous object with this api
# Case 2
with R.start(experiment_name='test'):
R.save_objects(local_path='results/pred.pkl', artifact_path="prediction")
rid = R.get_recorder().id
...
R.get_recorder(recorder_id=rid).load_object("prediction/pred.pkl") # after saving objects, you can load the previous object with this api
Parameters
----------
local_path : str
if provided, then save the file or directory to the artifact URI.
artifact_path : str
the relative path for the artifact to be stored in the URI.
**kwargs: Dict[Text, Any]
the object to be saved.
For example, `{"pred.pkl": pred}`
|
save_objects
|
python
|
microsoft/qlib
|
qlib/workflow/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/__init__.py
|
MIT
|
def __init__(
self,
strategies: Union[OnlineStrategy, List[OnlineStrategy]],
trainer: Trainer = None,
begin_time: Union[str, pd.Timestamp] = None,
freq="day",
):
"""
Init OnlineManager.
One OnlineManager must have at least one OnlineStrategy.
Args:
strategies (Union[OnlineStrategy, List[OnlineStrategy]]): an instance of OnlineStrategy or a list of OnlineStrategy
begin_time (Union[str,pd.Timestamp], optional): the OnlineManager will begin at this time. Defaults to None for using the latest date.
trainer (qlib.model.trainer.Trainer): the trainer to train task. None for using TrainerR.
freq (str, optional): data frequency. Defaults to "day".
"""
self.logger = get_module_logger(self.__class__.__name__)
if not isinstance(strategies, list):
strategies = [strategies]
self.strategies = strategies
self.freq = freq
if begin_time is None:
begin_time = D.calendar(freq=self.freq).max()
self.begin_time = pd.Timestamp(begin_time)
self.cur_time = self.begin_time
# OnlineManager will record the history of online models, which is a dict like {pd.Timestamp, {strategy, [online_models]}}.
# It records the online serving models of each strategy for each day.
self.history = {}
if trainer is None:
trainer = TrainerR()
self.trainer = trainer
self.signals = None
self.status = self.STATUS_ONLINE
|
Init OnlineManager.
One OnlineManager must have at least one OnlineStrategy.
Args:
strategies (Union[OnlineStrategy, List[OnlineStrategy]]): an instance of OnlineStrategy or a list of OnlineStrategy
begin_time (Union[str,pd.Timestamp], optional): the OnlineManager will begin at this time. Defaults to None for using the latest date.
trainer (qlib.model.trainer.Trainer): the trainer to train task. None for using TrainerR.
freq (str, optional): data frequency. Defaults to "day".
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/manager.py
|
MIT
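A construction sketch under stated assumptions: RollingStrategy and RollingGen are the classes documented later in this file, and task_config stands in for a real qlib task template.
from qlib.model.trainer import TrainerR
from qlib.workflow.online.manager import OnlineManager
from qlib.workflow.online.strategy import RollingStrategy
from qlib.workflow.task.gen import RollingGen
task_config = {}  # placeholder: a real qlib task template dict goes here
strategy = RollingStrategy("rolling_exp", task_config, RollingGen(step=20))
manager = OnlineManager(strategy, trainer=TrainerR(), begin_time="2020-01-01")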
|
def first_train(self, strategies: List[OnlineStrategy] = None, model_kwargs: dict = {}):
"""
Get tasks from every strategy's first_tasks method and train them.
If using DelayTrainer, it can finish training all together after every strategy's first_tasks.
Args:
strategies (List[OnlineStrategy]): the strategies list (this param is needed when adding strategies). None to use the default strategies.
model_kwargs (dict): the params for `prepare_online_models`
"""
if strategies is None:
strategies = self.strategies
models_list = []
for strategy in strategies:
self.logger.info(f"Strategy `{strategy.name_id}` begins first training...")
tasks = strategy.first_tasks()
models = self.trainer.train(tasks, experiment_name=strategy.name_id)
models_list.append(models)
self.logger.info(f"Finished training {len(models)} models.")
# FIXME: Train multiple online models at `first_train` will result in getting too much online models at the
# start.
online_models = strategy.prepare_online_models(models, **model_kwargs)
self.history.setdefault(self.cur_time, {})[strategy] = online_models
if not self._postpone_action():
for strategy, models in zip(strategies, models_list):
models = self.trainer.end_train(models, experiment_name=strategy.name_id)
|
Get tasks from every strategy's first_tasks method and train them.
If using DelayTrainer, it can finish training all together after every strategy's first_tasks.
Args:
strategies (List[OnlineStrategy]): the strategies list (this param is needed when adding strategies). None to use the default strategies.
model_kwargs (dict): the params for `prepare_online_models`
|
first_train
|
python
|
microsoft/qlib
|
qlib/workflow/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/manager.py
|
MIT
|
def routine(
self,
cur_time: Union[str, pd.Timestamp] = None,
task_kwargs: dict = {},
model_kwargs: dict = {},
signal_kwargs: dict = {},
):
"""
Typical update process for every strategy and record the online history.
The typical update process after a routine, such as day by day or month by month.
The process is: Update predictions -> Prepare tasks -> Prepare online models -> Prepare signals.
If using DelayTrainer, it can finish training all together after every strategy's prepare_tasks.
Args:
cur_time (Union[str,pd.Timestamp], optional): run routine method in this time. Defaults to None.
task_kwargs (dict): the params for `prepare_tasks`
model_kwargs (dict): the params for `prepare_online_models`
signal_kwargs (dict): the params for `prepare_signals`
"""
if cur_time is None:
cur_time = D.calendar(freq=self.freq).max()
self.cur_time = pd.Timestamp(cur_time) # None for latest date
models_list = []
for strategy in self.strategies:
self.logger.info(f"Strategy `{strategy.name_id}` begins routine...")
tasks = strategy.prepare_tasks(self.cur_time, **task_kwargs)
models = self.trainer.train(tasks, experiment_name=strategy.name_id)
models_list.append(models)
self.logger.info(f"Finished training {len(models)} models.")
online_models = strategy.prepare_online_models(models, **model_kwargs)
self.history.setdefault(self.cur_time, {})[strategy] = online_models
# The online model may change in the above processes
# So updating the predictions of online models should be the last step
if self.status == self.STATUS_ONLINE:
strategy.tool.update_online_pred()
if not self._postpone_action():
for strategy, models in zip(self.strategies, models_list):
models = self.trainer.end_train(models, experiment_name=strategy.name_id)
self.prepare_signals(**signal_kwargs)
|
Typical update process for every strategy and record the online history.
The typical update process after a routine, such as day by day or month by month.
The process is: Update predictions -> Prepare tasks -> Prepare online models -> Prepare signals.
If using DelayTrainer, it can finish training all together after every strategy's prepare_tasks.
Args:
cur_time (Union[str,pd.Timestamp], optional): run routine method in this time. Defaults to None.
task_kwargs (dict): the params for `prepare_tasks`
model_kwargs (dict): the params for `prepare_online_models`
signal_kwargs (dict): the params for `prepare_signals`
|
routine
|
python
|
microsoft/qlib
|
qlib/workflow/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/manager.py
|
MIT
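A sketch of a daily driver loop, continuing the construction sketch above (the dates are hypothetical): first_train bootstraps the models, then routine is called once per trading day.
manager.first_train()
for trade_day in ["2020-01-02", "2020-01-03"]:  # hypothetical trading days
    # prepares tasks, trains/onlines models, updates predictions and signals
    manager.routine(cur_time=trade_day)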
|
def get_collector(self, **kwargs) -> MergeCollector:
"""
Get the instance of `Collector <../advanced/task_management.html#Task Collecting>`_ to collect results from every strategy.
This collector can serve as a basis for signal preparation.
Args:
**kwargs: the params for get_collector.
Returns:
MergeCollector: the collector to merge other collectors.
"""
collector_dict = {}
for strategy in self.strategies:
collector_dict[strategy.name_id] = strategy.get_collector(**kwargs)
return MergeCollector(collector_dict, process_list=[])
|
Get the instance of `Collector <../advanced/task_management.html#Task Collecting>`_ to collect results from every strategy.
This collector can serve as a basis for signal preparation.
Args:
**kwargs: the params for get_collector.
Returns:
MergeCollector: the collector to merge other collectors.
|
get_collector
|
python
|
microsoft/qlib
|
qlib/workflow/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/manager.py
|
MIT
|
def add_strategy(self, strategies: Union[OnlineStrategy, List[OnlineStrategy]]):
"""
Add some new strategies to OnlineManager.
Args:
strategies (Union[OnlineStrategy, List[OnlineStrategy]]): a single OnlineStrategy or a list of OnlineStrategy
"""
if not isinstance(strategies, list):
strategies = [strategies]
self.first_train(strategies)
self.strategies.extend(strategies)
|
Add some new strategies to OnlineManager.
Args:
strategies (Union[OnlineStrategy, List[OnlineStrategy]]): a single OnlineStrategy or a list of OnlineStrategy
|
add_strategy
|
python
|
microsoft/qlib
|
qlib/workflow/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/manager.py
|
MIT
|
def prepare_signals(self, prepare_func: Callable = AverageEnsemble(), over_write=False):
"""
After preparing the data of the last routine (a box in a box-plot), which marks the end of the routine, we can prepare trading signals for the next routine.
NOTE: Given a set of predictions, all signals before the end times of these predictions will be prepared well.
Even if the latest signal already exists, the latest calculation result will be overwritten.
.. note::
Given a prediction of a certain time, all signals before this time will be prepared well.
Args:
prepare_func (Callable, optional): get signals from a dict after collecting. Defaults to AverageEnsemble(); the results collected by MergeCollector must be like {xxx: pred}.
over_write (bool, optional): If True, the new signals will overwrite the old ones. If False, the new signals will be appended to the end of the existing signals. Defaults to False.
Returns:
pd.DataFrame: the signals.
"""
signals = prepare_func(self.get_collector()())
old_signals = self.signals
if old_signals is not None and not over_write:
old_max = old_signals.index.get_level_values("datetime").max()
new_signals = signals.loc[old_max:]
signals = pd.concat([old_signals, new_signals], axis=0)
else:
new_signals = signals
self.logger.info(f"Finished preparing new {len(new_signals)} signals.")
self.signals = signals
return new_signals
|
After preparing the data of the last routine (a box in a box-plot), which marks the end of the routine, we can prepare trading signals for the next routine.
NOTE: Given a set of predictions, all signals before the end times of these predictions will be prepared well.
Even if the latest signal already exists, the latest calculation result will be overwritten.
.. note::
Given a prediction of a certain time, all signals before this time will be prepared well.
Args:
prepare_func (Callable, optional): get signals from a dict after collecting. Defaults to AverageEnsemble(); the results collected by MergeCollector must be like {xxx: pred}.
over_write (bool, optional): If True, the new signals will overwrite the old ones. If False, the new signals will be appended to the end of the existing signals. Defaults to False.
Returns:
pd.DataFrame: the signals.
|
prepare_signals
|
python
|
microsoft/qlib
|
qlib/workflow/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/manager.py
|
MIT
|
def simulate(
self, end_time=None, frequency="day", task_kwargs={}, model_kwargs={}, signal_kwargs={}
) -> Union[pd.Series, pd.DataFrame]:
"""
Starting from the current time, this method will simulate every routine in OnlineManager until the end time.
Considering parallel training, the models and signals can be prepared after all routines are simulated.
Delayed training can be done with ``DelayTrainer``, and delayed signal preparation with ``delay_prepare``.
Args:
end_time: the time the simulation will end
frequency: the calendar frequency
task_kwargs (dict): the params for `prepare_tasks`
model_kwargs (dict): the params for `prepare_online_models`
signal_kwargs (dict): the params for `prepare_signals`
Returns:
Union[pd.Series, pd.DataFrame]: pd.Series for only one signal per datetime.
pd.DataFrame for multiple signals, for example, buy and sell operations use different trading signals.
"""
self.status = self.STATUS_SIMULATING
cal = D.calendar(start_time=self.cur_time, end_time=end_time, freq=frequency)
self.first_train()
simulate_level = self.SIM_LOG_LEVEL
set_global_logger_level(simulate_level)
logging.addLevelName(simulate_level, self.SIM_LOG_NAME)
for cur_time in cal:
self.logger.log(level=simulate_level, msg=f"Simulating at {str(cur_time)}......")
self.routine(
cur_time,
task_kwargs=task_kwargs,
model_kwargs=model_kwargs,
signal_kwargs=signal_kwargs,
)
# delay prepare the models and signals
if self._postpone_action():
self.delay_prepare(model_kwargs=model_kwargs, signal_kwargs=signal_kwargs)
# FIXME: get logging level firstly and restore it here
set_global_logger_level(logging.DEBUG)
self.logger.info(f"Finished preparing signals")
self.status = self.STATUS_ONLINE
return self.get_signals()
|
Starting from the current time, this method will simulate every routine in OnlineManager until the end time.
Considering parallel training, the models and signals can be prepared after all routines are simulated.
Delayed training can be done with ``DelayTrainer``, and delayed signal preparation with ``delay_prepare``.
Args:
end_time: the time the simulation will end
frequency: the calendar frequency
task_kwargs (dict): the params for `prepare_tasks`
model_kwargs (dict): the params for `prepare_online_models`
signal_kwargs (dict): the params for `prepare_signals`
Returns:
Union[pd.Series, pd.DataFrame]: pd.Series for only one signal per datetime.
pd.DataFrame for multiple signals, for example, buy and sell operations use different trading signals.
|
simulate
|
python
|
microsoft/qlib
|
qlib/workflow/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/manager.py
|
MIT
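An end-to-end sketch, again continuing the construction example; the end date is hypothetical.
# Replays routine-by-routine from manager.cur_time up to the end date and
# returns the merged signals indexed by datetime.
signals = manager.simulate(end_time="2020-06-30")
print(signals.tail())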
|
def delay_prepare(self, model_kwargs={}, signal_kwargs={}):
"""
Prepare all models and signals if something is waiting for preparation.
Args:
model_kwargs: the params for `end_train`
signal_kwargs: the params for `prepare_signals`
"""
# FIXME:
# This method is not implemented in the proper way!!!
last_models = {}
signals_time = D.calendar()[0]
need_prepare = False
for cur_time, strategy_models in self.history.items():
self.cur_time = cur_time
for strategy, models in strategy_models.items():
# only new online models need to be prepared
if last_models.setdefault(strategy, set()) != set(models):
models = self.trainer.end_train(models, experiment_name=strategy.name_id, **model_kwargs)
strategy.tool.reset_online_tag(models)
need_prepare = True
last_models[strategy] = set(models)
if need_prepare:
# NOTE: Assumption: the predictions of online models reach no further than the next cur_time; otherwise this method will work in a wrong way.
self.prepare_signals(**signal_kwargs)
if signals_time > cur_time:
# FIXME: if use DelayTrainer and worker (and worker is faster than main progress), there are some possibilities of showing this warning.
self.logger.warn(
f"The signals have already parpred to {signals_time} by last preparation, but current time is only {cur_time}. This may be because the online models predict more than they should, which can cause signals to be contaminated by the offline models."
)
need_prepare = False
signals_time = self.signals.index.get_level_values("datetime").max()
|
Prepare all models and signals if something is waiting for preparation.
Args:
model_kwargs: the params for `end_train`
signal_kwargs: the params for `prepare_signals`
|
delay_prepare
|
python
|
microsoft/qlib
|
qlib/workflow/online/manager.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/manager.py
|
MIT
|
def __init__(self, name_id: str):
"""
Init OnlineStrategy.
This module **MUST** use `Trainer <../reference/api.html#qlib.model.trainer.Trainer>`_ to finish model training.
Args:
name_id (str): a unique name or id.
trainer (qlib.model.trainer.Trainer, optional): an instance of Trainer. Defaults to None.
"""
self.name_id = name_id
self.logger = get_module_logger(self.__class__.__name__)
self.tool = OnlineTool()
|
Init OnlineStrategy.
This module **MUST** use `Trainer <../reference/api.html#qlib.model.trainer.Trainer>`_ to finish model training.
Args:
name_id (str): a unique name or id.
trainer (qlib.model.trainer.Trainer, optional): an instance of Trainer. Defaults to None.
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/online/strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/strategy.py
|
MIT
|
def prepare_online_models(self, trained_models, cur_time=None) -> List[object]:
"""
Select some models from trained models and set them to online models.
This is a typical implementation that onlines all trained models; you can override it to implement more complex methods.
You can find the last online models by OnlineTool.online_models if you still need them.
NOTE: Reset all online models to trained models. If there are no trained models, then do nothing.
**NOTE**:
The current implementation is very naive. Here is a more complex situation which is closer to
practical scenarios.
1. Train new models on the day before `test_start` (at timestamp `T`)
2. Switch models at `test_start` (typically at timestamp `T + 1`)
Args:
models (list): a list of models.
cur_time (pd.Timestamp): current time from OnlineManager. None for the latest.
Returns:
List[object]: a list of online models.
"""
if not trained_models:
return self.tool.online_models()
self.tool.reset_online_tag(trained_models)
return trained_models
|
Select some models from trained models and set them to online models.
This is a typical implementation that onlines all trained models; you can override it to implement more complex methods.
You can find the last online models by OnlineTool.online_models if you still need them.
NOTE: Reset all online models to trained models. If there are no trained models, then do nothing.
**NOTE**:
The current implementation is very naive. Here is a more complex situation which is closer to
practical scenarios.
1. Train new models on the day before `test_start` (at timestamp `T`)
2. Switch models at `test_start` (typically at timestamp `T + 1`)
Args:
models (list): a list of models.
cur_time (pd.Timestamp): current time from OnlineManager. None for the latest.
Returns:
List[object]: a list of online models.
|
prepare_online_models
|
python
|
microsoft/qlib
|
qlib/workflow/online/strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/strategy.py
|
MIT
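A sketch of the kind of override the docstring hints at, keeping only the newest model online; MyStrategy is a hypothetical subclass, not part of qlib.
from qlib.workflow.online.strategy import OnlineStrategy
class MyStrategy(OnlineStrategy):  # hypothetical subclass
    def prepare_online_models(self, trained_models, cur_time=None):
        # Keep only the most recently trained model online instead of all.
        if not trained_models:
            return self.tool.online_models()
        newest = list(trained_models)[-1:]
        self.tool.reset_online_tag(newest)
        return newest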
|
def __init__(
self,
name_id: str,
task_template: Union[dict, List[dict]],
rolling_gen: RollingGen,
):
"""
Init RollingStrategy.
Assumption: the str of name_id, the experiment name, and the trainer's experiment name are the same.
Args:
name_id (str): a unique name or id. Will be also the name of the Experiment.
task_template (Union[dict, List[dict]]): a list of task_template or a single template, which will be used to generate many tasks using rolling_gen.
rolling_gen (RollingGen): an instance of RollingGen
"""
super().__init__(name_id=name_id)
self.exp_name = self.name_id
if not isinstance(task_template, list):
task_template = [task_template]
self.task_template = task_template
self.rg = rolling_gen
assert issubclass(self.rg.__class__, RollingGen), "The rolling strategy relies on the feature of RollingGen"
self.tool = OnlineToolR(self.exp_name)
self.ta = TimeAdjuster()
|
Init RollingStrategy.
Assumption: the str of name_id, the experiment name, and the trainer's experiment name are the same.
Args:
name_id (str): a unique name or id. Will be also the name of the Experiment.
task_template (Union[dict, List[dict]]): a list of task_template or a single template, which will be used to generate many tasks using rolling_gen.
rolling_gen (RollingGen): an instance of RollingGen
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/online/strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/strategy.py
|
MIT
|
def get_collector(self, process_list=[RollingGroup()], rec_key_func=None, rec_filter_func=None, artifacts_key=None):
"""
Get the instance of `Collector <../advanced/task_management.html#Task Collecting>`_ to collect results. The returned collector must distinguish results in different models.
Assumption: the models can be distinguished based on the model name and rolling test segments.
If you do not want this assumption, please implement your own method or use another rec_key_func.
Args:
rec_key_func (Callable): a function to get the key of a recorder. If None, use recorder id.
rec_filter_func (Callable, optional): filter the recorder by return True or False. Defaults to None.
artifacts_key (List[str], optional): the artifacts key you want to get. If None, get all artifacts.
"""
def rec_key(recorder):
task_config = recorder.load_object("task")
model_key = task_config["model"]["class"]
rolling_key = task_config["dataset"]["kwargs"]["segments"]["test"]
return model_key, rolling_key
if rec_key_func is None:
rec_key_func = rec_key
artifacts_collector = RecorderCollector(
experiment=self.exp_name,
process_list=process_list,
rec_key_func=rec_key_func,
rec_filter_func=rec_filter_func,
artifacts_key=artifacts_key,
)
return artifacts_collector
|
Get the instance of `Collector <../advanced/task_management.html#Task Collecting>`_ to collect results. The returned collector must distinguish results in different models.
Assumption: the models can be distinguished based on the model name and rolling test segments.
If you do not want this assumption, please implement your own method or use another rec_key_func.
Args:
rec_key_func (Callable): a function to get the key of a recorder. If None, use recorder id.
rec_filter_func (Callable, optional): filter the recorder by return True or False. Defaults to None.
artifacts_key (List[str], optional): the artifacts key you want to get. If None, get all artifacts.
|
get_collector
|
python
|
microsoft/qlib
|
qlib/workflow/online/strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/strategy.py
|
MIT
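A collection sketch, assuming strategy is the RollingStrategy built earlier and that its recorders store pred.pkl artifacts.
# Collect `pred` artifacts keyed by (model class, rolling test segment);
# RollingGroup then merges the rolling segments per model.
collector = strategy.get_collector(artifacts_key=["pred"])
results = collector()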
|
def first_tasks(self) -> List[dict]:
"""
Use rolling_gen to generate different tasks based on task_template.
Returns:
List[dict]: a list of tasks
"""
return task_generator(
tasks=self.task_template,
generators=self.rg, # generate different date segment
)
|
Use rolling_gen to generate different tasks based on task_template.
Returns:
List[dict]: a list of tasks
|
first_tasks
|
python
|
microsoft/qlib
|
qlib/workflow/online/strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/strategy.py
|
MIT
|
def prepare_tasks(self, cur_time) -> List[dict]:
"""
Prepare new tasks based on cur_time (None for the latest).
You can find the last online models by OnlineToolR.online_models.
Returns:
List[dict]: a list of new tasks.
"""
# TODO: filtering recorders by the latest test segments is not strictly necessary
latest_records, max_test = self._list_latest(self.tool.online_models())
if max_test is None:
self.logger.warn(f"No latest online recorders, no new tasks.")
return []
calendar_latest = transform_end_date(cur_time)
self.logger.info(
f"The interval between current time {calendar_latest} and last rolling test begin time {max_test[0]} is {self.ta.cal_interval(calendar_latest, max_test[0])}, the rolling step is {self.rg.step}"
)
res = []
for rec in latest_records:
task = rec.load_object("task")
res.extend(self.rg.gen_following_tasks(task, calendar_latest))
return res
|
Prepare new tasks based on cur_time (None for the latest).
You can find the last online models by OnlineToolR.online_models.
Returns:
List[dict]: a list of new tasks.
|
prepare_tasks
|
python
|
microsoft/qlib
|
qlib/workflow/online/strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/strategy.py
|
MIT
|
def _list_latest(self, rec_list: List[Recorder]):
"""
List the latest recorders from rec_list
Args:
rec_list (List[Recorder]): a list of Recorder
Returns:
List[Recorder], pd.Timestamp: the latest recorders and their test end time
"""
if len(rec_list) == 0:
return rec_list, None
max_test = max(rec.load_object("task")["dataset"]["kwargs"]["segments"]["test"] for rec in rec_list)
latest_rec = []
for rec in rec_list:
if rec.load_object("task")["dataset"]["kwargs"]["segments"]["test"] == max_test:
latest_rec.append(rec)
return latest_rec, max_test
|
List the latest recorders from rec_list
Args:
rec_list (List[Recorder]): a list of Recorder
Returns:
List[Recorder], pd.Timestamp: the latest recorders and their test end time
|
_list_latest
|
python
|
microsoft/qlib
|
qlib/workflow/online/strategy.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/strategy.py
|
MIT
|
def get_dataset(
self, start_time, end_time, segments=None, unprepared_dataset: Optional[DatasetH] = None
) -> DatasetH:
"""
Load, config and setup dataset.
This dataset is for inference.
Args:
start_time :
the start_time of underlying data
end_time :
the end_time of underlying data
segments : dict
the segments config for dataset
Due to the time series dataset (TSDatasetH), the test segments may be different from start_time and end_time
unprepared_dataset: Optional[DatasetH]
if the user doesn't want to load the dataset from the recorder, please specify the user's own dataset
Returns:
DatasetH: the instance of DatasetH
"""
if segments is None:
segments = {"test": (start_time, end_time)}
if unprepared_dataset is None:
dataset: DatasetH = self.rec.load_object("dataset")
else:
dataset = unprepared_dataset
dataset.config(handler_kwargs={"start_time": start_time, "end_time": end_time}, segments=segments)
dataset.setup_data(handler_kwargs={"init_type": DataHandlerLP.IT_LS})
return dataset
|
Load, config and setup dataset.
This dataset is for inference.
Args:
start_time :
the start_time of underlying data
end_time :
the end_time of underlying data
segments : dict
the segments config for dataset
Due to the time series dataset (TSDatasetH), the test segments may be different from start_time and end_time
unprepared_dataset: Optional[DatasetH]
if the user doesn't want to load the dataset from the recorder, please specify the user's own dataset
Returns:
DatasetH: the instance of DatasetH
|
get_dataset
|
python
|
microsoft/qlib
|
qlib/workflow/online/update.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/update.py
|
MIT
|
def __init__(
self,
record: Recorder,
to_date=None,
from_date=None,
hist_ref: Optional[int] = None,
freq="day",
fname="pred.pkl",
loader_cls: type = RMDLoader,
):
"""
Init PredUpdater.
Expected behavior in following cases:
- if `to_date` is greater than the max date in the calendar, the data will be updated to the latest date
- if there are data before `from_date` or after `to_date`, only the data between `from_date` and `to_date` are affected.
Args:
record : Recorder
to_date :
update the prediction to the `to_date`
if to_date is None:
data will be updated to the latest date.
from_date :
the update will start from `from_date`
if from_date is None:
the updating will occur on the next tick after the latest data in historical data
hist_ref : int
Sometimes, the dataset will have historical dependencies.
The problem of setting the length of the historical dependency is left to users.
If the user doesn't specify this parameter, the updater will try to load the dataset to automatically determine the hist_ref
.. note::
the start_time is not included in the `hist_ref`; so the `hist_ref` will be `step_len - 1` in most cases
loader_cls : type
the class to load the model and dataset
"""
# TODO: automate this hist_ref in the future.
super().__init__(record=record)
self.to_date = to_date
self.hist_ref = hist_ref
self.freq = freq
self.fname = fname
self.rmdl = loader_cls(rec=record)
latest_date = D.calendar(freq=freq)[-1]
if to_date is None:
to_date = latest_date
to_date = pd.Timestamp(to_date)
if to_date >= latest_date:
self.logger.warning(
f"The given `to_date`({to_date}) is later than `latest_date`({latest_date}). So `to_date` is clipped to `latest_date`."
)
to_date = latest_date
self.to_date = to_date
# FIXME: it will raise error when running routine with delay trainer
# should we use another prediction updater for delay trainer?
self.old_data: pd.DataFrame = record.load_object(fname)
if from_date is None:
# dropna is for being compatible with data containing future information (e.g. label)
# The recent label data should be updated together
self.last_end = self.old_data.dropna().index.get_level_values("datetime").max()
else:
self.last_end = get_date_by_shift(from_date, -1, align="right")
|
Init PredUpdater.
Expected behavior in following cases:
- if `to_date` is greater than the max date in the calendar, the data will be updated to the latest date
- if there are data before `from_date` or after `to_date`, only the data between `from_date` and `to_date` are affected.
Args:
record : Recorder
to_date :
update the prediction to the `to_date`
if to_date is None:
data will be updated to the latest date.
from_date :
the update will start from `from_date`
if from_date is None:
the updating will occur on the next tick after the latest data in historical data
hist_ref : int
Sometimes, the dataset will have historical dependencies.
The problem of setting the length of the historical dependency is left to users.
If the user doesn't specify this parameter, the updater will try to load the dataset to automatically determine the hist_ref
.. note::
the start_time is not included in the `hist_ref`; so the `hist_ref` will be `step_len - 1` in most cases
loader_cls : type
the class to load the model and dataset
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/online/update.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/update.py
|
MIT
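A minimal update sketch: fetch a recorder and extend its stored predictions to the latest calendar date. The recorder id and experiment name are hypothetical.
from qlib.workflow import R
from qlib.workflow.online.update import PredUpdater
rec = R.get_recorder(recorder_id="...", experiment_name="rolling_exp")  # hypothetical ids
updater = PredUpdater(record=rec, to_date=None)  # None: update to the latest date
updater.update()  # appends the new predictions and saves pred.pkl back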
|
def prepare_data(self, unprepared_dataset: Optional[DatasetH] = None) -> DatasetH:
"""
Load dataset
- if unprepared_dataset is specified, then prepare the dataset directly
- Otherwise, load the dataset from the recorder.
Separating this function makes it easier to reuse the dataset.
Returns:
DatasetH: the instance of DatasetH
"""
# automatically getting the historical dependency if not specified
if self.hist_ref is None:
dataset: DatasetH = self.record.load_object("dataset") if unprepared_dataset is None else unprepared_dataset
# Special treatment of historical dependencies
if isinstance(dataset, TSDatasetH):
hist_ref = dataset.step_len - 1
else:
hist_ref = 0 # if only the latest data is used, then only current data will be used and no historical data will be needed
else:
hist_ref = self.hist_ref
start_time_buffer = get_date_by_shift(
self.last_end, -hist_ref + 1, clip_shift=False, freq=self.freq # pylint: disable=E1130
)
start_time = get_date_by_shift(self.last_end, 1, freq=self.freq)
seg = {"test": (start_time, self.to_date)}
return self.rmdl.get_dataset(
start_time=start_time_buffer, end_time=self.to_date, segments=seg, unprepared_dataset=unprepared_dataset
)
|
Load dataset
- if unprepared_dataset is specified, then prepare the dataset directly
- Otherwise, load the dataset from the recorder.
Separating this function makes it easier to reuse the dataset.
Returns:
DatasetH: the instance of DatasetH
|
prepare_data
|
python
|
microsoft/qlib
|
qlib/workflow/online/update.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/update.py
|
MIT
|
def update(self, dataset: DatasetH = None, write: bool = True, ret_new: bool = False) -> Optional[object]:
"""
Parameters
----------
dataset : DatasetH
DatasetH: the instance of DatasetH. None to prepare it again.
write : bool
will the write action be executed
ret_new : bool
will the updated data be returned
Returns
-------
Optional[object]
the updated data
"""
# FIXME: the problem below is not solved
# The model dumped on GPU instances can not be loaded on CPU instance. Follow exception will raised
# RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.
# https://github.com/pytorch/pytorch/issues/16797
if self.last_end >= self.to_date:
self.logger.info(
f"The data in {self.record.info['id']} are latest ({self.last_end}). No need to update to {self.to_date}."
)
return
# load dataset
if dataset is None:
# For reusing the dataset
dataset = self.prepare_data()
updated_data = self.get_update_data(dataset)
if write:
self.record.save_objects(**{self.fname: updated_data})
if ret_new:
return updated_data
|
Parameters
----------
dataset : DatasetH
DatasetH: the instance of DatasetH. None to prepare it again.
write : bool
will the write action be executed
ret_new : bool
will the updated data be returned
Returns
-------
Optional[object]
the updated data
|
update
|
python
|
microsoft/qlib
|
qlib/workflow/online/update.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/update.py
|
MIT
|
def get_update_data(self, dataset: Dataset) -> pd.DataFrame:
"""
return the updated data based on the given dataset
The difference between `get_update_data` and `update`:
- `get_update_data` only includes the data-specific parts
- `update` includes some general routine steps (e.g. preparing the dataset, checking)
"""
|
return the updated data based on the given dataset
The difference between `get_update_data` and `update`:
- `get_update_data` only includes the data-specific parts
- `update` includes some general routine steps (e.g. preparing the dataset, checking)
|
get_update_data
|
python
|
microsoft/qlib
|
qlib/workflow/online/update.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/update.py
|
MIT
|
def __init__(self, default_exp_name: str = None):
"""
Init OnlineToolR.
Args:
default_exp_name (str): the default experiment name.
"""
super().__init__()
self.default_exp_name = default_exp_name
|
Init OnlineToolR.
Args:
default_exp_name (str): the default experiment name.
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/online/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/utils.py
|
MIT
|
def set_online_tag(self, tag, recorder: Union[Recorder, List]):
"""
Set `tag` on the model's recorder to mark whether it is online.
Args:
tag (str): the tags in `ONLINE_TAG`, `NEXT_ONLINE_TAG`, `OFFLINE_TAG`
recorder (Union[Recorder, List]): a list of Recorder or an instance of Recorder
"""
if isinstance(recorder, Recorder):
recorder = [recorder]
for rec in recorder:
rec.set_tags(**{self.ONLINE_KEY: tag})
self.logger.info(f"Set {len(recorder)} models to '{tag}'.")
|
Set `tag` on the model's recorder to mark whether it is online.
Args:
tag (str): the tags in `ONLINE_TAG`, `NEXT_ONLINE_TAG`, `OFFLINE_TAG`
recorder (Union[Recorder, List]): a list of Recorder or an instance of Recorder
|
set_online_tag
|
python
|
microsoft/qlib
|
qlib/workflow/online/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/utils.py
|
MIT
|
def get_online_tag(self, recorder: Recorder) -> str:
"""
Given a model recorder, return its online tag.
Args:
recorder (Recorder): an instance of recorder
Returns:
str: the online tag
"""
tags = recorder.list_tags()
return tags.get(self.ONLINE_KEY, self.OFFLINE_TAG)
|
Given a model recorder, return its online tag.
Args:
recorder (Recorder): an instance of recorder
Returns:
str: the online tag
|
get_online_tag
|
python
|
microsoft/qlib
|
qlib/workflow/online/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/utils.py
|
MIT
|
def reset_online_tag(self, recorder: Union[Recorder, List], exp_name: str = None):
"""
Offline all models and set the recorders to 'online'.
Args:
recorder (Union[Recorder, List]):
the recorder you want to reset to 'online'.
exp_name (str): the experiment name. If None, then use default_exp_name.
"""
exp_name = self._get_exp_name(exp_name)
if isinstance(recorder, Recorder):
recorder = [recorder]
recs = list_recorders(exp_name)
self.set_online_tag(self.OFFLINE_TAG, list(recs.values()))
self.set_online_tag(self.ONLINE_TAG, recorder)
|
Offline all models and set the recorders to 'online'.
Args:
recorder (Union[Recorder, List]):
the recorder you want to reset to 'online'.
exp_name (str): the experiment name. If None, then use default_exp_name.
|
reset_online_tag
|
python
|
microsoft/qlib
|
qlib/workflow/online/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/utils.py
|
MIT
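A tag-management sketch, assuming the experiment name exists and already contains recorders.
from qlib.workflow.online.utils import OnlineToolR
tool = OnlineToolR(default_exp_name="rolling_exp")  # hypothetical name
current = tool.online_models()        # recorders currently tagged online
tool.reset_online_tag(current[-1:])   # offline everything, keep only the last one online
tool.update_online_pred()             # refresh predictions of the online models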
|
def online_models(self, exp_name: str = None) -> list:
"""
Get current `online` models
Args:
exp_name (str): the experiment name. If None, then use default_exp_name.
Returns:
list: a list of `online` models.
"""
exp_name = self._get_exp_name(exp_name)
return list(list_recorders(exp_name, lambda rec: self.get_online_tag(rec) == self.ONLINE_TAG).values())
|
Get current `online` models
Args:
exp_name (str): the experiment name. If None, then use default_exp_name.
Returns:
list: a list of `online` models.
|
online_models
|
python
|
microsoft/qlib
|
qlib/workflow/online/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/utils.py
|
MIT
|
def update_online_pred(self, to_date=None, from_date=None, exp_name: str = None):
"""
Update the predictions of online models to to_date.
Args:
to_date (pd.Timestamp): the pred before this date will be updated. None for updating to latest time in Calendar.
exp_name (str): the experiment name. If None, then use default_exp_name.
"""
exp_name = self._get_exp_name(exp_name)
online_models = self.online_models(exp_name=exp_name)
for rec in online_models:
try:
updater = PredUpdater(rec, to_date=to_date, from_date=from_date)
except LoadObjectError as e:
# skip the recorder without pred
self.logger.warn(f"An exception `{str(e)}` happened when load `pred.pkl`, skip it.")
continue
updater.update()
self.logger.info(f"Finished updating {len(online_models)} online model predictions of {exp_name}.")
|
Update the predictions of online models up to to_date.
Args:
to_date (pd.Timestamp): the pred before this date will be updated. None for updating to the latest time in Calendar.
from_date (pd.Timestamp): the update will start from this date. If None, the start date is inferred from the existing predictions.
exp_name (str): the experiment name. If None, then use default_exp_name.
|
update_online_pred
|
python
|
microsoft/qlib
|
qlib/workflow/online/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/online/utils.py
|
MIT
|
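A usage sketch of the online tool assembled from the methods above (hedged: the experiment name and recorder id are illustrative placeholders, and qlib plus an experiment containing a trained recorder are assumed to exist):

from qlib.workflow import R
from qlib.workflow.online.utils import OnlineToolR

otool = OnlineToolR(default_exp_name="online_exp")  # "online_exp" is a hypothetical experiment
rec = R.get_recorder(recorder_id="...", experiment_name="online_exp")  # id is a placeholder
otool.reset_online_tag(rec)       # offline every other model, set `rec` to online
print(otool.get_online_tag(rec))  # -> "online" (OnlineToolR.ONLINE_TAG)
print(otool.online_models())      # -> [rec]
otool.update_online_pred()        # extend online predictions to the latest calendar date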
def __init__(self, process_list=[]):
"""
Init Collector.
Args:
process_list (list or Callable): the list of processors or the instance of a processor to process the dict.
"""
if not isinstance(process_list, list):
process_list = [process_list]
self.process_list = process_list
|
Init Collector.
Args:
process_list (list or Callable): the list of processors or the instance of a processor to process the dict.
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/task/collect.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/collect.py
|
MIT
|
def process_collect(collected_dict, process_list=[], *args, **kwargs) -> dict:
"""
Apply a series of processors to the dict returned by ``collect`` and return a dict like {key: things}.
For example, you can group and ensemble.
Args:
collected_dict (dict): the dict return by `collect`
process_list (list or Callable): the list of processors or the instance of a processor to process the dict.
The processor order is the same as the list order.
For example: [Group1(..., Ensemble1()), Group2(..., Ensemble2())]
Returns:
dict: the dict after processing.
"""
if not isinstance(process_list, list):
process_list = [process_list]
result = {}
for artifact in collected_dict:
value = collected_dict[artifact]
for process in process_list:
if not callable(process):
raise NotImplementedError(f"{type(process)} is not supported in `process_collect`.")
value = process(value, *args, **kwargs)
result[artifact] = value
return result
|
Apply a series of processors to the dict returned by ``collect`` and return a dict like {key: things}.
For example, you can group and ensemble.
Args:
collected_dict (dict): the dict return by `collect`
process_list (list or Callable): the list of processors or the instance of a processor to process the dict.
The processor order is the same as the list order.
For example: [Group1(..., Ensemble1()), Group2(..., Ensemble2())]
Returns:
dict: the dict after processing.
|
process_collect
|
python
|
microsoft/qlib
|
qlib/workflow/task/collect.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/collect.py
|
MIT
|
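A small sketch of ``process_collect`` (hedged: it is called here as a static method, which its self-less signature suggests; the processor is a toy callable rather than qlib's Group/Ensemble classes):

from qlib.workflow.task.collect import Collector

def average(value):
    # toy processor: reduce {rec_key: number} to a single mean value
    return sum(value.values()) / len(value)

collected = {"pred": {"rec1": 1.0, "rec2": 3.0}}
result = Collector.process_collect(collected, process_list=[average])
print(result)  # {'pred': 2.0}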
def __call__(self, *args, **kwargs) -> dict:
"""
Do the workflow including ``collect`` and ``process_collect``
Returns:
dict: the dict after collecting and processing.
"""
collected = self.collect()
return self.process_collect(collected, self.process_list, *args, **kwargs)
|
Do the workflow including ``collect`` and ``process_collect``
Returns:
dict: the dict after collecting and processing.
|
__call__
|
python
|
microsoft/qlib
|
qlib/workflow/task/collect.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/collect.py
|
MIT
|
def __init__(self, collector_dict: Dict[str, Collector], process_list: List[Callable] = [], merge_func=None):
"""
Init MergeCollector.
Args:
collector_dict (Dict[str,Collector]): a dict like {collector_key: Collector}
process_list (List[Callable]): the list of processors or the instance of a processor to process the dict.
merge_func (Callable): a method to generate outermost key. The given params are ``collector_key`` from collector_dict and ``key`` from every collector after collecting.
None for using tuple to connect them, such as "ABC"+("a","b") -> ("ABC", ("a","b")).
"""
super().__init__(process_list=process_list)
self.collector_dict = collector_dict
self.merge_func = merge_func
|
Init MergeCollector.
Args:
collector_dict (Dict[str,Collector]): a dict like {collector_key: Collector}
process_list (List[Callable]): the list of processors or the instance of a processor to process the dict.
merge_func (Callable): a method to generate outermost key. The given params are ``collector_key`` from collector_dict and ``key`` from every collector after collecting.
None for using tuple to connect them, such as "ABC"+("a","b") -> ("ABC", ("a","b")).
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/task/collect.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/collect.py
|
MIT
|
def collect(self) -> dict:
"""
Collect all results of collector_dict and change the outermost key to a recombination key.
Returns:
dict: the dict after collecting.
"""
collect_dict = {}
for collector_key, collector in self.collector_dict.items():
tmp_dict = collector()
for key, value in tmp_dict.items():
if self.merge_func is not None:
collect_dict[self.merge_func(collector_key, key)] = value
else:
collect_dict[(collector_key, key)] = value
return collect_dict
|
Collect all results of collector_dict and change the outermost key to a recombination key.
Returns:
dict: the dict after collecting.
|
collect
|
python
|
microsoft/qlib
|
qlib/workflow/task/collect.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/collect.py
|
MIT
|
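A sketch of ``MergeCollector`` combining two collectors (hedged: ``DictCollector`` is a toy subclass defined only for this example):

from qlib.workflow.task.collect import Collector, MergeCollector

class DictCollector(Collector):
    """A toy collector that returns a fixed dict."""
    def __init__(self, data, **kwargs):
        super().__init__(**kwargs)
        self.data = data

    def collect(self) -> dict:
        return self.data

mc = MergeCollector({"A": DictCollector({"x": 1}), "B": DictCollector({"x": 2})})
print(mc.collect())  # {('A', 'x'): 1, ('B', 'x'): 2}

# a custom merge_func flattens the two-level key into a string
mc2 = MergeCollector(
    {"A": DictCollector({"x": 1})},
    merge_func=lambda collector_key, key: f"{collector_key}-{key}",
)
print(mc2.collect())  # {'A-x': 1}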
def __init__(
self,
experiment,
process_list=[],
rec_key_func=None,
rec_filter_func=None,
artifacts_path={"pred": "pred.pkl"},
artifacts_key=None,
list_kwargs={},
status: Iterable = {Recorder.STATUS_FI},
):
"""
Init RecorderCollector.
Args:
experiment:
(Experiment or str): an instance of an Experiment or the name of an Experiment
(Callable): a callable function, which returns a list of recorders
process_list (list or Callable): the list of processors or the instance of a processor to process the dict.
rec_key_func (Callable): a function to get the key of a recorder. If None, use the recorder id.
rec_filter_func (Callable, optional): filter the recorder by returning True or False. Defaults to None.
artifacts_path (dict, optional): the artifacts name and its path in Recorder. Defaults to {"pred": "pred.pkl"}.
artifacts_key (str or List, optional): the artifacts key you want to get. If None, get all artifacts.
list_kwargs (dict): arguments for the list_recorders function.
status (Iterable): only collect recorders with a specific status. None indicates collecting all the recorders.
"""
super().__init__(process_list=process_list)
if isinstance(experiment, str):
experiment = R.get_exp(experiment_name=experiment)
assert isinstance(experiment, (Experiment, Callable))
self.experiment = experiment
self.artifacts_path = artifacts_path
if rec_key_func is None:
def rec_key_func(rec):
return rec.info["id"]
if artifacts_key is None:
artifacts_key = list(self.artifacts_path.keys())
self.rec_key_func = rec_key_func
self.artifacts_key = artifacts_key
self.rec_filter_func = rec_filter_func
self.list_kwargs = list_kwargs
self.status = status
|
Init RecorderCollector.
Args:
experiment:
(Experiment or str): an instance of an Experiment or the name of an Experiment
(Callable): a callable function, which returns a list of recorders
process_list (list or Callable): the list of processors or the instance of a processor to process the dict.
rec_key_func (Callable): a function to get the key of a recorder. If None, use the recorder id.
rec_filter_func (Callable, optional): filter the recorder by returning True or False. Defaults to None.
artifacts_path (dict, optional): the artifacts name and its path in Recorder. Defaults to {"pred": "pred.pkl"}.
artifacts_key (str or List, optional): the artifacts key you want to get. If None, get all artifacts.
list_kwargs (dict): arguments for the list_recorders function.
status (Iterable): only collect recorders with a specific status. None indicates collecting all the recorders.
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/task/collect.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/collect.py
|
MIT
|
def collect(self, artifacts_key=None, rec_filter_func=None, only_exist=True) -> dict:
"""
Collect different artifacts based on recorder after filtering.
Args:
artifacts_key (str or List, optional): the artifacts key you want to get. If None, use the default.
rec_filter_func (Callable, optional): filter the recorder by returning True or False. If None, use the default.
only_exist (bool, optional): whether to collect an artifact only when a recorder actually has it.
If True, a recorder that raises an exception when loading will not be collected; if False, the exception will be raised.
Returns:
dict: the collected dict, like {artifact: {rec_key: object}}
"""
if artifacts_key is None:
artifacts_key = self.artifacts_key
if rec_filter_func is None:
rec_filter_func = self.rec_filter_func
if isinstance(artifacts_key, str):
artifacts_key = [artifacts_key]
collect_dict = {}
# filter records
if isinstance(self.experiment, Experiment):
with TimeInspector.logt("Time to `list_recorders` in RecorderCollector"):
recs = list(self.experiment.list_recorders(**self.list_kwargs).values())
elif isinstance(self.experiment, Callable):
recs = self.experiment()
recs = [
rec
for rec in recs
if (
(self.status is None or rec.status in self.status) and (rec_filter_func is None or rec_filter_func(rec))
)
]
logger = get_module_logger("RecorderCollector")
status_stat = defaultdict(int)
for r in recs:
status_stat[r.status] += 1
logger.info(f"Nubmer of recorders after filter: {status_stat}")
for rec in recs:
rec_key = self.rec_key_func(rec)
for key in artifacts_key:
if self.ART_KEY_RAW == key:
artifact = rec
else:
try:
artifact = rec.load_object(self.artifacts_path[key])
except LoadObjectError as e:
if only_exist:
# only collect existing artifact
logger.warning(f"Fail to load {self.artifacts_path[key]} and it is ignored.")
continue
raise e
# give user some warning if the values are overridden
cdd = collect_dict.setdefault(key, {})
if rec_key in cdd:
logger.warning(
f"key '{rec_key}' is duplicated. Previous value will be overrides. Please check you `rec_key_func`"
)
cdd[rec_key] = artifact
return collect_dict
|
Collect different artifacts based on recorder after filtering.
Args:
artifacts_key (str or List, optional): the artifacts key you want to get. If None, use the default.
rec_filter_func (Callable, optional): filter the recorder by returning True or False. If None, use the default.
only_exist (bool, optional): whether to collect an artifact only when a recorder actually has it.
If True, a recorder that raises an exception when loading will not be collected; if False, the exception will be raised.
Returns:
dict: the collected dict, like {artifact: {rec_key: object}}
|
collect
|
python
|
microsoft/qlib
|
qlib/workflow/task/collect.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/collect.py
|
MIT
|
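A usage sketch of ``RecorderCollector`` (hedged: the experiment name is illustrative, and an experiment with finished recorders that saved ``pred.pkl`` artifacts is assumed):

from qlib.workflow.task.collect import RecorderCollector

collector = RecorderCollector(
    experiment="my_exp",                      # hypothetical experiment name
    artifacts_path={"pred": "pred.pkl"},      # collect each recorder's saved predictions
    rec_key_func=lambda rec: rec.info["id"],  # same as the default key
)
collected = collector.collect()
# collected["pred"] maps each recorder key to its loaded pred.pkl object
for rec_key, pred in collected.get("pred", {}).items():
    print(rec_key, type(pred))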
def task_generator(tasks, generators) -> list:
"""
Use a list of TaskGen and a list of task templates to generate different tasks.
For example:
There are 3 task templates a, b, c and 2 TaskGens A, B. A generates 2 tasks from a template and B generates 3 tasks from a template.
task_generator([a, b, c], [A, B]) will finally generate 3*2*3 = 18 tasks.
Parameters
----------
tasks : List[dict] or dict
a list of task templates or a single task
generators : List[TaskGen] or TaskGen
a list of TaskGen or a single TaskGen
Returns
-------
list
a list of tasks
"""
if isinstance(tasks, dict):
tasks = [tasks]
if isinstance(generators, TaskGen):
generators = [generators]
# generate gen_task_list
for gen in generators:
new_task_list = []
for task in tasks:
new_task_list.extend(gen.generate(task))
tasks = new_task_list
return tasks
|
Use a list of TaskGen and a list of task templates to generate different tasks.
For example:
There are 3 task templates a, b, c and 2 TaskGens A, B. A generates 2 tasks from a template and B generates 3 tasks from a template.
task_generator([a, b, c], [A, B]) will finally generate 3*2*3 = 18 tasks.
Parameters
----------
tasks : List[dict] or dict
a list of task templates or a single task
generators : List[TaskGen] or TaskGen
a list of TaskGen or a single TaskGen
Returns
-------
list
a list of tasks
|
task_generator
|
python
|
microsoft/qlib
|
qlib/workflow/task/gen.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/gen.py
|
MIT
|
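A usage sketch combining ``task_generator`` with ``RollingGen`` (hedged: the template below is a trimmed, illustrative task dict, and qlib.init() is assumed to have been called so the trading calendar is available):

from qlib.workflow.task.gen import RollingGen, task_generator

# a trimmed task template in qlib's task format; dates are illustrative
task_template = {
    "model": {"class": "LGBModel", "module_path": "qlib.contrib.model.gbdt"},
    "dataset": {
        "class": "DatasetH",
        "module_path": "qlib.data.dataset",
        "kwargs": {
            "handler": "handler.pkl",  # a dumped handler; handler_mod tolerates this (TypeError branch)
            "segments": {
                "train": ("2008-01-01", "2014-12-31"),
                "valid": ("2015-01-01", "2016-12-31"),
                "test": ("2017-01-01", "2020-08-01"),
            },
        },
    },
}

rolling_tasks = task_generator(
    tasks=task_template,
    generators=RollingGen(step=40, rtype=RollingGen.ROLL_SD),  # sliding 40-trading-day windows
)
print(len(rolling_tasks))  # one task per rolling window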
def generate(self, task: dict) -> List[dict]:
"""
Generate different tasks based on a task template
Parameters
----------
task: dict
a task template
Returns
-------
typing.List[dict]:
A list of tasks
"""
|
Generate different tasks based on a task template
Parameters
----------
task: dict
a task template
Returns
-------
typing.List[dict]:
A list of tasks
|
generate
|
python
|
microsoft/qlib
|
qlib/workflow/task/gen.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/gen.py
|
MIT
|
def handler_mod(task: dict, rolling_gen):
"""
Help to modify the handler end time when using RollingGen.
It tries to handle the following case:
- The handler's data end_time is earlier than the dataset's test segments.
- To handle this, the handler's data end_time is extended.
If the handler's end_time is None, then it is not necessary to change its end time.
Args:
task (dict): a task template
rg (RollingGen): an instance of RollingGen
"""
try:
interval = rolling_gen.ta.cal_interval(
task["dataset"]["kwargs"]["handler"]["kwargs"]["end_time"],
task["dataset"]["kwargs"]["segments"][rolling_gen.test_key][1],
)
# if end_time < the end of test_segments, then change end_time to allow load more data
if interval < 0:
task["dataset"]["kwargs"]["handler"]["kwargs"]["end_time"] = copy.deepcopy(
task["dataset"]["kwargs"]["segments"][rolling_gen.test_key][1]
)
except KeyError:
# Maybe the dataset does not have a handler; then do nothing.
pass
except TypeError:
# Maybe the handler is a string. `"handler.pkl"["kwargs"]` will raise TypeError
# e.g. a dumped file like file:///<file>/
pass
|
Help to modify the handler end time when using RollingGen.
It tries to handle the following case:
- The handler's data end_time is earlier than the dataset's test segments.
- To handle this, the handler's data end_time is extended.
If the handler's end_time is None, then it is not necessary to change its end time.
Args:
task (dict): a task template
rg (RollingGen): an instance of RollingGen
|
handler_mod
|
python
|
microsoft/qlib
|
qlib/workflow/task/gen.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/gen.py
|
MIT
|
def trunc_segments(ta: TimeAdjuster, segments: Dict[str, pd.Timestamp], days, test_key="test"):
"""
To avoid the leakage of future information, the segments should be truncated according to the test start_time
NOTE:
This function will change segments **inplace**
"""
# adjust segment
test_start = min(t for t in segments[test_key] if t is not None)
for k in list(segments.keys()):
if k != test_key:
segments[k] = ta.truncate(segments[k], test_start, days)
|
To avoid the leakage of future information, the segments should be truncated according to the test start_time
NOTE:
This function will change segments **inplace**
|
trunc_segments
|
python
|
microsoft/qlib
|
qlib/workflow/task/gen.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/gen.py
|
MIT
|
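A small sketch of ``trunc_segments`` (hedged: assumes qlib.init() has been called so ``TimeAdjuster`` can load the trading calendar; dates are illustrative):

import pandas as pd
from qlib.workflow.task.utils import TimeAdjuster
from qlib.workflow.task.gen import trunc_segments

ta = TimeAdjuster(future=True)
segments = {
    "train": (pd.Timestamp("2008-01-01"), pd.Timestamp("2014-12-31")),
    "test": (pd.Timestamp("2015-01-01"), pd.Timestamp("2015-12-31")),
}
trunc_segments(ta, segments, days=2)  # modifies `segments` inplace
print(segments["train"])  # train now ends at least 2 trading days before the test start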
def __init__(
self,
step: int = 40,
rtype: str = ROLL_EX,
ds_extra_mod_func: Union[None, Callable] = handler_mod,
test_key="test",
train_key="train",
trunc_days: int = None,
task_copy_func: Callable = copy.deepcopy,
):
"""
Generate tasks for rolling
Parameters
----------
step : int
step to rolling
rtype : str
rolling type (expanding, sliding)
ds_extra_mod_func: Callable
A method like: handler_mod(task: dict, rg: RollingGen)
Do some extra action after generating a task. For example, use ``handler_mod`` to modify the end time of the handler of a dataset.
trunc_days: int
trunc some data to avoid future information leakage
task_copy_func: Callable
the function to copy the entire task. This is very useful when users want to share something between tasks
"""
self.step = step
self.rtype = rtype
self.ds_extra_mod_func = ds_extra_mod_func
self.ta = TimeAdjuster(future=True)
self.test_key = test_key
self.train_key = train_key
self.trunc_days = trunc_days
self.task_copy_func = task_copy_func
|
Generate tasks for rolling
Parameters
----------
step : int
step to rolling
rtype : str
rolling type (expanding, sliding)
ds_extra_mod_func: Callable
A method like: handler_mod(task: dict, rg: RollingGen)
Do some extra action after generating a task. For example, use ``handler_mod`` to modify the end time of the handler of a dataset.
trunc_days: int
trunc some data to avoid future information leakage
task_copy_func: Callable
the function to copy the entire task. This is very useful when users want to share something between tasks
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/task/gen.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/gen.py
|
MIT
|
def gen_following_tasks(self, task: dict, test_end: pd.Timestamp) -> List[dict]:
"""
Generate the following rolling tasks for `task` until test_end
Parameters
----------
task : dict
Qlib task format
test_end : pd.Timestamp
the latest rolling task includes `test_end`
Returns
-------
List[dict]:
the following tasks of `task` (`task` itself is excluded)
"""
prev_seg = task["dataset"]["kwargs"]["segments"]
while True:
segments = {}
try:
for k, seg in prev_seg.items():
# decide how to shift
# expanding only for train data, the segments size of test data and valid data won't change
if k == self.train_key and self.rtype == self.ROLL_EX:
rtype = self.ta.SHIFT_EX
else:
rtype = self.ta.SHIFT_SD
# shift the segments data
segments[k] = self.ta.shift(seg, step=self.step, rtype=rtype)
if segments[self.test_key][0] > test_end:
break
except KeyError:
# We reach the end of tasks
# No more rolling
break
prev_seg = segments
t = self.task_copy_func(task) # deepcopy is necessary to avoid replace task inplace
self._update_task_segs(t, segments)
yield t
|
Generate the following rolling tasks for `task` until test_end
Parameters
----------
task : dict
Qlib task format
test_end : pd.Timestamp
the latest rolling task includes `test_end`
Returns
-------
List[dict]:
the following tasks of `task` (`task` itself is excluded)
|
gen_following_tasks
|
python
|
microsoft/qlib
|
qlib/workflow/task/gen.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/gen.py
|
MIT
|
def generate(self, task: dict) -> List[dict]:
"""
Converting the task into a rolling task.
Parameters
----------
task: dict
A dict describing a task. For example:
.. code-block:: python
DEFAULT_TASK = {
"model": {
"class": "LGBModel",
"module_path": "qlib.contrib.model.gbdt",
},
"dataset": {
"class": "DatasetH",
"module_path": "qlib.data.dataset",
"kwargs": {
"handler": {
"class": "Alpha158",
"module_path": "qlib.contrib.data.handler",
"kwargs": {
"start_time": "2008-01-01",
"end_time": "2020-08-01",
"fit_start_time": "2008-01-01",
"fit_end_time": "2014-12-31",
"instruments": "csi100",
},
},
"segments": {
"train": ("2008-01-01", "2014-12-31"),
"valid": ("2015-01-01", "2016-12-20"), # Please avoid leaking the future test data into validation
"test": ("2017-01-01", "2020-08-01"),
},
},
},
"record": [
{
"class": "SignalRecord",
"module_path": "qlib.workflow.record_temp",
},
]
}
Returns
----------
List[dict]: a list of tasks
"""
res = []
t = self.task_copy_func(task)
# calculate segments
# First rolling
# 1) prepare the end point
segments: dict = copy.deepcopy(self.ta.align_seg(t["dataset"]["kwargs"]["segments"]))
test_end = transform_end_date(segments[self.test_key][1])
# 2) and init test segments
test_start_idx = self.ta.align_idx(segments[self.test_key][0])
segments[self.test_key] = (self.ta.get(test_start_idx), self.ta.get(test_start_idx + self.step - 1))
if self.trunc_days is not None:
trunc_segments(self.ta, segments, self.trunc_days, self.test_key)
# update segments of this task
self._update_task_segs(t, segments)
res.append(t)
# Update the following rolling
res.extend(self.gen_following_tasks(t, test_end))
return res
|
Converting the task into a rolling task.
Parameters
----------
task: dict
A dict describing a task. For example:
.. code-block:: python
DEFAULT_TASK = {
"model": {
"class": "LGBModel",
"module_path": "qlib.contrib.model.gbdt",
},
"dataset": {
"class": "DatasetH",
"module_path": "qlib.data.dataset",
"kwargs": {
"handler": {
"class": "Alpha158",
"module_path": "qlib.contrib.data.handler",
"kwargs": {
"start_time": "2008-01-01",
"end_time": "2020-08-01",
"fit_start_time": "2008-01-01",
"fit_end_time": "2014-12-31",
"instruments": "csi100",
},
},
"segments": {
"train": ("2008-01-01", "2014-12-31"),
"valid": ("2015-01-01", "2016-12-20"), # Please avoid leaking the future test data into validation
"test": ("2017-01-01", "2020-08-01"),
},
},
},
"record": [
{
"class": "SignalRecord",
"module_path": "qlib.workflow.record_temp",
},
]
}
Returns
----------
List[dict]: a list of tasks
|
generate
|
python
|
microsoft/qlib
|
qlib/workflow/task/gen.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/gen.py
|
MIT
|
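A sketch of inspecting the segments that ``generate`` produces, reusing the ``task_template`` from the ``task_generator`` sketch above (illustrative, assuming qlib.init() has been called):

from qlib.workflow.task.gen import RollingGen

rg = RollingGen(step=40, rtype=RollingGen.ROLL_EX)  # expanding train window
for t in rg.generate(task_template)[:3]:
    print(t["dataset"]["kwargs"]["segments"])
# with ROLL_EX the train start stays fixed while valid/test slide forward by `step` trading days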
def __init__(self, horizon: List[int] = [5], label_leak_n=2):
"""
This task generator tries to generate tasks for different horizons based on an existing task
Parameters
----------
horizon : List[int]
the possible horizons of the tasks
label_leak_n : int
How many future days it takes to get the complete label after the day the prediction is made.
For example:
- The user makes a prediction on day `T` (after getting the close price on `T`)
- The label is the return of buying the stock on `T + 1` and selling it on `T + 2`
- `label_leak_n` will be 2 (i.e. two days of information are leaked to leverage this sample)
"""
self.horizon = list(horizon)
self.label_leak_n = label_leak_n
self.ta = TimeAdjuster()
self.test_key = "test"
|
This task generator tries to generate tasks for different horizons based on an existing task
Parameters
----------
horizon : List[int]
the possible horizons of the tasks
label_leak_n : int
How many future days it takes to get the complete label after the day the prediction is made.
For example:
- The user makes a prediction on day `T` (after getting the close price on `T`)
- The label is the return of buying the stock on `T + 1` and selling it on `T + 2`
- `label_leak_n` will be 2 (i.e. two days of information are leaked to leverage this sample)
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/task/gen.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/gen.py
|
MIT
|
def set_horizon(self, task: dict, hr: int):
"""
This method is designed to change the task **in place**
Parameters
----------
task : dict
Qlib's task
hr : int
the horizon of task
"""
|
This method is designed to change the task **in place**
Parameters
----------
task : dict
Qlib's task
hr : int
the horizon of task
|
set_horizon
|
python
|
microsoft/qlib
|
qlib/workflow/task/gen.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/gen.py
|
MIT
|
def __init__(self, task_pool: str):
"""
Init Task Manager. Remember to set the MongoDB URL and database name first.
A TaskManager instance serves a specific task pool.
The static method of this module serves the whole MongoDB.
Parameters
----------
task_pool: str
the name of Collection in MongoDB
"""
self.task_pool: pymongo.collection.Collection = getattr(get_mongodb(), task_pool)
self.logger = get_module_logger(self.__class__.__name__)
self.logger.info(f"task_pool:{task_pool}")
|
Init Task Manager. Remember to set the MongoDB URL and database name first.
A TaskManager instance serves a specific task pool.
The static method of this module serves the whole MongoDB.
Parameters
----------
task_pool: str
the name of Collection in MongoDB
|
__init__
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
def _decode_task(self, task):
"""
_decode_task is a deserialization tool.
MongoDB stores Python objects as pickled binary fields, so this converts them back into Python objects through pickle.
Parameters
----------
task : dict
task information
Returns
-------
dict
the task after decoding its pickled fields
"""
for prefix in self.ENCODE_FIELDS_PREFIX:
for k in list(task.keys()):
if k.startswith(prefix):
task[k] = pickle.loads(task[k])
return task
|
_decode_task is a deserialization tool.
MongoDB stores Python objects as pickled binary fields, so this converts them back into Python objects through pickle.
Parameters
----------
task : dict
task information
Returns
-------
dict
the task after decoding its pickled fields
|
_decode_task
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
def _decode_query(self, query):
"""
If the query includes any `_id`, then it needs `ObjectId` to decode.
For example, when using TrainerRM, it needs query `{"_id": {"$in": _id_list}}`. Then we need to apply `ObjectId` to every `_id` in `_id_list`.
Args:
query (dict): query dict. Defaults to {}.
Returns:
dict: the query after decoding.
"""
if "_id" in query:
if isinstance(query["_id"], dict):
for key in query["_id"]:
query["_id"][key] = [ObjectId(i) for i in query["_id"][key]]
else:
query["_id"] = ObjectId(query["_id"])
return query
|
If the query includes any `_id`, then it needs `ObjectId` to decode.
For example, when using TrainerRM, it needs query `{"_id": {"$in": _id_list}}`. Then we need to apply `ObjectId` to every `_id` in `_id_list`.
Args:
query (dict): query dict. Defaults to {}.
Returns:
dict: the query after decoding.
|
_decode_query
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
def replace_task(self, task, new_task):
"""
Use a new task to replace an old one
Args:
task: old task
new_task: new task
"""
new_task = self._encode_task(new_task)
query = {"_id": ObjectId(task["_id"])}
try:
self.task_pool.replace_one(query, new_task)
except InvalidDocument:
# the "filter" field may contain objects MongoDB cannot encode; stringify it and retry
new_task["filter"] = self._dict_to_str(new_task["filter"])
self.task_pool.replace_one(query, new_task)
|
Use a new task to replace an old one
Args:
task: old task
new_task: new task
|
replace_task
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
def insert_task(self, task):
"""
Insert a task.
Args:
task: the task waiting to be inserted
Returns:
pymongo.results.InsertOneResult
"""
try:
insert_result = self.task_pool.insert_one(task)
except InvalidDocument:
task["filter"] = self._dict_to_str(task["filter"])
insert_result = self.task_pool.insert_one(task)
return insert_result
|
Insert a task.
Args:
task: the task waiting to be inserted
Returns:
pymongo.results.InsertOneResult
|
insert_task
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
def insert_task_def(self, task_def):
"""
Insert a task to task_pool
Parameters
----------
task_def: dict
the task definition
Returns
-------
pymongo.results.InsertOneResult
"""
task = self._encode_task(
{
"def": task_def,
"filter": task_def, # FIXME: catch the raised error
"status": self.STATUS_WAITING,
}
)
insert_result = self.insert_task(task)
return insert_result
|
Insert a task to task_pool
Parameters
----------
task_def: dict
the task definition
Returns
-------
pymongo.results.InsertOneResult
|
insert_task_def
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
def create_task(self, task_def_l, dry_run=False, print_nt=False) -> List[str]:
"""
If the tasks in task_def_l are new, then insert new tasks into the task_pool, and record inserted_id.
If a task is not new, then just query its _id.
Parameters
----------
task_def_l: list
a list of task
dry_run: bool
if True, the new tasks are not inserted into the task pool
print_nt: bool
if True, print the new tasks
Returns
-------
List[str]
a list of the _id of task_def_l
"""
new_tasks = []
_id_list = []
for t in task_def_l:
try:
r = self.task_pool.find_one({"filter": t})
except InvalidDocument:
r = self.task_pool.find_one({"filter": self._dict_to_str(t)})
# When r is None, it indicates that t is a new task
if r is None:
new_tasks.append(t)
if not dry_run:
insert_result = self.insert_task_def(t)
_id_list.append(insert_result.inserted_id)
else:
_id_list.append(None)
else:
_id_list.append(self._decode_task(r)["_id"])
self.logger.info(f"Total Tasks: {len(task_def_l)}, New Tasks: {len(new_tasks)}")
if print_nt: # print new task
for t in new_tasks:
print(t)
if dry_run:
return []
return _id_list
|
If the tasks in task_def_l are new, then insert new tasks into the task_pool, and record inserted_id.
If a task is not new, then just query its _id.
Parameters
----------
task_def_l: list
a list of task
dry_run: bool
if True, the new tasks are not inserted into the task pool
print_nt: bool
if True, print the new tasks
Returns
-------
List[str]
a list of the _id of task_def_l
|
create_task
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
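A usage sketch of the task pool (hedged: assumes the MongoDB connection has been configured in qlib's config, e.g. C["mongo"] = {"task_url": ..., "task_db_name": ...}; the pool name is a hypothetical collection name, and ``rolling_tasks`` comes from the ``task_generator`` sketch above):

from qlib.workflow.task.manage import TaskManager

tm = TaskManager(task_pool="rolling_tasks_pool")  # hypothetical collection name
_ids = tm.create_task(rolling_tasks)  # new tasks are inserted; existing ones just return their _id
print(f"tracked {len(_ids)} task ids")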
def fetch_task(self, query={}, status=STATUS_WAITING) -> dict:
"""
Use query to fetch tasks.
Args:
query (dict, optional): query dict. Defaults to {}.
status (str, optional): the status of the tasks to fetch. Defaults to STATUS_WAITING.
Returns:
dict: a task(document in collection) after decoding
"""
query = query.copy()
query = self._decode_query(query)
query.update({"status": status})
task = self.task_pool.find_one_and_update(
query, {"$set": {"status": self.STATUS_RUNNING}}, sort=[("priority", pymongo.DESCENDING)]
)
# null would come first when sorting ASCENDING, so we sort DESCENDING: the larger the number, the higher the priority
if task is None:
return None
task["status"] = self.STATUS_RUNNING
return self._decode_task(task)
|
Use query to fetch tasks.
Args:
query (dict, optional): query dict. Defaults to {}.
status (str, optional): the status of the tasks to fetch. Defaults to STATUS_WAITING.
Returns:
dict: a task(document in collection) after decoding
|
fetch_task
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
def safe_fetch_task(self, query={}, status=STATUS_WAITING):
"""
Fetch a task from the task_pool using query, within a context manager
Parameters
----------
query: dict
the dict of query
Returns
-------
dict: a task(document in collection) after decoding
"""
task = self.fetch_task(query=query, status=status)
try:
yield task
except (Exception, KeyboardInterrupt): # KeyboardInterrupt is not a subclass of Exception
if task is not None:
self.logger.info("Returning task before raising error")
self.return_task(task, status=status) # return task as the original status
self.logger.info("Task returned")
raise
|
Fetch a task from the task_pool using query, within a context manager
Parameters
----------
query: dict
the dict of query
Returns
-------
dict: a task(document in collection) after decoding
|
safe_fetch_task
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
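A sketch of a worker loop built on ``safe_fetch_task`` (hedged: ``run_task_def`` is a hypothetical training function, ``tm`` is the TaskManager from the sketch above, and ``commit_task_res`` is TaskManager's method for storing a result and marking a task done):

while True:
    with tm.safe_fetch_task() as task:  # the task is returned to the pool if an exception is raised
        if task is None:
            break  # no more waiting tasks
        res = run_task_def(task["def"])  # hypothetical: run the model/dataset described by the task
        tm.commit_task_res(task, res)    # store the result and mark the task as done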
def query(self, query={}, decode=True):
"""
Query tasks in the collection.
This function may raise the exception `pymongo.errors.CursorNotFound: cursor id not found` if it takes too long to iterate the generator.
Example: python -m qlib.workflow.task.manage -t <your task pool> query '{"_id": "615498be837d0053acbc5d58"}'
Parameters
----------
query: dict
the dict of query
decode: bool
whether to decode the yielded tasks (the current implementation always decodes)
Returns
-------
dict: a task(document in collection) after decoding
"""
query = query.copy()
query = self._decode_query(query)
for t in self.task_pool.find(query):
yield self._decode_task(t)
|
Query tasks in the collection.
This function may raise the exception `pymongo.errors.CursorNotFound: cursor id not found` if it takes too long to iterate the generator.
Example: python -m qlib.workflow.task.manage -t <your task pool> query '{"_id": "615498be837d0053acbc5d58"}'
Parameters
----------
query: dict
the dict of query
decode: bool
whether to decode the yielded tasks (the current implementation always decodes)
Returns
-------
dict: a task(document in collection) after decoding
|
query
|
python
|
microsoft/qlib
|
qlib/workflow/task/manage.py
|
https://github.com/microsoft/qlib/blob/master/qlib/workflow/task/manage.py
|
MIT
|
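A sketch of iterating over the pool with ``query`` (``tm`` and the ``TaskManager`` import come from the task-pool sketch above):

for task in tm.query({"status": TaskManager.STATUS_WAITING}):
    print(task["_id"], task["status"])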