code | func_name | language | repo | path | url | license
---|---|---|---|---|---|---
def train(
    simulator_fn: Callable[[InitialStateType], Simulator],
    state_interpreter: StateInterpreter,
    action_interpreter: ActionInterpreter,
    initial_states: Sequence[InitialStateType],
    policy: BasePolicy,
    reward: Reward,
    vessel_kwargs: Dict[str, Any],
    trainer_kwargs: Dict[str, Any],
) -> None:
    """Train a policy with the parallelism provided by the RL framework.

    Experimental API. Parameters might change shortly.

    Parameters
    ----------
    simulator_fn
        Callable receiving an initial state (seed), returning a simulator.
    state_interpreter
        Interprets the states of simulators.
    action_interpreter
        Interprets the policy actions.
    initial_states
        Initial states to iterate over. Every state will be run exactly once.
    policy
        Policy to train.
    reward
        Reward function.
    vessel_kwargs
        Keyword arguments passed to :class:`TrainingVessel`, e.g., ``episode_per_iter``.
    trainer_kwargs
        Keyword arguments passed to :class:`Trainer`, e.g., ``finite_env_type``, ``concurrency``.
    """
    vessel = TrainingVessel(
        simulator_fn=simulator_fn,
        state_interpreter=state_interpreter,
        action_interpreter=action_interpreter,
        policy=policy,
        train_initial_states=initial_states,
        reward=reward,
        **vessel_kwargs,
    )
    trainer = Trainer(**trainer_kwargs)
    trainer.fit(vessel)
| train | python | microsoft/qlib | qlib/rl/trainer/api.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/api.py | MIT |
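A minimal usage sketch (not from the qlib docs): every component below is a hypothetical placeholder that a real caller must supply; only the keyword names follow the signature above, and the import path assumes this function is re-exported from the package shown.

from qlib.rl.trainer import train  # api.py above; assumed to be re-exported here

# MySimulator, MyStateInterpreter, MyActionInterpreter, MyReward, my_policy and
# my_initial_states are hypothetical placeholders, not qlib names.
train(
    simulator_fn=MySimulator,                  # Callable[[InitialStateType], Simulator]
    state_interpreter=MyStateInterpreter(),
    action_interpreter=MyActionInterpreter(),
    initial_states=my_initial_states,          # each state is run exactly once
    policy=my_policy,
    reward=MyReward(),
    vessel_kwargs={"episode_per_iter": 100},   # forwarded to TrainingVessel
    trainer_kwargs={"max_iters": 10, "finite_env_type": "subproc", "concurrency": 4},
)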
def backtest(
    simulator_fn: Callable[[InitialStateType], Simulator],
    state_interpreter: StateInterpreter,
    action_interpreter: ActionInterpreter,
    initial_states: Sequence[InitialStateType],
    policy: BasePolicy,
    logger: LogWriter | List[LogWriter],
    reward: Reward | None = None,
    finite_env_type: FiniteEnvType = "subproc",
    concurrency: int = 2,
) -> None:
    """Backtest with the parallelism provided by the RL framework.

    Experimental API. Parameters might change shortly.

    Parameters
    ----------
    simulator_fn
        Callable receiving an initial state (seed), returning a simulator.
    state_interpreter
        Interprets the states of simulators.
    action_interpreter
        Interprets the policy actions.
    initial_states
        Initial states to iterate over. Every state will be run exactly once.
    policy
        Policy to test.
    logger
        Logger to record the backtest results. A logger must be present because,
        without one, all information will be lost.
    reward
        Optional reward function. For backtest, this is used only to test
        the rewards and log them.
    finite_env_type
        Type of finite env implementation.
    concurrency
        Number of parallel workers.
    """
    vessel = TrainingVessel(
        simulator_fn=simulator_fn,
        state_interpreter=state_interpreter,
        action_interpreter=action_interpreter,
        policy=policy,
        test_initial_states=initial_states,
        reward=cast(Reward, reward),  # ignore None
    )
    trainer = Trainer(
        finite_env_type=finite_env_type,
        concurrency=concurrency,
        loggers=logger,
    )
    trainer.test(vessel)
| backtest | python | microsoft/qlib | qlib/rl/trainer/api.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/api.py | MIT |
def on_train_end(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
    """Called when the training ends.

    To access all outputs produced during training, cache the data in either
    the trainer or the vessel, and post-process it in this hook.
    """
| on_train_end | python | microsoft/qlib | qlib/rl/trainer/callbacks.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/callbacks.py | MIT |
def on_iter_end(self, trainer: Trainer, vessel: TrainingVesselBase) -> None:
    """Called at the end of every iteration.

    This is called **after** the bump of ``current_iter``,
    when the previous iteration is considered complete.
    """
| on_iter_end | python | microsoft/qlib | qlib/rl/trainer/callbacks.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/callbacks.py | MIT |
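A sketch of how these hooks compose: a hypothetical callback that requests a stop after a fixed number of completed iterations. It assumes a Callback base class in qlib/rl/trainer/callbacks.py and relies on trainer.current_iter / trainer.should_stop, both visible in the trainer code later in this listing.

from qlib.rl.trainer.callbacks import Callback  # assumed import path

class StopAfterNIters(Callback):
    """Illustrative only: stop training after ``n`` completed iterations."""

    def __init__(self, n: int) -> None:
        self.n = n

    def on_iter_end(self, trainer, vessel) -> None:
        # current_iter has already been bumped when this hook fires,
        # so it equals the number of completed iterations.
        if trainer.current_iter >= self.n:
            trainer.should_stop = True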
def initialize(self):
    """Initialize the whole training process.

    The states here should be synchronized with state_dict.
    """
    self.should_stop = False
    self.current_iter = 0
    self.current_episode = 0
    self.current_stage = "train"
| initialize | python | microsoft/qlib | qlib/rl/trainer/trainer.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/trainer.py | MIT |
def state_dict(self) -> dict:
    """Put every state of the current training into a dict, at best effort.

    It doesn't try to handle all the possible kinds of states in the middle of one training collect.
    For most cases, at the end of each iteration, things should usually be correct.

    Note that it's also intended behavior that replay buffer data in the collector will be lost.
    """
    return {
        "vessel": self.vessel.state_dict(),
        "callbacks": {name: callback.state_dict() for name, callback in self.named_callbacks().items()},
        "loggers": {name: logger.state_dict() for name, logger in self.named_loggers().items()},
        "should_stop": self.should_stop,
        "current_iter": self.current_iter,
        "current_episode": self.current_episode,
        "current_stage": self.current_stage,
        "metrics": self.metrics,
    }
| state_dict | python | microsoft/qlib | qlib/rl/trainer/trainer.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/trainer.py | MIT |
def load_state_dict(self, state_dict: dict) -> None:
    """Load all states into the current trainer."""
    self.vessel.load_state_dict(state_dict["vessel"])
    for name, callback in self.named_callbacks().items():
        callback.load_state_dict(state_dict["callbacks"][name])
    for name, logger in self.named_loggers().items():
        logger.load_state_dict(state_dict["loggers"][name])
    self.should_stop = state_dict["should_stop"]
    self.current_iter = state_dict["current_iter"]
    self.current_episode = state_dict["current_episode"]
    self.current_stage = state_dict["current_stage"]
    self.metrics = state_dict["metrics"]
| load_state_dict | python | microsoft/qlib | qlib/rl/trainer/trainer.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/trainer.py | MIT |
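state_dict / load_state_dict pair with fit(ckpt_path=...) below. A sketch of the round trip, given an existing trainer/vessel pair, assuming torch for serialization (fit itself calls torch.load) and a hypothetical file name:

from pathlib import Path
import torch

# Persist the trainer state, e.g., from a callback at the end of an iteration.
torch.save(trainer.state_dict(), "checkpoint.pt")  # "checkpoint.pt" is a hypothetical path

# Resume later: fit() restores the state via load_state_dict() when ckpt_path is given.
trainer.fit(vessel, ckpt_path=Path("checkpoint.pt"))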
def fit(self, vessel: TrainingVesselBase, ckpt_path: Path | None = None) -> None:
    """Train the RL policy upon the defined simulator.

    Parameters
    ----------
    vessel
        A bundle of all elements used in training.
    ckpt_path
        Load a pre-trained / paused training checkpoint.
    """
    self.vessel = vessel
    vessel.assign_trainer(self)

    if ckpt_path is not None:
        _logger.info("Resuming states from %s", str(ckpt_path))
        self.load_state_dict(torch.load(ckpt_path, weights_only=False))
    else:
        self.initialize()

    self._call_callback_hooks("on_fit_start")
    while not self.should_stop:
        msg = f"\n{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\tTrain iteration {self.current_iter + 1}/{self.max_iters}"
        _logger.info(msg)

        self.initialize_iter()
        self._call_callback_hooks("on_iter_start")

        self.current_stage = "train"
        self._call_callback_hooks("on_train_start")

        # TODO: Add a feature that supports reloading the training environment every few iterations.
        with _wrap_context(vessel.train_seed_iterator()) as iterator:
            vector_env = self.venv_from_iterator(iterator)
            self.vessel.train(vector_env)
            del vector_env  # FIXME: Explicitly delete this object to avoid memory leak.
        self._call_callback_hooks("on_train_end")

        if self.val_every_n_iters is not None and (self.current_iter + 1) % self.val_every_n_iters == 0:
            # Validation loop
            self.current_stage = "val"
            self._call_callback_hooks("on_validate_start")
            with _wrap_context(vessel.val_seed_iterator()) as iterator:
                vector_env = self.venv_from_iterator(iterator)
                self.vessel.validate(vector_env)
                del vector_env  # FIXME: Explicitly delete this object to avoid memory leak.
            self._call_callback_hooks("on_validate_end")

        # This iteration is considered complete; bump the iteration counter.
        self.current_iter += 1
        if self.max_iters is not None and self.current_iter >= self.max_iters:
            self.should_stop = True

        self._call_callback_hooks("on_iter_end")
    self._call_callback_hooks("on_fit_end")
| fit | python | microsoft/qlib | qlib/rl/trainer/trainer.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/trainer.py | MIT |
def test(self, vessel: TrainingVesselBase) -> None:
    """Test the RL policy against the simulator.

    The simulator will be fed with data generated in ``test_seed_iterator``.

    Parameters
    ----------
    vessel
        A bundle of all related elements.
    """
    self.vessel = vessel
    vessel.assign_trainer(self)

    self.initialize_iter()

    self.current_stage = "test"
    self._call_callback_hooks("on_test_start")
    with _wrap_context(vessel.test_seed_iterator()) as iterator:
        vector_env = self.venv_from_iterator(iterator)
        self.vessel.test(vector_env)
        del vector_env  # FIXME: Explicitly delete this object to avoid memory leak.
    self._call_callback_hooks("on_test_end")
| test | python | microsoft/qlib | qlib/rl/trainer/trainer.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/trainer.py | MIT |
def venv_from_iterator(self, iterator: Iterable[InitialStateType]) -> FiniteVectorEnv:
    """Create a vectorized environment from the iterator and the training vessel."""

    def env_factory():
        # FIXME: state_interpreter and action_interpreter are stateful (holding a weakref of env),
        # and could be thread-unsafe.
        # I'm not sure whether it's a design flaw.
        # I'll rethink this when designing the trainer.

        if self.finite_env_type == "dummy":
            # We could only experience the "threading-unsafe" problem in dummy.
            state = copy.deepcopy(self.vessel.state_interpreter)
            action = copy.deepcopy(self.vessel.action_interpreter)
            rew = copy.deepcopy(self.vessel.reward)
        else:
            state = self.vessel.state_interpreter
            action = self.vessel.action_interpreter
            rew = self.vessel.reward

        return EnvWrapper(
            self.vessel.simulator_fn,
            state,
            action,
            iterator,
            rew,
            logger=LogCollector(min_loglevel=self._min_loglevel()),
        )

    return vectorize_env(
        env_factory,
        self.finite_env_type,
        self.concurrency,
        self.loggers,
    )
| venv_from_iterator | python | microsoft/qlib | qlib/rl/trainer/trainer.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/trainer.py | MIT |
@contextmanager  # restored: this generator is used as a context manager (see fit/test above)
def _wrap_context(obj):
    """Make any object a (possibly dummy) context manager."""
    if isinstance(obj, AbstractContextManager):
        # obj has __enter__ and __exit__
        with obj as ctx:
            yield ctx
    else:
        yield obj
| _wrap_context | python | microsoft/qlib | qlib/rl/trainer/trainer.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/trainer.py | MIT |
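Usage sketch (runnable as-is, given the contextmanager decorator above): a real context manager and a plain object are handled uniformly.

from io import StringIO

with _wrap_context(StringIO("abc")) as f:  # StringIO has __enter__/__exit__, so it is entered
    print(f.read())                        # -> abc

with _wrap_context([1, 2, 3]) as seq:      # a plain object is yielded unchanged
    print(sum(seq))                        # -> 6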
def _named_collection(seq: Sequence[T]) -> Dict[str, T]:
    """Convert a list into a dict, where each item is named with its type."""
    res = {}
    retry_cnt: collections.Counter = collections.Counter()
    for item in seq:
        typename = type(item).__name__.lower()
        key = typename if retry_cnt[typename] == 0 else f"{typename}{retry_cnt[typename]}"
        retry_cnt[typename] += 1
        res[key] = item
    return res
| _named_collection | python | microsoft/qlib | qlib/rl/trainer/trainer.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/trainer.py | MIT |
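A quick, runnable illustration of the naming scheme: duplicate types get a numeric suffix starting from 1.

class ConsoleWriter: ...
class PickleWriter: ...

named = _named_collection([ConsoleWriter(), PickleWriter(), ConsoleWriter()])
print(sorted(named))
# ['consolewriter', 'consolewriter1', 'picklewriter']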
def train(self, vector_env: FiniteVectorEnv) -> Dict[str, Any]:
    """Create a collector and collect ``episode_per_iter`` episodes.

    Update the policy on the collected replay buffer.
    """
    self.policy.train()

    with vector_env.collector_guard():
        collector = Collector(
            self.policy, vector_env, VectorReplayBuffer(self.buffer_size, len(vector_env)), exploration_noise=True
        )

        # The number of episodes collected in each training iteration can be overridden by fast dev run.
        if self.trainer.fast_dev_run is not None:
            episodes = self.trainer.fast_dev_run
        else:
            episodes = self.episode_per_iter

        col_result = collector.collect(n_episode=episodes)
        update_result = self.policy.update(sample_size=0, buffer=collector.buffer, **self.update_kwargs)
        res = {**col_result, **update_result}
        self.log_dict(res)
        return res
| train | python | microsoft/qlib | qlib/rl/trainer/vessel.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/trainer/vessel.py | MIT |
def reset(self, **kwargs: Any) -> ObsType:
    """
    Try to get a state from the state queue, and init the simulator with this state.
    If the queue is exhausted, generate an invalid (nan) observation.
    """
    try:
        if self.seed_iterator is None:
            raise RuntimeError("You are trying to get a state from a dead environment wrapper.")

        # TODO: simulator/observation might need the seed to prefetch something,
        # as only the seed has the ability to do the work beforehand.

        # NOTE: though the logger is reset here, logs in this function won't work,
        # because we can't send them outside.
        # See https://github.com/thu-ml/tianshou/issues/605
        self.logger.reset()

        if self.seed_iterator is SEED_INTERATOR_MISSING:
            # no initial state
            initial_state = None
            self.simulator = cast(Callable[[], Simulator], self.simulator_fn)()
        else:
            initial_state = next(cast(Iterator[InitialStateType], self.seed_iterator))
            self.simulator = self.simulator_fn(initial_state)

        self.status = EnvWrapperStatus(
            cur_step=0,
            done=False,
            initial_state=initial_state,
            obs_history=[],
            action_history=[],
            reward_history=[],
        )

        self.simulator.env = cast(EnvWrapper, weakref.proxy(self))

        sim_state = self.simulator.get_state()
        obs = self.state_interpreter(sim_state)

        self.status["obs_history"].append(obs)

        return obs
    except StopIteration:
        # The environment should be recycled because it's in a dead state.
        self.seed_iterator = None
        return generate_nan_observation(self.observation_space)
| reset | python | microsoft/qlib | qlib/rl/utils/env_wrapper.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/env_wrapper.py | MIT |
def step(self, policy_action: PolicyActType, **kwargs: Any) -> Tuple[ObsType, float, bool, InfoDict]:
    """Environment step.

    See the code along with the comments to get the sequence of things happening here.
    """
    if self.seed_iterator is None:
        raise RuntimeError("State queue is already exhausted, but the environment is still receiving actions.")

    # Clear the logged information from the last step
    self.logger.reset()

    # Action is what we have got from the policy
    self.status["action_history"].append(policy_action)
    action = self.action_interpreter(self.simulator.get_state(), policy_action)

    # This update must be after the action interpreter and before the simulator.
    self.status["cur_step"] += 1

    # Use the converted action to update the simulator
    self.simulator.step(action)

    # Update "done" first, as this status might be used by reward_fn later
    done = self.simulator.done()
    self.status["done"] = done

    # Get state and calculate observation
    sim_state = self.simulator.get_state()
    obs = self.state_interpreter(sim_state)
    self.status["obs_history"].append(obs)

    # Reward and extra info
    if self.reward_fn is not None:
        rew = self.reward_fn(sim_state)
    else:
        # No reward. Treated as 0.
        rew = 0.0
    self.status["reward_history"].append(rew)

    if self.aux_info_collector is not None:
        aux_info = self.aux_info_collector(sim_state)
    else:
        aux_info = {}

    # Final logging stuff: RL-specific logs
    if done:
        self.logger.add_scalar("steps_per_episode", self.status["cur_step"])
    self.logger.add_scalar("reward", rew)
    self.logger.add_any("obs", obs, loglevel=LogLevel.DEBUG)
    self.logger.add_any("policy_act", policy_action, loglevel=LogLevel.DEBUG)

    info_dict = InfoDict(log=self.logger.logs(), aux_info=aux_info)
    return obs, rew, done, info_dict
| step | python | microsoft/qlib | qlib/rl/utils/env_wrapper.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/env_wrapper.py | MIT |
def generate_nan_observation(obs_space: gym.Space) -> Any:
    """The NaN observation that indicates the environment receives no seed.

    We assume that obs is complex and there must be something like float.
    Otherwise this logic doesn't work.
    """
    sample = obs_space.sample()
    sample = fill_invalid(sample)
    return sample
| generate_nan_observation | python | microsoft/qlib | qlib/rl/utils/finite_env.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/finite_env.py | MIT |
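fill_invalid is a qlib helper not shown in this listing. A minimal sketch of the idea for a flat Box space, with a NaN flood standing in for fill_invalid:

import gym
import numpy as np

space = gym.spaces.Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)

# Stand-in for qlib's fill_invalid: flood every float entry with NaN.
nan_obs = np.full_like(space.sample(), np.nan)

# Downstream consumers (e.g., the finite vector env) can then detect the dead worker:
assert np.isnan(nan_obs).all()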
@contextmanager  # restored: used as a context manager, see the example below
def collector_guard(self) -> Generator[FiniteVectorEnv, None, None]:
    """Guard the collector. Recommended to guard every collect.

    This guard serves two purposes.

    1. Catch and ignore the StopIteration exception, which is the stopping signal
       thrown by FiniteEnv to let tianshou know that ``collector.collect()`` should exit.
    2. Notify the loggers when the collect is ready / done.

    Examples
    --------
    >>> with finite_env.collector_guard():
    ...     collector.collect(n_episode=INF)
    """
    self._collector_guarded = True

    for logger in self._logger:
        logger.on_env_all_ready()

    try:
        yield self
    except StopIteration:
        pass
    finally:
        self._collector_guarded = False

        # At last, trigger the loggers
        for logger in self._logger:
            logger.on_env_all_done()
| collector_guard | python | microsoft/qlib | qlib/rl/utils/finite_env.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/finite_env.py | MIT |
def vectorize_env(
    env_factory: Callable[..., gym.Env],
    env_type: FiniteEnvType,
    concurrency: int,
    logger: LogWriter | List[LogWriter],
) -> FiniteVectorEnv:
    """Helper function to create a vector env. Can be used to replace the usual VectorEnv.

    For example, where you once wrote: ::

        DummyVectorEnv([lambda: gym.make(task) for _ in range(env_num)])

    Now you can replace it with: ::

        vectorize_env(lambda: gym.make(task), "dummy", env_num, my_logger)

    By doing such a replacement, you have two additional features enabled (compared to normal VectorEnv):

    1. The vector env will check for NaN observations and kill the worker when one is found.
       See :class:`FiniteVectorEnv` for why we need this.
    2. A logger to explicitly collect logs from environment workers.

    Parameters
    ----------
    env_factory
        Callable to instantiate one single ``gym.Env``.
        All concurrent workers will have the same ``env_factory``.
    env_type
        dummy or subproc or shmem. Corresponding to
        `parallelism in tianshou <https://tianshou.readthedocs.io/en/master/api/tianshou.env.html#vectorenv>`_.
    concurrency
        Concurrent environment workers.
    logger
        Log writers.

    Warnings
    --------
    Please do not use a lambda expression here for ``env_factory``, as it may create incorrectly-shared instances.

    Don't do: ::

        vectorize_env(lambda: EnvWrapper(...), ...)

    Please do: ::

        def env_factory(): ...
        vectorize_env(env_factory, ...)
    """
    env_type_cls_mapping: Dict[str, Type[FiniteVectorEnv]] = {
        "dummy": FiniteDummyVectorEnv,
        "subproc": FiniteSubprocVectorEnv,
        "shmem": FiniteShmemVectorEnv,
    }
    finite_env_cls = env_type_cls_mapping[env_type]
    return finite_env_cls(logger, [env_factory for _ in range(concurrency)])
| vectorize_env | python | microsoft/qlib | qlib/rl/utils/finite_env.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/finite_env.py | MIT |
def add_string(self, name: str, string: str, loglevel: int | LogLevel = LogLevel.PERIODIC) -> None:
    """Add a string with name into logged contents."""
    if loglevel < self._min_loglevel:
        return
    if not isinstance(string, str):
        raise TypeError(f"{string} is not a string.")
    self._add_metric(name, string, loglevel)
| add_string | python | microsoft/qlib | qlib/rl/utils/log.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/log.py | MIT |
def add_scalar(self, name: str, scalar: Any, loglevel: int | LogLevel = LogLevel.PERIODIC) -> None:
    """Add a scalar with name into logged contents.

    The scalar will be converted into a float.
    """
    if loglevel < self._min_loglevel:
        return

    if hasattr(scalar, "item"):
        # could be a single-item tensor / numpy number
        scalar = scalar.item()
    if not isinstance(scalar, (float, int)):
        raise TypeError(f"{scalar} is not a float or an integer and cannot be converted into one.")
    scalar = float(scalar)

    self._add_metric(name, scalar, loglevel)
| add_scalar | python | microsoft/qlib | qlib/rl/utils/log.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/log.py | MIT |
def add_array(
    self,
    name: str,
    array: np.ndarray | pd.DataFrame | pd.Series,
    loglevel: int | LogLevel = LogLevel.PERIODIC,
) -> None:
    """Add an array with name into the logging."""
    if loglevel < self._min_loglevel:
        return

    if not isinstance(array, (np.ndarray, pd.DataFrame, pd.Series)):
        raise TypeError(f"{array} is not an ndarray, DataFrame, or Series.")
    self._add_metric(name, array, loglevel)
| add_array | python | microsoft/qlib | qlib/rl/utils/log.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/log.py | MIT |
def add_any(self, name: str, obj: Any, loglevel: int | LogLevel = LogLevel.PERIODIC) -> None:
    """Log something of any type.

    As it's an "any" object, the only LogWriter that can accept it is pickle.
    Therefore, pickle must be able to serialize it.
    """
    if loglevel < self._min_loglevel:
        return

    # FIXME: detect and rescue objects that could be scalar or array
    self._add_metric(name, obj, loglevel)
| add_any | python | microsoft/qlib | qlib/rl/utils/log.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/log.py | MIT |
def clear(self):
    """Clear all the metrics for a fresh start.

    To make the logger instance reusable.
    """
    self.episode_count = self.step_count = 0
    self.active_env_ids = set()
| clear | python | microsoft/qlib | qlib/rl/utils/log.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/log.py | MIT |
def state_dict(self) -> dict:
    """Save the states of the logger to a dict."""
    return {
        "episode_count": self.episode_count,
        "step_count": self.step_count,
        "global_step": self.global_step,
        "global_episode": self.global_episode,
        "active_env_ids": self.active_env_ids,
        "episode_lengths": self.episode_lengths,
        "episode_rewards": self.episode_rewards,
        "episode_logs": self.episode_logs,
    }
| state_dict | python | microsoft/qlib | qlib/rl/utils/log.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/log.py | MIT |
def load_state_dict(self, state_dict: dict) -> None:
    """Load the states of the current logger from a dict."""
    self.episode_count = state_dict["episode_count"]
    self.step_count = state_dict["step_count"]
    self.global_step = state_dict["global_step"]
    self.global_episode = state_dict["global_episode"]
    # These are runtime info.
    # Though they are loaded, I don't think it really helps.
    self.active_env_ids = state_dict["active_env_ids"]
    self.episode_lengths = state_dict["episode_lengths"]
    self.episode_rewards = state_dict["episode_rewards"]
    self.episode_logs = state_dict["episode_logs"]
| load_state_dict | python | microsoft/qlib | qlib/rl/utils/log.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/log.py | MIT |
def aggregation(array: Sequence[Any], name: str | None = None) -> Any:
    """Aggregation function from step-wise to episode-wise.

    If it's a sequence of floats, take the mean.
    Otherwise, take the first element.

    If a name is specified and it's ``reward``, the reduction will be sum.
    """
    assert len(array) > 0, "The aggregated array must not be empty."
    if all(isinstance(v, float) for v in array):
        if name == "reward":
            return np.sum(array)
        return np.mean(array)
    else:
        return array[0]
| aggregation | python | microsoft/qlib | qlib/rl/utils/log.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/log.py | MIT |
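Concretely (runnable, given the function above and numpy):

print(aggregation([1.0, 2.0, 3.0]))                 # floats, no name   -> mean, 2.0
print(aggregation([1.0, 2.0, 3.0], name="reward"))  # floats, "reward"  -> sum, 6.0
print(aggregation(["buy", "sell"]))                 # non-floats        -> first element, 'buy'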
def log_episode(self, length: int, rewards: List[float], contents: List[Dict[str, Any]]) -> None:
    """This is triggered at the end of each trajectory.

    Parameters
    ----------
    length
        Length of this trajectory.
    rewards
        A list of rewards at each step of this episode.
    contents
        Logged contents for every step.
    """
| log_episode | python | microsoft/qlib | qlib/rl/utils/log.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/log.py | MIT |
def log_step(self, reward: float, contents: Dict[str, Any]) -> None:
    """This is triggered at each step.

    Parameters
    ----------
    reward
        Reward for this step.
    contents
        Logged contents for this step.
    """
| log_step | python | microsoft/qlib | qlib/rl/utils/log.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/log.py | MIT |
def on_env_step(self, env_id: int, obs: ObsType, rew: float, done: bool, info: InfoDict) -> None:
    """Callback for finite env, on each step."""
    # Update counters
    self.global_step += 1
    self.step_count += 1
    self.active_env_ids.add(env_id)
    self.episode_lengths[env_id] += 1
    # TODO: reward can be a list of lists for MARL
    self.episode_rewards[env_id].append(rew)

    values: Dict[str, Any] = {}
    for key, (loglevel, value) in info["log"].items():
        if loglevel >= self.loglevel:  # FIXME: this is actually incorrect (see the last FIXME)
            values[key] = value
    self.episode_logs[env_id].append(values)

    self.log_step(rew, values)

    if done:
        # Update counters
        self.global_episode += 1
        self.episode_count += 1

        self.log_episode(self.episode_lengths[env_id], self.episode_rewards[env_id], self.episode_logs[env_id])
| on_env_step | python | microsoft/qlib | qlib/rl/utils/log.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/log.py | MIT |
def episode_metrics(self) -> dict[str, float]:
    """Retrieve the numeric metrics of the latest episode."""
    if self._latest_metrics is None:
        raise ValueError("No episode metrics available yet.")
    return self._latest_metrics
| episode_metrics | python | microsoft/qlib | qlib/rl/utils/log.py | https://github.com/microsoft/qlib/blob/master/qlib/rl/utils/log.py | MIT |
def __init__(
    self,
    outer_trade_decision: BaseTradeDecision = None,
    level_infra: LevelInfrastructure = None,
    common_infra: CommonInfrastructure = None,
    trade_exchange: Exchange = None,
) -> None:
    """
    Parameters
    ----------
    outer_trade_decision : BaseTradeDecision, optional
        the trade decision of the outer strategy on which this strategy relies; it will be traded in
        [start_time, end_time], by default None

        - If the strategy is used to split a trade decision, it will be used
        - If the strategy is used for portfolio management, it can be ignored
    level_infra : LevelInfrastructure, optional
        level-shared infrastructure for backtesting, including the trade calendar
    common_infra : CommonInfrastructure, optional
        common infrastructure for backtesting, including trade_account, trade_exchange, etc.
    trade_exchange : Exchange
        exchange that provides market info, used to deal orders and generate reports

        - If `trade_exchange` is None, self.trade_exchange will be set from common_infra
        - It allows different trade_exchanges to be used in different executions.
        - For example:

            - In daily execution, both the daily and the minutely exchange are usable, but the daily
              exchange is recommended because it runs faster.
            - In minutely execution, the daily exchange is not usable; only the minutely exchange is recommended.
    """
    self._reset(level_infra=level_infra, common_infra=common_infra, outer_trade_decision=outer_trade_decision)
    self._trade_exchange = trade_exchange
| __init__ | python | microsoft/qlib | qlib/strategy/base.py | https://github.com/microsoft/qlib/blob/master/qlib/strategy/base.py | MIT |
def reset(
    self,
    level_infra: LevelInfrastructure = None,
    common_infra: CommonInfrastructure = None,
    outer_trade_decision: BaseTradeDecision = None,
    **kwargs,
) -> None:
    """
    - reset `level_infra`, used to reset the trade calendar, etc.
    - reset `common_infra`, used to reset `trade_account`, `trade_exchange`, etc.
    - reset `outer_trade_decision`, used to make the split decision

    **NOTE**:
    Splitting this function into `reset` and `_reset` makes the following case more convenient:

    1. Users want to initialize their strategy by overriding `reset`, without affecting the `_reset`
       called during initialization
    """
    self._reset(
        level_infra=level_infra,
        common_infra=common_infra,
        outer_trade_decision=outer_trade_decision,
    )
| reset | python | microsoft/qlib | qlib/strategy/base.py | https://github.com/microsoft/qlib/blob/master/qlib/strategy/base.py | MIT |
def _reset(
    self,
    level_infra: LevelInfrastructure = None,
    common_infra: CommonInfrastructure = None,
    outer_trade_decision: BaseTradeDecision = None,
):
    """
    Please refer to the docs of `reset`
    """
    if level_infra is not None:
        self.reset_level_infra(level_infra)

    if common_infra is not None:
        self.reset_common_infra(common_infra)

    if outer_trade_decision is not None:
        self.outer_trade_decision = outer_trade_decision
| _reset | python | microsoft/qlib | qlib/strategy/base.py | https://github.com/microsoft/qlib/blob/master/qlib/strategy/base.py | MIT |
def generate_trade_decision(
    self,
    execute_result: list = None,
) -> Union[BaseTradeDecision, Generator[Any, Any, BaseTradeDecision]]:
    """Generate the trade decision in each trading bar.

    Parameters
    ----------
    execute_result : List[object], optional
        the executed result for the trade decision, by default None

        - When generate_trade_decision is called for the first time, `execute_result` could be None
    """
    raise NotImplementedError("generate_trade_decision is not implemented!")
| generate_trade_decision | python | microsoft/qlib | qlib/strategy/base.py | https://github.com/microsoft/qlib/blob/master/qlib/strategy/base.py | MIT |
def update_trade_decision(
    trade_decision: BaseTradeDecision,
    trade_calendar: TradeCalendarManager,
) -> Optional[BaseTradeDecision]:
    """
    Update the trade decision in each step of the inner execution.

    Parameters
    ----------
    trade_decision : BaseTradeDecision
        the trade decision that will be updated
    trade_calendar : TradeCalendarManager
        the calendar of the **inner strategy**!

    Returns
    -------
    Optional[BaseTradeDecision]:
        the updated decision; None indicates that the decision is unchanged
    """
    # Default to returning None, which indicates that the trade decision is not changed
    return None
| update_trade_decision | python | microsoft/qlib | qlib/strategy/base.py | https://github.com/microsoft/qlib/blob/master/qlib/strategy/base.py | MIT |
def alter_outer_trade_decision(self, outer_trade_decision: BaseTradeDecision) -> BaseTradeDecision:
    """
    A method for updating the outer_trade_decision.
    The outer strategy may change its decision during updating.

    Parameters
    ----------
    outer_trade_decision : BaseTradeDecision
        the decision updated by the outer strategy

    Returns
    -------
    BaseTradeDecision
    """
    # Default to resetting the decision directly
    # NOTE: normally, the user should adapt the strategy to the change of the outer decision
    return outer_trade_decision
| alter_outer_trade_decision | python | microsoft/qlib | qlib/strategy/base.py | https://github.com/microsoft/qlib/blob/master/qlib/strategy/base.py | MIT |
def post_upper_level_exe_step(self) -> None:
    """
    A hook for doing something after the upper-level executor has finished its execution
    (for example, finalizing the metrics collection).
    """
| post_upper_level_exe_step | python | microsoft/qlib | qlib/strategy/base.py | https://github.com/microsoft/qlib/blob/master/qlib/strategy/base.py | MIT |
def post_exe_step(self, execute_result: Optional[list]) -> None:
    """
    A hook for doing something after the corresponding executor has finished its execution.

    Parameters
    ----------
    execute_result :
        the execution result
    """
| post_exe_step | python | microsoft/qlib | qlib/strategy/base.py | https://github.com/microsoft/qlib/blob/master/qlib/strategy/base.py | MIT |
def __init__(
    self,
    policy,
    outer_trade_decision: BaseTradeDecision = None,
    level_infra: LevelInfrastructure = None,
    common_infra: CommonInfrastructure = None,
    **kwargs,
) -> None:
    """
    Parameters
    ----------
    policy :
        RL policy for generating actions
    """
    super(RLStrategy, self).__init__(outer_trade_decision, level_infra, common_infra, **kwargs)
    self.policy = policy
| __init__ | python | microsoft/qlib | qlib/strategy/base.py | https://github.com/microsoft/qlib/blob/master/qlib/strategy/base.py | MIT |
def __init__(
    self,
    policy,
    state_interpreter: dict | StateInterpreter,
    action_interpreter: dict | ActionInterpreter,
    outer_trade_decision: BaseTradeDecision = None,
    level_infra: LevelInfrastructure = None,
    common_infra: CommonInfrastructure = None,
    **kwargs,
) -> None:
    """
    Parameters
    ----------
    state_interpreter : Union[dict, StateInterpreter]
        interpreter that interprets the qlib execution result into an RL env state
    action_interpreter : Union[dict, ActionInterpreter]
        interpreter that interprets the RL agent action into a qlib order list
    start_time : Union[str, pd.Timestamp], optional
        start time of trading, by default None
    end_time : Union[str, pd.Timestamp], optional
        end time of trading, by default None
    """
    super(RLIntStrategy, self).__init__(policy, outer_trade_decision, level_infra, common_infra, **kwargs)

    self.policy = policy
    self.state_interpreter = init_instance_by_config(state_interpreter, accept_types=StateInterpreter)
    self.action_interpreter = init_instance_by_config(action_interpreter, accept_types=ActionInterpreter)
| __init__ | python | microsoft/qlib | qlib/strategy/base.py | https://github.com/microsoft/qlib/blob/master/qlib/strategy/base.py | MIT |
def download(self, url: str, target_path: Union[Path, str]):
    """
    Download a file from the specified url.

    Parameters
    ----------
    url: str
        The url of the data.
    target_path: str
        The location where the data is saved, including the file name.
    """
    target_path = Path(target_path)  # coerce so that .open() below also works for str inputs
    file_name = target_path.name
    resp = requests.get(url, stream=True, timeout=60)
    resp.raise_for_status()  # raises HTTPError for non-2xx responses

    chunk_size = 1024
    logger.warning(
        "The data for the example is collected from Yahoo Finance. Please be aware that the quality of the data might not be perfect. (You can refer to the original data source: https://finance.yahoo.com/lookup.)"
    )
    logger.info(f"{os.path.basename(file_name)} downloading......")
    with tqdm(total=int(resp.headers.get("Content-Length", 0))) as p_bar:
        with target_path.open("wb") as fp:
            for chunk in resp.iter_content(chunk_size=chunk_size):
                fp.write(chunk)
                p_bar.update(chunk_size)
| download | python | microsoft/qlib | qlib/tests/data.py | https://github.com/microsoft/qlib/blob/master/qlib/tests/data.py | MIT |
def download_data(self, file_name: str, target_dir: Union[Path, str], delete_old: bool = True):
    """
    Download the specified file to the target folder.

    Parameters
    ----------
    target_dir: str
        data save directory
    file_name: str
        dataset name, needs to end with .zip, value from [rl_data.zip, csv_data_cn.zip, ...];
        may contain folder names, for example: v2/qlib_data_simple_cn_1d_latest.zip
    delete_old: bool
        delete an existing directory, by default True

    Examples
    ---------
    # get rl data
    python get_data.py download_data --file_name rl_data.zip --target_dir ~/.qlib/qlib_data/rl_data
    When this command is run, the data will be downloaded from this link: https://qlibpublic.blob.core.windows.net/data/default/stock_data/rl_data.zip?{token}

    # get cn csv data
    python get_data.py download_data --file_name csv_data_cn.zip --target_dir ~/.qlib/csv_data/cn_data
    When this command is run, the data will be downloaded from this link: https://qlibpublic.blob.core.windows.net/data/default/stock_data/csv_data_cn.zip?{token}
    """
    target_dir = Path(target_dir).expanduser()
    target_dir.mkdir(exist_ok=True, parents=True)
    # saved file name
    _target_file_name = datetime.datetime.now().strftime("%Y%m%d%H%M%S") + "_" + os.path.basename(file_name)
    target_path = target_dir.joinpath(_target_file_name)

    url = self.merge_remote_url(file_name)
    self.download(url=url, target_path=target_path)

    self._unzip(target_path, target_dir, delete_old)
    if self.delete_zip_file:
        target_path.unlink()
| download_data | python | microsoft/qlib | qlib/tests/data.py | https://github.com/microsoft/qlib/blob/master/qlib/tests/data.py | MIT |
def qlib_data(
    self,
    name="qlib_data",
    target_dir="~/.qlib/qlib_data/cn_data",
    version=None,
    interval="1d",
    region="cn",
    delete_old=True,
    exists_skip=False,
):
    """Download cn qlib data from the remote.

    Parameters
    ----------
    target_dir: str
        data save directory
    name: str
        dataset name, value from [qlib_data, qlib_data_simple], by default qlib_data
    version: str
        data version, value from [v1, ...], by default None (use the script to specify the version)
    interval: str
        data freq, value from [1d], by default 1d
    region: str
        data region, value from [cn, us], by default cn
    delete_old: bool
        delete an existing directory, by default True
    exists_skip: bool
        skip the download if the data already exists, by default False

    Examples
    ---------
    # get 1d data
    python get_data.py qlib_data --name qlib_data --target_dir ~/.qlib/qlib_data/cn_data --interval 1d --region cn
    When this command is run, the data will be downloaded from this link: https://qlibpublic.blob.core.windows.net/data/default/stock_data/v2/qlib_data_cn_1d_latest.zip?{token}

    # get 1min data
    python get_data.py qlib_data --name qlib_data --target_dir ~/.qlib/qlib_data/cn_data_1min --interval 1min --region cn
    When this command is run, the data will be downloaded from this link: https://qlibpublic.blob.core.windows.net/data/default/stock_data/v2/qlib_data_cn_1min_latest.zip?{token}
    """
    if exists_skip and exists_qlib_data(target_dir):
        logger.warning(
            f"Data already exists: {target_dir}, the data download will be skipped\n"
            f"\tIf downloading is required: `exists_skip=False` or change `target_dir`"
        )
        return

    qlib_version = ".".join(re.findall(r"(\d+)\.+", qlib.__version__))

    def _get_file_name_with_version(qlib_version, dataset_version):
        dataset_version = "v2" if dataset_version is None else dataset_version
        file_name_with_version = f"{dataset_version}/{name}_{region.lower()}_{interval.lower()}_{qlib_version}.zip"
        return file_name_with_version

    file_name = _get_file_name_with_version(qlib_version, dataset_version=version)
    if not self.check_dataset(file_name):
        file_name = _get_file_name_with_version("latest", dataset_version=version)
    self.download_data(file_name.lower(), target_dir, delete_old)
| qlib_data | python | microsoft/qlib | qlib/tests/data.py | https://github.com/microsoft/qlib/blob/master/qlib/tests/data.py | MIT |
def robust_zscore(x: pd.Series, zscore=False):
    """Robust ZScore Normalization

    Use robust statistics for Z-Score normalization:
        mean(x) = median(x)
        std(x) = MAD(x) * 1.4826

    Reference:
        https://en.wikipedia.org/wiki/Median_absolute_deviation.
    """
    x = x - x.median()
    mad = x.abs().median()
    x = np.clip(x / mad / 1.4826, -3, 3)
    if zscore:
        x -= x.mean()
        x /= x.std()
    return x
| robust_zscore | python | microsoft/qlib | qlib/utils/data.py | https://github.com/microsoft/qlib/blob/master/qlib/utils/data.py | MIT |
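A small runnable illustration: the median/MAD pair is insensitive to the outlier that would dominate an ordinary z-score, and the clip caps it at 3.

import numpy as np
import pandas as pd

x = pd.Series([1.0, 2.0, 3.0, 4.0, 100.0])
print(robust_zscore(x).round(2).tolist())
# [-1.35, -0.67, 0.0, 0.67, 3.0]  -- the outlier 100.0 is clipped to 3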
def deepcopy_basic_type(obj: object) -> object:
    """
    Deepcopy an object without copying the complicated objects.
    This is useful when you want to generate Qlib tasks and share the handler.

    NOTE:
    - This function can't handle recursive objects!

    Parameters
    ----------
    obj : object
        the object to be copied

    Returns
    -------
    object:
        The copied object
    """
    if isinstance(obj, tuple):
        return tuple(deepcopy_basic_type(i) for i in obj)
    elif isinstance(obj, list):
        return list(deepcopy_basic_type(i) for i in obj)
    elif isinstance(obj, dict):
        return {k: deepcopy_basic_type(v) for k, v in obj.items()}
    else:
        return obj
| deepcopy_basic_type | python | microsoft/qlib | qlib/utils/data.py | https://github.com/microsoft/qlib/blob/master/qlib/utils/data.py | MIT |
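Runnable illustration of the contract: containers are rebuilt, while "complicated" leaf objects (e.g., a shared handler) are passed through by reference.

handler = object()  # stands in for an expensive shared handler
task = {"model": {"class": "LGBModel"}, "dataset": {"handler": handler}}

copied = deepcopy_basic_type(task)
assert copied is not task                       # dicts are rebuilt
assert copied["dataset"]["handler"] is handler  # the leaf object is shared, not copied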
def update_config(base_config: dict, ext_config: Union[dict, List[dict]]):
    """
    Update the base config based on the ext_config.

    >>> bc = {"a": "xixi"}
    >>> ec = {"b": "haha"}
    >>> new_bc = update_config(bc, ec)
    >>> print(new_bc)
    {'a': 'xixi', 'b': 'haha'}
    >>> print(bc)  # base config should not be changed
    {'a': 'xixi'}
    >>> print(update_config(bc, {"b": S_DROP}))
    {'a': 'xixi'}
    >>> print(update_config(new_bc, {"b": S_DROP}))
    {'a': 'xixi'}
    """
    base_config = deepcopy(base_config)  # in case of modifying the base config

    for ec in ext_config if isinstance(ext_config, (list, tuple)) else [ext_config]:
        for key in ec:
            if key not in base_config:
                # if the key is not in the base config, ADD it (unless it is a drop)
                if ec[key] != S_DROP:
                    base_config[key] = ec[key]
            else:
                if isinstance(base_config[key], dict) and isinstance(ec[key], dict):
                    # RECURSIVE: both of them are dicts, so update nested
                    base_config[key] = update_config(base_config[key], ec[key])
                elif ec[key] == S_DROP:
                    # DROP
                    del base_config[key]
                else:
                    # REPLACE: one of them is not a dict, so replace
                    base_config[key] = ec[key]
    return base_config
| update_config | python | microsoft/qlib | qlib/utils/data.py | https://github.com/microsoft/qlib/blob/master/qlib/utils/data.py | MIT |
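The doctest above covers the flat cases; the recursive branch merges nested dicts key-by-key instead of replacing them wholesale, e.g.:

base = {"model": {"lr": 0.01, "depth": 6}}
print(update_config(base, {"model": {"lr": 0.001}}))
# {'model': {'lr': 0.001, 'depth': 6}}  -- 'depth' survives because both values are dicts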
def guess_horizon(label: List):
    """
    Try to guess the horizon by parsing the label.
    """
    expr = DatasetProvider.parse_fields(label)[0]
    _, rght_etd = expr.get_extended_window_size()  # only the right extension matters for the horizon
    return rght_etd
| guess_horizon | python | microsoft/qlib | qlib/utils/data.py | https://github.com/microsoft/qlib/blob/master/qlib/utils/data.py | MIT |
def get_or_create_path(path: Optional[Text] = None, return_dir: bool = False):
    """Create or get a file or directory given the path and return_dir.

    Parameters
    ----------
    path: a string indicating the path, or None to create a temporary path.
    return_dir: if True, create and return a directory; otherwise create and return a file.
    """
    if path:
        if return_dir and not os.path.exists(path):
            os.makedirs(path)
        elif not return_dir:  # return a file, thus we need to create its parent directory
            xpath = os.path.abspath(os.path.join(path, ".."))
            if not os.path.exists(xpath):
                os.makedirs(xpath)
    else:
        temp_dir = os.path.expanduser("~/tmp")
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        if return_dir:
            path = tempfile.mkdtemp(dir=temp_dir)  # mkdtemp returns the path directly
        else:
            _, path = tempfile.mkstemp(dir=temp_dir)  # mkstemp returns (fd, path)
    return path
| get_or_create_path | python | microsoft/qlib | qlib/utils/file.py | https://github.com/microsoft/qlib/blob/master/qlib/utils/file.py | MIT |
@contextmanager  # restored: this generator is used as a context manager (see the usage below)
def save_multiple_parts_file(filename, format="gztar"):
    """Save a multiple-parts file.

    Implementation process:
        1. get the absolute path of 'filename'
        2. create a 'filename' directory
        3. user does something with file_path('filename/')
        4. remove the 'filename' directory
        5. make_archive the 'filename' directory, and rename the archive file to filename

    :param filename: result model path
    :param format: archive format: one of "zip", "tar", "gztar", "bztar", or "xztar"
    :return: real model path

    Usage::

        >>> # The following code will create an archive file('~/tmp/test_file') containing 'test_doc_i'(i is 0-10) files.
        >>> with save_multiple_parts_file('~/tmp/test_file') as filename_dir:
        ...     for i in range(10):
        ...         temp_path = os.path.join(filename_dir, 'test_doc_{}'.format(str(i)))
        ...         with open(temp_path, 'w') as fp:
        ...             fp.write(str(i))
        ...
    """
    if filename.startswith("~"):
        filename = os.path.expanduser(filename)

    file_path = os.path.abspath(filename)

    # Create model dir
    if os.path.exists(file_path):
        raise FileExistsError("ERROR: file exists: {}, cannot create the directory.".format(file_path))
    os.makedirs(file_path)

    # Return model dir
    yield file_path

    # Archive the 'filename' dir to a 'filename.tar.gz' file
    tar_file = shutil.make_archive(file_path, format=format, root_dir=file_path)

    # Remove the 'filename' dir
    if os.path.exists(file_path):
        shutil.rmtree(file_path)

    # Rename 'filename.tar.gz' to 'filename'
    os.rename(tar_file, file_path)
| save_multiple_parts_file | python | microsoft/qlib | qlib/utils/file.py | https://github.com/microsoft/qlib/blob/master/qlib/utils/file.py | MIT |
@contextmanager  # restored: this generator is used as a context manager (see the usage below)
def unpack_archive_with_buffer(buffer, format="gztar"):
    """Unpack an archive from an in-memory buffer.

    After the call is finished, the archive file and directory will be deleted.

    Implementation process:
        1. create a 'tempfile' in '~/tmp/' and a directory
        2. write 'buffer' to the 'tempfile'
        3. unpack the archive file('tempfile')
        4. user does something with file_path('tempfile/')
        5. remove the 'tempfile' and the 'tempfile' directory

    :param buffer: bytes
    :param format: archive format: one of "zip", "tar", "gztar", "bztar", or "xztar"
    :return: unpacked archive directory path

    Usage::

        >>> # The following code prints all the file names in 'test_unpack.tar.gz'
        >>> with open('test_unpack.tar.gz', 'rb') as fp:
        ...     buffer = fp.read()
        ...
        >>> with unpack_archive_with_buffer(buffer) as temp_dir:
        ...     for f_n in os.listdir(temp_dir):
        ...         print(f_n)
        ...
    """
    temp_dir = os.path.expanduser("~/tmp")
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)
    with tempfile.NamedTemporaryFile("wb", delete=False, dir=temp_dir) as fp:
        fp.write(buffer)
        file_path = fp.name

    try:
        tar_file = file_path + ".tar.gz"
        os.rename(file_path, tar_file)
        # Create dir
        os.makedirs(file_path)
        shutil.unpack_archive(tar_file, format=format, extract_dir=file_path)

        # Return the temp dir
        yield file_path
    except Exception as e:
        log.error(str(e))
    finally:
        # Remove the temp tar file
        if os.path.exists(tar_file):
            os.unlink(tar_file)
        # Remove the temp model dir
        if os.path.exists(file_path):
            shutil.rmtree(file_path)
|
Unpack archive with archive buffer
After the call is finished, the archive file and directory will be deleted.
Implementation process:
1. create 'tempfile' in '~/tmp/' and directory
2. 'buffer' write to 'tempfile'
3. unpack archive file('tempfile')
4. user does something with file_path('tempfile/')
5. remove 'tempfile' and 'tempfile directory'
:param buffer: bytes
:param format: archive format: one of "zip", "tar", "gztar", "bztar", or "xztar"
:return: unpack archive directory path
Usage::
>>> # The following code prints all the file names in 'test_unpack.tar.gz'
>>> with open('test_unpack.tar.gz', 'rb') as fp:
... buffer = fp.read()
...
>>> with unpack_archive_with_buffer(buffer) as temp_dir:
... for f_n in os.listdir(temp_dir):
... print(f_n)
...
|
unpack_archive_with_buffer
|
python
|
microsoft/qlib
|
qlib/utils/file.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/file.py
|
MIT
|
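A round-trip sketch pairing the two helpers above, under the same hedges (importable from `qlib.utils.file`, wrapped with `contextlib.contextmanager` in the source):

import os
from qlib.utils.file import unpack_archive_with_buffer

archive_path = os.path.expanduser("~/tmp/test_file")  # produced by save_multiple_parts_file
with open(archive_path, "rb") as fp:
    buffer = fp.read()
with unpack_archive_with_buffer(buffer) as temp_dir:
    print(os.listdir(temp_dir))  # the extracted parts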
def get_io_object(file: Union[IO, str, Path], *args, **kwargs) -> IO:
"""
    providing an easy interface to get an IO object
Parameters
----------
file : Union[IO, str, Path]
        an object representing the file
Returns
-------
IO:
        an IO-like object
Raises
------
NotImplementedError:
"""
if isinstance(file, IO):
yield file
else:
if isinstance(file, str):
file = Path(file)
if not isinstance(file, Path):
raise NotImplementedError(f"This type[{type(file)}] of input is not supported")
with file.open(*args, **kwargs) as f:
yield f
|
providing an easy interface to get an IO object
Parameters
----------
file : Union[IO, str, Path]
    an object representing the file
Returns
-------
IO:
    an IO-like object
Raises
------
NotImplementedError:
|
get_io_object
|
python
|
microsoft/qlib
|
qlib/utils/file.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/file.py
|
MIT
|
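A usage sketch (again assuming a `contextlib.contextmanager` wrapper in the source): the helper lets one code path accept either an already-open handle or a path; the file name below is illustrative.

from pathlib import Path
from qlib.utils.file import get_io_object

with get_io_object(Path("notes.txt"), "w") as f:  # a str/Path is opened, an IO object is yielded as-is
    f.write("hello")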
def concat(data_list: List[SingleData], axis=0) -> MultiData:
"""concat all SingleData by index.
TODO: now just for SingleData.
Parameters
----------
data_list : List[SingleData]
the list of all SingleData to concat.
Returns
-------
MultiData
the MultiData with ndim == 2
"""
if axis == 0:
        raise NotImplementedError("please implement this func when axis == 0")
elif axis == 1:
# get all index and row
all_index = set()
for index_data in data_list:
all_index = all_index | set(index_data.index)
all_index = list(all_index)
all_index.sort()
all_index_map = dict(zip(all_index, range(len(all_index))))
# concat all
tmp_data = np.full((len(all_index), len(data_list)), np.nan)
for data_id, index_data in enumerate(data_list):
assert isinstance(index_data, SingleData)
now_data_map = [all_index_map[index] for index in index_data.index]
tmp_data[now_data_map, data_id] = index_data.data
return MultiData(tmp_data, all_index)
else:
        raise ValueError("axis must be 0 or 1")
|
concat all SingleData by index.
TODO: now just for SingleData.
Parameters
----------
data_list : List[SingleData]
the list of all SingleData to concat.
Returns
-------
MultiData
the MultiData with ndim == 2
|
concat
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
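A small sketch of `concat` with axis=1, assuming `SingleData` and `concat` are importable from `qlib.utils.index_data`: columns are aligned on the sorted union of the indices, with np.nan where a series has no value.

import qlib.utils.index_data as idd

s1 = idd.SingleData([1.0, 2.0], index=["a", "b"])
s2 = idd.SingleData([3.0, 4.0], index=["b", "c"])
md = idd.concat([s1, s2], axis=1)
# md is a 2-D MultiData over index ["a", "b", "c"]; "a" and "c"
# hold np.nan in the column where they are missing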
def sum_by_index(data_list: List[SingleData], new_index: list, fill_value=0) -> SingleData:
"""concat all SingleData by new index.
Parameters
----------
data_list : List[SingleData]
the list of all SingleData to sum.
new_index : list
the new_index of new SingleData.
fill_value : float
fill the missing values or replace np.nan.
Returns
-------
SingleData
the SingleData with new_index and values after sum.
"""
data_list = [data.to_dict() for data in data_list]
data_sum = {}
    for idx in new_index:
        item_sum = 0
        for data in data_list:
            if idx in data and not np.isnan(data[idx]):
                item_sum += data[idx]
            else:
                item_sum += fill_value
        data_sum[idx] = item_sum
return SingleData(data_sum)
|
sum all SingleData over a new index.
Parameters
----------
data_list : List[SingleData]
the list of all SingleData to sum.
new_index : list
the new_index of new SingleData.
fill_value : float
fill the missing values or replace np.nan.
Returns
-------
SingleData
the SingleData with new_index and values after sum.
|
sum_by_index
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
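A companion sketch for `sum_by_index` (same assumed import path as above): missing or NaN entries contribute `fill_value` instead.

import qlib.utils.index_data as idd

s1 = idd.SingleData([1.0, 2.0], index=["a", "b"])
s2 = idd.SingleData([3.0, 4.0], index=["b", "c"])
total = idd.sum_by_index([s1, s2], new_index=["a", "b", "c"], fill_value=0)
# total: "a" -> 1.0 + 0, "b" -> 2.0 + 3.0, "c" -> 0 + 4.0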
def _convert_type(self, item):
"""
        After the user creates indices with type A, they may query the data with other types carrying the same info.
        This method tries to convert the type to keep the query sane rather than strictly raising KeyError
Parameters
----------
item :
The item to query index
"""
if self.idx_list.dtype.type is np.datetime64:
if isinstance(item, pd.Timestamp):
# This happens often when creating index based on pandas.DatetimeIndex and query with pd.Timestamp
return item.to_numpy().astype(self.idx_list.dtype)
elif isinstance(item, np.datetime64):
# This happens often when creating index based on np.datetime64 and query with another precision
return item.astype(self.idx_list.dtype)
# NOTE: It is hard to consider every case at first.
# We just try to cover part of cases to make it more user-friendly
return item
|
After the user creates indices with type A, they may query the data with other types carrying the same info.
This method tries to convert the type to keep the query sane rather than strictly raising KeyError
Parameters
----------
item :
The item to query index
|
_convert_type
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
def index(self, item) -> int:
"""
Given the index value, get the integer index
Parameters
----------
item :
The item to query
Returns
-------
int:
The index of the item
Raises
------
KeyError:
If the query item does not exist
"""
try:
return self.index_map[self._convert_type(item)]
    except (IndexError, KeyError) as index_e:
raise KeyError(f"{item} can't be found in {self}") from index_e
|
Given the index value, get the integer index
Parameters
----------
item :
The item to query
Returns
-------
int:
The index of the item
Raises
------
KeyError:
If the query item does not exist
|
index
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
def sort(self) -> Tuple["Index", np.ndarray]:
"""
sort the index
Returns
-------
Tuple["Index", np.ndarray]:
        the sorted Index and the argsort order that rearranges the original data
"""
sorted_idx = np.argsort(self.idx_list)
idx = Index(self.idx_list[sorted_idx])
idx._is_sorted = True
return idx, sorted_idx
|
sort the index
Returns
-------
Tuple["Index", np.ndarray]:
    the sorted Index and the argsort order that rearranges the original data
|
sort
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
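A short sketch of how the returned argsort order is meant to be used (assuming `Index` is importable from `qlib.utils.index_data` and accepts a plain list): the second return value reorders the data to match the sorted index.

import numpy as np
from qlib.utils.index_data import Index

idx, order = Index(["b", "c", "a"]).sort()
data = np.array([20.0, 30.0, 10.0])
aligned = data[order]  # [10., 20., 30.] -- now matches idx ("a", "b", "c")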
def proc_idx_l(indices: List[Union[List, pd.Index, Index]], data_shape: Tuple = None) -> List[Index]:
"""process the indices from user and output a list of `Index`"""
res = []
for i, idx in enumerate(indices):
res.append(Index(data_shape[i] if len(idx) == 0 else idx))
return res
|
process the indices from user and output a list of `Index`
|
proc_idx_l
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
def _slc_convert(self, index: Index, indexing: slice) -> slice:
"""
convert value-based indexing to integer-based indexing.
Parameters
----------
index : Index
index data.
indexing : slice
        value-based indexing, given as a slice.
Returns
-------
slice:
the integer based slicing
"""
if index.is_sorted():
int_start = None if indexing.start is None else bisect.bisect_left(index, indexing.start)
int_stop = None if indexing.stop is None else bisect.bisect_right(index, indexing.stop)
else:
int_start = None if indexing.start is None else index.index(indexing.start)
int_stop = None if indexing.stop is None else index.index(indexing.stop) + 1
return slice(int_start, int_stop)
|
convert value-based indexing to integer-based indexing.
Parameters
----------
index : Index
index data.
indexing : slice
    value-based indexing, given as a slice.
Returns
-------
slice:
the integer based slicing
|
_slc_convert
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
def __getitem__(self, indexing):
"""
Parameters
----------
indexing :
query for data
Raises
------
KeyError:
If the non-slice index is queried but does not exist, `KeyError` is raised.
"""
# 1) convert slices to int loc
if not isinstance(indexing, tuple):
        # NOTE: a tuple is interpreted as multi-dimensional indexing rather than a single key
indexing = (indexing,)
# TODO: create a subclass for single value query
assert len(indexing) <= len(self._indices)
int_indexing = []
for dim, index in enumerate(self._indices):
if dim < len(indexing):
_indexing = indexing[dim]
if not self._int_loc: # type converting is only necessary when it is not `iloc`
if isinstance(_indexing, slice):
_indexing = self._slc_convert(index, _indexing)
elif isinstance(_indexing, (IndexData, np.ndarray)):
if isinstance(_indexing, IndexData):
_indexing = _indexing.data
assert _indexing.ndim == 1
if _indexing.dtype != bool:
_indexing = np.array(list(index.index(i) for i in _indexing))
else:
_indexing = index.index(_indexing)
else:
# Default to select all when user input is not given
_indexing = slice(None)
int_indexing.append(_indexing)
# 2) select data and index
new_data = self._bind_id.data[tuple(int_indexing)]
# return directly if it is scalar
if new_data.ndim == 0:
return new_data
# otherwise we go on to the index part
new_indices = [idx[indexing] for idx, indexing in zip(self._indices, int_indexing)]
# 3) squash dimensions
new_indices = [
idx for idx in new_indices if isinstance(idx, np.ndarray) and idx.ndim > 0
] # squash the zero dim indexing
if new_data.ndim == 1:
cls = SingleData
elif new_data.ndim == 2:
cls = MultiData
else:
raise ValueError("Not supported")
return cls(new_data, *new_indices)
|
Parameters
----------
indexing :
query for data
Raises
------
KeyError:
If the non-slice index is queried but does not exist, `KeyError` is raised.
|
__getitem__
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
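A sketch of what this indexer enables at the user level, assuming `MultiData` exposes the usual `.loc` accessor on top of this `__getitem__` (the `_int_loc` flag suggests an `.iloc` twin for integer positions):

import qlib.utils.index_data as idd

md = idd.MultiData([[1.0, 2.0], [3.0, 4.0]], index=["x", "y"], columns=["a", "b"])
print(md.loc["x", "a"])      # scalar 1.0 (a zero-dim result is returned directly)
print(md.loc["x":"y", "b"])  # 1-D result squashed into a SingleData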
def index_data_ops_creator(*args, **kwargs):
"""
meta class for auto generating operations for index data.
"""
for method_name in ["__add__", "__sub__", "__rsub__", "__mul__", "__truediv__", "__eq__", "__gt__", "__lt__"]:
args[2][method_name] = BinaryOps(method_name=method_name)
return type(*args)
|
meta class for auto generating operations for index data.
|
index_data_ops_creator
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
def abs(self):
"""get the abs of data except np.nan."""
tmp_data = np.absolute(self.data)
return self.__class__(tmp_data, *self.indices)
|
get the absolute value of the data (np.nan stays np.nan).
|
abs
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
def __init__(
self, data: Union[int, float, np.number, list, dict, pd.Series] = [], index: Union[List, pd.Index, Index] = []
):
"""A data structure of index and numpy data.
        It is used to replace pd.Series for higher speed.
Parameters
----------
data : Union[int, float, np.number, list, dict, pd.Series]
the input data
index : Union[list, pd.Index]
the index of data.
            an empty list indicates that the index is auto-filled to the length of the data
"""
# for special data type
if isinstance(data, dict):
assert len(index) == 0
if len(data) > 0:
index, data = zip(*data.items())
else:
index, data = [], []
elif isinstance(data, pd.Series):
assert len(index) == 0
index, data = data.index, data.values
elif isinstance(data, (int, float, np.number)):
data = [data]
super().__init__(data, index)
assert self.ndim == 1
|
A data structure of index and numpy data.
It is used to replace pd.Series for higher speed.
Parameters
----------
data : Union[int, float, np.number, list, dict, pd.Series]
the input data
index : Union[list, pd.Index]
the index of data.
    an empty list indicates that the index is auto-filled to the length of the data
|
__init__
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
def reindex(self, index: Index, fill_value=np.nan) -> SingleData:
"""reindex data and fill the missing value with np.nan.
Parameters
----------
    index : Index
new index
fill_value:
what value to fill if index is missing
Returns
-------
SingleData
reindex data
"""
# TODO: This method can be more general
if self.index == index:
return self
tmp_data = np.full(len(index), fill_value, dtype=np.float64)
for index_id, index_item in enumerate(index):
try:
tmp_data[index_id] = self.loc[index_item]
except KeyError:
pass
return SingleData(tmp_data, index)
|
reindex data and fill missing values with `fill_value` (np.nan by default).
Parameters
----------
index : Index
new index
fill_value:
what value to fill if index is missing
Returns
-------
SingleData
reindex data
|
reindex
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
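A minimal sketch of `reindex` (imports assumed from `qlib.utils.index_data`):

from qlib.utils.index_data import Index, SingleData

s = SingleData([1.0, 2.0], index=["a", "b"])
r = s.reindex(Index(["b", "c"]), fill_value=0.0)
# r holds [2.0, 0.0]: "b" is looked up via .loc, the missing "c" gets fill_value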
def __init__(
self,
    data: Union[list, np.ndarray, pd.DataFrame] = [],
index: Union[List, pd.Index, Index] = [],
columns: Union[List, pd.Index, Index] = [],
):
"""A data structure of index and numpy data.
        It is used to replace pd.DataFrame for higher speed.
Parameters
----------
data : Union[list, np.ndarray]
the dim of data must be 2.
index : Union[List, pd.Index, Index]
the index of data.
columns: Union[List, pd.Index, Index]
the columns of data.
"""
if isinstance(data, pd.DataFrame):
index, columns, data = data.index, data.columns, data.values
super().__init__(data, index, columns)
assert self.ndim == 2
|
A data structure of index and numpy data.
It is used to replace pd.DataFrame for higher speed.
Parameters
----------
data : Union[list, np.ndarray]
the dim of data must be 2.
index : Union[List, pd.Index, Index]
the index of data.
columns: Union[List, pd.Index, Index]
the columns of data.
|
__init__
|
python
|
microsoft/qlib
|
qlib/utils/index_data.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/index_data.py
|
MIT
|
def get_module_by_module_path(module_path: Union[str, ModuleType]):
"""Load module path
:param module_path:
:return:
:raises: ModuleNotFoundError
"""
if module_path is None:
        raise ModuleNotFoundError("None is passed in as the module_path parameter")
if isinstance(module_path, ModuleType):
module = module_path
else:
if module_path.endswith(".py"):
module_name = re.sub("^[^a-zA-Z_]+", "", re.sub("[^0-9a-zA-Z_]", "", module_path[:-3].replace("/", "_")))
module_spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(module_spec)
sys.modules[module_name] = module
module_spec.loader.exec_module(module)
else:
module = importlib.import_module(module_path)
return module
|
Load module path
:param module_path:
:return:
:raises: ModuleNotFoundError
|
get_module_by_module_path
|
python
|
microsoft/qlib
|
qlib/utils/mod.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/mod.py
|
MIT
|
def split_module_path(module_path: str) -> Tuple[str, str]:
"""
Parameters
----------
module_path : str
e.g. "a.b.c.ClassName"
Returns
-------
Tuple[str, str]
e.g. ("a.b.c", "ClassName")
"""
*m_path, cls = module_path.split(".")
m_path = ".".join(m_path)
return m_path, cls
|
Parameters
----------
module_path : str
e.g. "a.b.c.ClassName"
Returns
-------
Tuple[str, str]
e.g. ("a.b.c", "ClassName")
|
split_module_path
|
python
|
microsoft/qlib
|
qlib/utils/mod.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/mod.py
|
MIT
|
def get_callable_kwargs(config: InstConf, default_module: Union[str, ModuleType] = None) -> Tuple[type, dict]:
"""
extract class/func and kwargs from config info
Parameters
----------
config : [dict, str]
similar to config
please refer to the doc of init_instance_by_config
default_module : Python module or str
It should be a python module to load the class type
This function will load class from the config['module_path'] first.
        If config['module_path'] doesn't exist, it will load the class from default_module.
Returns
-------
(type, dict):
        the class/func object and its arguments.
Raises
------
ModuleNotFoundError
"""
if isinstance(config, dict):
key = "class" if "class" in config else "func"
if isinstance(config[key], str):
# 1) get module and class
# - case 1): "a.b.c.ClassName"
# - case 2): {"class": "ClassName", "module_path": "a.b.c"}
m_path, cls = split_module_path(config[key])
if m_path == "":
m_path = config.get("module_path", default_module)
module = get_module_by_module_path(m_path)
# 2) get callable
_callable = getattr(module, cls) # may raise AttributeError
else:
_callable = config[key] # the class type itself is passed in
kwargs = config.get("kwargs", {})
elif isinstance(config, str):
# a.b.c.ClassName
m_path, cls = split_module_path(config)
module = get_module_by_module_path(default_module if m_path == "" else m_path)
_callable = getattr(module, cls)
kwargs = {}
else:
        raise NotImplementedError("This type of input is not supported")
return _callable, kwargs
|
extract class/func and kwargs from config info
Parameters
----------
config : [dict, str]
similar to config
please refer to the doc of init_instance_by_config
default_module : Python module or str
It should be a python module to load the class type
This function will load class from the config['module_path'] first.
    If config['module_path'] doesn't exist, it will load the class from default_module.
Returns
-------
(type, dict):
    the class/func object and its arguments.
Raises
------
ModuleNotFoundError
|
get_callable_kwargs
|
python
|
microsoft/qlib
|
qlib/utils/mod.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/mod.py
|
MIT
|
def init_instance_by_config(
config: InstConf,
default_module=None,
accept_types: Union[type, Tuple[type]] = (),
try_kwargs: Dict = {},
**kwargs,
) -> Any:
"""
get initialized instance with config
Parameters
----------
config : InstConf
default_module : Python module
Optional. It should be a python module.
        NOTE: the "module_path" will be overridden by the `module` argument
This function will load class from the config['module_path'] first.
If config['module_path'] doesn't exists, it will load the class from default_module.
accept_types: Union[type, Tuple[type]]
        Optional. If the config is an instance of a specific type, return the config directly.
This will be passed into the second parameter of isinstance.
try_kwargs: Dict
        Try to pass the kwargs in `try_kwargs` when initializing the instance.
        If an error occurs, it will fall back to initialization without `try_kwargs`.
Returns
-------
object:
An initialized object based on the config info
"""
if isinstance(config, accept_types):
return config
if isinstance(config, (str, Path)):
if isinstance(config, str):
# path like 'file:///<path to pickle file>/obj.pkl'
pr = urlparse(config)
if pr.scheme == "file":
# To enable relative path like file://data/a/b/c.pkl. pr.netloc will be data
path = pr.path
if pr.netloc != "":
path = path.lstrip("/")
pr_path = os.path.join(pr.netloc, path) if bool(pr.path) else pr.netloc
with open(os.path.normpath(pr_path), "rb") as f:
return pickle.load(f)
else:
with config.open("rb") as f:
return pickle.load(f)
klass, cls_kwargs = get_callable_kwargs(config, default_module=default_module)
try:
return klass(**cls_kwargs, **try_kwargs, **kwargs)
except (TypeError,):
# TypeError for handling errors like
# 1: `XXX() got multiple values for keyword argument 'YYY'`
        # 2: `XXX() got an unexpected keyword argument 'YYY'`
return klass(**cls_kwargs, **kwargs)
|
get initialized instance with config
Parameters
----------
config : InstConf
default_module : Python module
Optional. It should be a python module.
    NOTE: the "module_path" will be overridden by the `module` argument
This function will load class from the config['module_path'] first.
If config['module_path'] doesn't exists, it will load the class from default_module.
accept_types: Union[type, Tuple[type]]
    Optional. If the config is an instance of a specific type, return the config directly.
This will be passed into the second parameter of isinstance.
try_kwargs: Dict
    Try to pass the kwargs in `try_kwargs` when initializing the instance.
    If an error occurs, it will fall back to initialization without `try_kwargs`.
Returns
-------
object:
An initialized object based on the config info
|
init_instance_by_config
|
python
|
microsoft/qlib
|
qlib/utils/mod.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/mod.py
|
MIT
|
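A typical config-driven instantiation, the pattern qlib workflows use throughout (the concrete class and kwargs below are illustrative):

from qlib.utils.mod import init_instance_by_config

model = init_instance_by_config(
    {
        "class": "LGBModel",
        "module_path": "qlib.contrib.model.gbdt",
        "kwargs": {"loss": "mse"},
    }
)
# the compact string form works too: init_instance_by_config("qlib.contrib.model.gbdt.LGBModel")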
def class_casting(obj: object, cls: type):
"""
    Python doesn't provide a downcasting mechanism,
    so we use this trick to downcast the class temporarily
Parameters
----------
obj : object
the object to be cast
cls : type
the target class type
"""
orig_cls = obj.__class__
obj.__class__ = cls
yield
obj.__class__ = orig_cls
|
Python doesn't provide a downcasting mechanism,
so we use this trick to downcast the class temporarily
Parameters
----------
obj : object
the object to be cast
cls : type
the target class type
|
class_casting
|
python
|
microsoft/qlib
|
qlib/utils/mod.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/mod.py
|
MIT
|
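A usage sketch; since the function yields once, it is presumably wrapped with `contextlib.contextmanager` in the source. The two classes are hypothetical:

from qlib.utils.mod import class_casting

class Base:
    pass

class Child(Base):
    pass

obj = Child()
with class_casting(obj, Base):
    assert obj.__class__ is Base   # temporarily downcast inside the block
assert obj.__class__ is Child      # original class restored on exit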
def find_all_classes(module_path: Union[str, ModuleType], cls: type) -> List[type]:
"""
Find all the classes recursively that inherit from `cls` in a given module.
- `cls` itself is also included
>>> from qlib.data.dataset.handler import DataHandler
>>> find_all_classes("qlib.contrib.data.handler", DataHandler)
[<class 'qlib.contrib.data.handler.Alpha158'>, <class 'qlib.contrib.data.handler.Alpha158vwap'>, <class 'qlib.contrib.data.handler.Alpha360'>, <class 'qlib.contrib.data.handler.Alpha360vwap'>, <class 'qlib.data.dataset.handler.DataHandlerLP'>]
TODO:
- skip import error
"""
if isinstance(module_path, ModuleType):
mod = module_path
else:
mod = importlib.import_module(module_path)
cls_list = []
def _append_cls(obj):
# Leverage the closure trick to reuse code
        if isinstance(obj, type) and issubclass(obj, cls) and obj not in cls_list:
cls_list.append(obj)
for attr in dir(mod):
_append_cls(getattr(mod, attr))
if hasattr(mod, "__path__"):
    # if the module is a package
for _, modname, _ in pkgutil.iter_modules(mod.__path__):
sub_mod = importlib.import_module(f"{mod.__package__}.{modname}")
for m_cls in find_all_classes(sub_mod, cls):
_append_cls(m_cls)
return cls_list
|
Find all the classes recursively that inherit from `cls` in a given module.
- `cls` itself is also included
>>> from qlib.data.dataset.handler import DataHandler
>>> find_all_classes("qlib.contrib.data.handler", DataHandler)
[<class 'qlib.contrib.data.handler.Alpha158'>, <class 'qlib.contrib.data.handler.Alpha158vwap'>, <class 'qlib.contrib.data.handler.Alpha360'>, <class 'qlib.contrib.data.handler.Alpha360vwap'>, <class 'qlib.data.dataset.handler.DataHandlerLP'>]
TODO:
- skip import error
|
find_all_classes
|
python
|
microsoft/qlib
|
qlib/utils/mod.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/mod.py
|
MIT
|
def datetime_groupby_apply(
df, apply_func: Union[Callable, Text], axis=0, level="datetime", resample_rule="ME", n_jobs=-1
):
"""datetime_groupby_apply
This function will apply the `apply_func` on the datetime level index.
Parameters
----------
df :
DataFrame for processing
apply_func : Union[Callable, Text]
apply_func for processing the data
if a string is given, then it is treated as naive pandas function
axis :
which axis is the datetime level located
level :
which level is the datetime level
resample_rule :
        How to resample the data for parallel calculation
n_jobs :
n_jobs for joblib
    Returns
    -------
    pd.DataFrame
"""
def _naive_group_apply(df):
if isinstance(apply_func, str):
return getattr(df.groupby(axis=axis, level=level, group_keys=False), apply_func)()
return df.groupby(level=level, group_keys=False).apply(apply_func)
if n_jobs != 1:
dfs = ParallelExt(n_jobs=n_jobs)(
delayed(_naive_group_apply)(sub_df) for idx, sub_df in df.resample(resample_rule, level=level)
)
return pd.concat(dfs, axis=axis).sort_index()
else:
return _naive_group_apply(df)
|
datetime_groupby_apply
This function will apply the `apply_func` on the datetime level index.
Parameters
----------
df :
DataFrame for processing
apply_func : Union[Callable, Text]
apply_func for processing the data
if a string is given, then it is treated as naive pandas function
axis :
which axis is the datetime level located
level :
which level is the datetime level
resample_rule :
    How to resample the data for parallel calculation
n_jobs :
n_jobs for joblib
Returns
-------
pd.DataFrame
|
datetime_groupby_apply
|
python
|
microsoft/qlib
|
qlib/utils/paral.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/paral.py
|
MIT
|
def _replace_and_get_dt(complex_iter):
"""_replace_and_get_dt.
    FIXME: this function may cause an infinite loop when the complex data structure contains circular references
Parameters
----------
complex_iter :
complex_iter
"""
if isinstance(complex_iter, DelayedTask):
dt = complex_iter
return dt, [dt]
elif is_delayed_tuple(complex_iter):
dt = DelayedTuple(complex_iter)
return dt, [dt]
elif isinstance(complex_iter, (list, tuple)):
new_ci = []
dt_all = []
for item in complex_iter:
new_item, dt_list = _replace_and_get_dt(item)
new_ci.append(new_item)
dt_all += dt_list
return new_ci, dt_all
elif isinstance(complex_iter, dict):
new_ci = {}
dt_all = []
for key, item in complex_iter.items():
new_item, dt_list = _replace_and_get_dt(item)
new_ci[key] = new_item
dt_all += dt_list
return new_ci, dt_all
else:
return complex_iter, []
|
_replace_and_get_dt.
FIXME: this function may cause an infinite loop when the complex data structure contains circular references
Parameters
----------
complex_iter :
complex_iter
|
_replace_and_get_dt
|
python
|
microsoft/qlib
|
qlib/utils/paral.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/paral.py
|
MIT
|
def _recover_dt(complex_iter):
"""_recover_dt.
replace all the DelayedTask in the `complex_iter` with its `.res` value
    FIXME: this function may cause an infinite loop when the complex data structure contains circular references
Parameters
----------
complex_iter :
complex_iter
"""
if isinstance(complex_iter, DelayedTask):
return complex_iter.get_replacement()
elif isinstance(complex_iter, (list, tuple)):
return [_recover_dt(item) for item in complex_iter]
elif isinstance(complex_iter, dict):
return {key: _recover_dt(item) for key, item in complex_iter.items()}
else:
return complex_iter
|
_recover_dt.
replace all the DelayedTask in the `complex_iter` with its `.res` value
FIXME: this function may cause an infinite loop when the complex data structure contains circular references
Parameters
----------
complex_iter :
complex_iter
|
_recover_dt
|
python
|
microsoft/qlib
|
qlib/utils/paral.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/paral.py
|
MIT
|
def complex_parallel(paral: Parallel, complex_iter):
"""complex_parallel.
    Find all the delayed functions created by `delayed` in complex_iter, run them in parallel, and then replace them with their results
>>> from qlib.utils.paral import complex_parallel
>>> from joblib import Parallel, delayed
>>> complex_iter = {"a": delayed(sum)([1,2,3]), "b": [1, 2, delayed(sum)([10, 1])]}
>>> complex_parallel(Parallel(), complex_iter)
{'a': 6, 'b': [1, 2, 11]}
Parameters
----------
paral : Parallel
paral
complex_iter :
NOTE: only list, tuple and dict will be explored!!!!
Returns
-------
complex_iter whose delayed joblib tasks are replaced with its execution results.
"""
complex_iter, dt_all = _replace_and_get_dt(complex_iter)
for res, dt in zip(paral(dt.get_delayed_tuple() for dt in dt_all), dt_all):
dt.set_res(res)
complex_iter = _recover_dt(complex_iter)
return complex_iter
|
complex_parallel.
Find all the delayed functions created by `delayed` in complex_iter, run them in parallel, and then replace them with their results
>>> from qlib.utils.paral import complex_parallel
>>> from joblib import Parallel, delayed
>>> complex_iter = {"a": delayed(sum)([1,2,3]), "b": [1, 2, delayed(sum)([10, 1])]}
>>> complex_parallel(Parallel(), complex_iter)
{'a': 6, 'b': [1, 2, 11]}
Parameters
----------
paral : Parallel
paral
complex_iter :
NOTE: only list, tuple and dict will be explored!!!!
Returns
-------
complex_iter whose delayed joblib tasks are replaced with its execution results.
|
complex_parallel
|
python
|
microsoft/qlib
|
qlib/utils/paral.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/paral.py
|
MIT
|
def __init__(self, func: Callable, qlib_config: QlibConfig = None):
"""
Parameters
----------
func : Callable
the function to be wrapped
qlib_config : QlibConfig
Qlib config for initialization in subprocess
"""
self.func = func
self.qlib_config = qlib_config
|
Parameters
----------
func : Callable
the function to be wrapped
qlib_config : QlibConfig
Qlib config for initialization in subprocess
|
__init__
|
python
|
microsoft/qlib
|
qlib/utils/paral.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/paral.py
|
MIT
|
def _func_mod(self, *args, **kwargs):
"""Modify the initial function by adding Qlib initialization"""
if self.qlib_config is not None:
C.register_from_C(self.qlib_config)
return self.func(*args, **kwargs)
|
Modify the initial function by adding Qlib initialization
|
_func_mod
|
python
|
microsoft/qlib
|
qlib/utils/paral.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/paral.py
|
MIT
|
def resam_calendar(
calendar_raw: np.ndarray, freq_raw: Union[str, Freq], freq_sam: Union[str, Freq], region: str = None
) -> np.ndarray:
"""
Resample the calendar with frequency freq_raw into the calendar with frequency freq_sam
Assumption:
    - Fixed length (240) of the calendar within each day.
Parameters
----------
calendar_raw : np.ndarray
The calendar with frequency freq_raw
    freq_raw : Union[str, Freq]
        Frequency of the raw calendar
    freq_sam : Union[str, Freq]
        Sample frequency
region: str
Region, for example, "cn", "us"
Returns
-------
np.ndarray
The calendar with frequency freq_sam
"""
if region is None:
region = C["region"]
freq_raw = Freq(freq_raw)
freq_sam = Freq(freq_sam)
if not len(calendar_raw):
return calendar_raw
# if freq_sam is xminute, divide each trading day into several bars evenly
if freq_sam.base == Freq.NORM_FREQ_MINUTE:
if freq_raw.base != Freq.NORM_FREQ_MINUTE:
raise ValueError("when sampling minute calendar, freq of raw calendar must be minute or min")
else:
if freq_raw.count > freq_sam.count:
raise ValueError("raw freq must be higher than sampling freq")
_calendar_minute = np.unique(list(map(lambda x: cal_sam_minute(x, freq_sam.count, region), calendar_raw)))
return _calendar_minute
# else, convert the raw calendar into day calendar, and divide the whole calendar into several bars evenly
else:
_calendar_day = np.unique(list(map(lambda x: pd.Timestamp(x.year, x.month, x.day, 0, 0, 0), calendar_raw)))
if freq_sam.base == Freq.NORM_FREQ_DAY:
return _calendar_day[:: freq_sam.count]
elif freq_sam.base == Freq.NORM_FREQ_WEEK:
_day_in_week = np.array(list(map(lambda x: x.dayofweek, _calendar_day)))
_calendar_week = _calendar_day[np.ediff1d(_day_in_week, to_begin=-1) < 0]
return _calendar_week[:: freq_sam.count]
elif freq_sam.base == Freq.NORM_FREQ_MONTH:
_day_in_month = np.array(list(map(lambda x: x.day, _calendar_day)))
_calendar_month = _calendar_day[np.ediff1d(_day_in_month, to_begin=-1) < 0]
return _calendar_month[:: freq_sam.count]
else:
raise ValueError("sampling freq must be xmin, xd, xw, xm")
|
Resample the calendar with frequency freq_raw into the calendar with frequency freq_sam
Assumption:
- Fixed length (240) of the calendar within each day.
Parameters
----------
calendar_raw : np.ndarray
The calendar with frequency freq_raw
freq_raw : Union[str, Freq]
    Frequency of the raw calendar
freq_sam : Union[str, Freq]
    Sample frequency
region: str
Region, for example, "cn", "us"
Returns
-------
np.ndarray
The calendar with frequency freq_sam
|
resam_calendar
|
python
|
microsoft/qlib
|
qlib/utils/resam.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/resam.py
|
MIT
|
def get_higher_eq_freq_feature(instruments, fields, start_time=None, end_time=None, freq="day", disk_cache=1):
"""get the feature with higher or equal frequency than `freq`.
Returns
-------
pd.DataFrame
the feature with higher or equal frequency
"""
from ..data.data import D # pylint: disable=C0415
try:
_result = D.features(instruments, fields, start_time, end_time, freq=freq, disk_cache=disk_cache)
_freq = freq
except (ValueError, KeyError) as value_key_e:
_, norm_freq = Freq.parse(freq)
if norm_freq in [Freq.NORM_FREQ_MONTH, Freq.NORM_FREQ_WEEK, Freq.NORM_FREQ_DAY]:
try:
_result = D.features(instruments, fields, start_time, end_time, freq="day", disk_cache=disk_cache)
_freq = "day"
except (ValueError, KeyError):
_result = D.features(instruments, fields, start_time, end_time, freq="1min", disk_cache=disk_cache)
_freq = "1min"
elif norm_freq == Freq.NORM_FREQ_MINUTE:
_result = D.features(instruments, fields, start_time, end_time, freq="1min", disk_cache=disk_cache)
_freq = "1min"
else:
raise ValueError(f"freq {freq} is not supported") from value_key_e
return _result, _freq
|
get the feature with higher or equal frequency than `freq`.
Returns
-------
pd.DataFrame
the feature with higher or equal frequency
|
get_higher_eq_freq_feature
|
python
|
microsoft/qlib
|
qlib/utils/resam.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/resam.py
|
MIT
|
def resam_ts_data(
ts_feature: Union[pd.DataFrame, pd.Series],
start_time: Union[str, pd.Timestamp] = None,
end_time: Union[str, pd.Timestamp] = None,
method: Union[str, Callable] = "last",
method_kwargs: dict = {},
):
"""
Resample value from time-series data
    - If `feature` has MultiIndex[instrument, datetime], apply the `method` to each instrument's data with datetime in [start_time, end_time]
Example:
.. code-block::
print(feature)
$close $volume
instrument datetime
SH600000 2010-01-04 86.778313 16162960.0
2010-01-05 87.433578 28117442.0
2010-01-06 85.713585 23632884.0
2010-01-07 83.788803 20813402.0
2010-01-08 84.730675 16044853.0
SH600655 2010-01-04 2699.567383 158193.328125
2010-01-08 2612.359619 77501.406250
2010-01-11 2712.982422 160852.390625
2010-01-12 2788.688232 164587.937500
2010-01-13 2790.604004 145460.453125
        print(resam_ts_data(feature, start_time="2010-01-04", end_time="2010-01-05", method="last"))
$close $volume
instrument
SH600000 87.433578 28117442.0
SH600655 2699.567383 158193.328125
- Else, the `feature` should have Index[datetime], just apply the `method` to `feature` directly
Example:
.. code-block::
print(feature)
$close $volume
datetime
2010-01-04 86.778313 16162960.0
2010-01-05 87.433578 28117442.0
2010-01-06 85.713585 23632884.0
2010-01-07 83.788803 20813402.0
2010-01-08 84.730675 16044853.0
print(resam_ts_data(feature, start_time="2010-01-04", end_time="2010-01-05", method="last"))
$close 87.433578
$volume 28117442.0
print(resam_ts_data(feature['$close'], start_time="2010-01-04", end_time="2010-01-05", method="last"))
87.433578
Parameters
----------
ts_feature : Union[pd.DataFrame, pd.Series]
Raw time-series feature to be resampled
start_time : Union[str, pd.Timestamp], optional
start sampling time, by default None
end_time : Union[str, pd.Timestamp], optional
end sampling time, by default None
method : Union[str, Callable], optional
sample method, apply method function to each stock series data, by default "last"
        - If method is a str or a callable, it should be an attribute of SeriesGroupBy or DataFrameGroupBy, and groupby.method is applied to the sliced time-series data
- If method is None, do nothing for the sliced time-series data.
method_kwargs : dict, optional
arguments of method, by default {}
Returns
-------
The resampled DataFrame/Series/value, return None when the resampled data is empty.
"""
selector_datetime = slice(start_time, end_time)
from ..data.dataset.utils import get_level_index # pylint: disable=C0415
feature = lazy_sort_index(ts_feature)
datetime_level = get_level_index(feature, level="datetime") == 0
if datetime_level:
feature = feature.loc[selector_datetime]
else:
feature = feature.loc(axis=0)[(slice(None), selector_datetime)]
if feature.empty:
return None
if isinstance(feature.index, pd.MultiIndex):
if callable(method):
method_func = method
return feature.groupby(level="instrument", group_keys=False).apply(method_func, **method_kwargs)
elif isinstance(method, str):
return getattr(feature.groupby(level="instrument", group_keys=False), method)(**method_kwargs)
else:
if callable(method):
method_func = method
return method_func(feature, **method_kwargs)
elif isinstance(method, str):
return getattr(feature, method)(**method_kwargs)
return feature
|
Resample value from time-series data
- If `feature` has MultiIndex[instrument, datetime], apply the `method` to each instrument's data with datetime in [start_time, end_time]
Example:
.. code-block::
print(feature)
$close $volume
instrument datetime
SH600000 2010-01-04 86.778313 16162960.0
2010-01-05 87.433578 28117442.0
2010-01-06 85.713585 23632884.0
2010-01-07 83.788803 20813402.0
2010-01-08 84.730675 16044853.0
SH600655 2010-01-04 2699.567383 158193.328125
2010-01-08 2612.359619 77501.406250
2010-01-11 2712.982422 160852.390625
2010-01-12 2788.688232 164587.937500
2010-01-13 2790.604004 145460.453125
print(resam_ts_data(feature, start_time="2010-01-04", end_time="2010-01-05", method="last"))
$close $volume
instrument
SH600000 87.433578 28117442.0
SH600655 2699.567383 158193.328125
- Else, the `feature` should have Index[datetime], just apply the `method` to `feature` directly
Example:
.. code-block::
print(feature)
$close $volume
datetime
2010-01-04 86.778313 16162960.0
2010-01-05 87.433578 28117442.0
2010-01-06 85.713585 23632884.0
2010-01-07 83.788803 20813402.0
2010-01-08 84.730675 16044853.0
print(resam_ts_data(feature, start_time="2010-01-04", end_time="2010-01-05", method="last"))
$close 87.433578
$volume 28117442.0
print(resam_ts_data(feature['$close'], start_time="2010-01-04", end_time="2010-01-05", method="last"))
87.433578
Parameters
----------
ts_feature : Union[pd.DataFrame, pd.Series]
Raw time-series feature to be resampled
start_time : Union[str, pd.Timestamp], optional
start sampling time, by default None
end_time : Union[str, pd.Timestamp], optional
end sampling time, by default None
method : Union[str, Callable], optional
sample method, apply method function to each stock series data, by default "last"
    - If method is a str or a callable, it should be an attribute of SeriesGroupBy or DataFrameGroupBy, and groupby.method is applied to the sliced time-series data
- If method is None, do nothing for the sliced time-series data.
method_kwargs : dict, optional
arguments of method, by default {}
Returns
-------
The resampled DataFrame/Series/value, return None when the resampled data is empty.
|
resam_ts_data
|
python
|
microsoft/qlib
|
qlib/utils/resam.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/resam.py
|
MIT
|
def _ts_data_valid(ts_feature, last=False):
"""get the first/last not nan value of pd.Series|DataFrame with single level index"""
if isinstance(ts_feature, pd.DataFrame):
return ts_feature.apply(lambda column: get_valid_value(column, last=last))
elif isinstance(ts_feature, pd.Series):
return get_valid_value(ts_feature, last=last)
else:
raise TypeError(f"ts_feature should be pd.DataFrame/Series, not {type(ts_feature)}")
|
get the first/last non-NaN value of a pd.Series|DataFrame with a single-level index
|
_ts_data_valid
|
python
|
microsoft/qlib
|
qlib/utils/resam.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/resam.py
|
MIT
|
def _get_attr_list(self, attr_type: str) -> list:
"""
    Get the include/exclude attribute list for serialization: prefer the instance attribute `_{attr_type}`, otherwise fall back to the class attribute `{attr_type}_attr`
Parameters
----------
attr_type : str
"include" or "exclude"
Returns
-------
list:
"""
if hasattr(self, f"_{attr_type}"):
res = getattr(self, f"_{attr_type}", [])
else:
res = getattr(self.__class__, f"{attr_type}_attr", [])
if res is None:
return []
return res
|
Get the include/exclude attribute list for serialization: prefer the instance attribute `_{attr_type}`, otherwise fall back to the class attribute `{attr_type}_attr`
Parameters
----------
attr_type : str
"include" or "exclude"
Returns
-------
list:
|
_get_attr_list
|
python
|
microsoft/qlib
|
qlib/utils/serial.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/serial.py
|
MIT
|
def config(self, recursive=False, **kwargs):
"""
configure the serializable object
Parameters
----------
kwargs may include following keys
dump_all : bool
            whether the object will dump all attributes
exclude : list
What attribute will not be dumped
include : list
What attribute will be dumped
recursive : bool
            whether the configuration will be applied recursively
"""
keys = {"dump_all", "exclude", "include"}
for k, v in kwargs.items():
if k in keys:
attr_name = f"_{k}"
setattr(self, attr_name, v)
else:
raise KeyError(f"Unknown parameter: {k}")
if recursive:
for obj in self.__dict__.values():
# set flag to prevent endless loop
self.__dict__[self.FLAG_KEY] = True
if isinstance(obj, Serializable) and self.FLAG_KEY not in obj.__dict__:
obj.config(recursive=True, **kwargs)
del self.__dict__[self.FLAG_KEY]
|
configure the serializable object
Parameters
----------
kwargs may include following keys
dump_all : bool
    whether the object will dump all attributes
exclude : list
What attribute will not be dumped
include : list
What attribute will be dumped
recursive : bool
    whether the configuration will be applied recursively
|
config
|
python
|
microsoft/qlib
|
qlib/utils/serial.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/serial.py
|
MIT
|
def to_pickle(self, path: Union[Path, str], **kwargs):
"""
Dump self to a pickle file.
path (Union[Path, str]): the path to dump
kwargs may include following keys
dump_all : bool
            whether the object will dump all attributes
exclude : list
What attribute will not be dumped
include : list
What attribute will be dumped
"""
self.config(**kwargs)
with Path(path).open("wb") as f:
# pickle interface like backend; such as dill
self.get_backend().dump(self, f, protocol=C.dump_protocol_version)
|
Dump self to a pickle file.
path (Union[Path, str]): the path to dump
kwargs may include following keys
dump_all : bool
    whether the object will dump all attributes
exclude : list
What attribute will not be dumped
include : list
What attribute will be dumped
|
to_pickle
|
python
|
microsoft/qlib
|
qlib/utils/serial.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/serial.py
|
MIT
|
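A round-trip sketch for `to_pickle` and the `load` classmethod below. The `_b` line assumes the usual `Serializable` convention that underscore-prefixed attributes are skipped unless `dump_all=True`; the class name and file name are illustrative:

from qlib.utils.serial import Serializable

class MyState(Serializable):
    def __init__(self):
        self.a = 1
        self._b = 2  # assumed skipped by default; kept when dump_all=True

obj = MyState()
obj.to_pickle("state.pkl", dump_all=True)
restored = MyState.load("state.pkl")  # raises TypeError if the pickle is not a MyState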
def load(cls, filepath):
"""
Load the serializable class from a filepath.
Args:
filepath (str): the path of file
Raises:
        TypeError: the pickled object must be an instance of `cls`
Returns:
        `cls`: the loaded instance of `cls`
"""
with open(filepath, "rb") as f:
        obj = cls.get_backend().load(f)
    if isinstance(obj, cls):
        return obj
    else:
        raise TypeError(f"The instance of {type(obj)} is not a valid `{cls}`!")
|
Load the serializable class from a filepath.
Args:
filepath (str): the path of file
Raises:
    TypeError: the pickled object must be an instance of `cls`
Returns:
    `cls`: the loaded instance of `cls`
|
load
|
python
|
microsoft/qlib
|
qlib/utils/serial.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/serial.py
|
MIT
|
def get_backend(cls):
"""
Return the real backend of a Serializable class. The pickle_backend value can be "pickle" or "dill".
Returns:
module: pickle or dill module based on pickle_backend
"""
# NOTE: pickle interface like backend; such as dill
if cls.pickle_backend == "pickle":
return pickle
elif cls.pickle_backend == "dill":
return dill
else:
raise ValueError("Unknown pickle backend, please use 'pickle' or 'dill'.")
|
Return the real backend of a Serializable class. The pickle_backend value can be "pickle" or "dill".
Returns:
module: pickle or dill module based on pickle_backend
|
get_backend
|
python
|
microsoft/qlib
|
qlib/utils/serial.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/serial.py
|
MIT
|
def general_dump(obj, path: Union[Path, str]):
"""
A general dumping method for object
Parameters
----------
obj : object
the object to be dumped
path : Union[Path, str]
the target path the data will be dumped
"""
path = Path(path)
if isinstance(obj, Serializable):
obj.to_pickle(path)
else:
with path.open("wb") as f:
pickle.dump(obj, f, protocol=C.dump_protocol_version)
|
A general dumping method for object
Parameters
----------
obj : object
the object to be dumped
path : Union[Path, str]
the target path the data will be dumped
|
general_dump
|
python
|
microsoft/qlib
|
qlib/utils/serial.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/serial.py
|
MIT
|
def get_min_cal(shift: int = 0, region: str = REG_CN) -> List[time]:
"""
get the minute level calendar in day period
Parameters
----------
shift : int
the shift direction would be like pandas shift.
series.shift(1) will replace the value at `i`-th with the one at `i-1`-th
region: str
Region, for example, "cn", "us"
Returns
-------
List[time]:
"""
cal = []
if region == REG_CN:
for ts in list(
pd.date_range(CN_TIME[0], CN_TIME[1] - timedelta(minutes=1), freq="1min") - pd.Timedelta(minutes=shift)
) + list(
pd.date_range(CN_TIME[2], CN_TIME[3] - timedelta(minutes=1), freq="1min") - pd.Timedelta(minutes=shift)
):
cal.append(ts.time())
elif region == REG_TW:
for ts in list(
pd.date_range(TW_TIME[0], TW_TIME[1] - timedelta(minutes=1), freq="1min") - pd.Timedelta(minutes=shift)
):
cal.append(ts.time())
elif region == REG_US:
for ts in list(
pd.date_range(US_TIME[0], US_TIME[1] - timedelta(minutes=1), freq="1min") - pd.Timedelta(minutes=shift)
):
cal.append(ts.time())
else:
raise ValueError(f"{region} is not supported")
return cal
|
get the minute level calendar in day period
Parameters
----------
shift : int
the shift direction would be like pandas shift.
series.shift(1) will replace the value at `i`-th with the one at `i-1`-th
region: str
Region, for example, "cn", "us"
Returns
-------
List[time]:
|
get_min_cal
|
python
|
microsoft/qlib
|
qlib/utils/time.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/time.py
|
MIT
|
def is_single_value(start_time, end_time, freq, region: str = REG_CN):
"""Is there only one piece of data for stock market.
Parameters
----------
start_time : Union[pd.Timestamp, str]
closed start time for data.
end_time : Union[pd.Timestamp, str]
closed end time for data.
    freq :
        the data frequency; a time delta comparable with `end_time - start_time`
region: str
Region, for example, "cn", "us"
Returns
-------
bool
True means one piece of data to obtain.
"""
if region == REG_CN:
if end_time - start_time < freq:
return True
if start_time.hour == 11 and start_time.minute == 29 and start_time.second == 0:
return True
if start_time.hour == 14 and start_time.minute == 59 and start_time.second == 0:
return True
return False
elif region == REG_TW:
if end_time - start_time < freq:
return True
if start_time.hour == 13 and start_time.minute >= 25 and start_time.second == 0:
return True
return False
elif region == REG_US:
if end_time - start_time < freq:
return True
if start_time.hour == 15 and start_time.minute == 59 and start_time.second == 0:
return True
return False
else:
raise NotImplementedError(f"please implement the is_single_value func for {region}")
|
Is there only one piece of data for stock market.
Parameters
----------
start_time : Union[pd.Timestamp, str]
closed start time for data.
end_time : Union[pd.Timestamp, str]
closed end time for data.
freq :
    the data frequency; a time delta comparable with `end_time - start_time`
region: str
Region, for example, "cn", "us"
Returns
-------
bool
True means one piece of data to obtain.
|
is_single_value
|
python
|
microsoft/qlib
|
qlib/utils/time.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/time.py
|
MIT
|
def parse(freq: str) -> Tuple[int, str]:
"""
Parse freq into a unified format
Parameters
----------
freq : str
Raw freq, supported freq should match the re '^([0-9]*)(month|mon|week|w|day|d|minute|min)$'
Returns
-------
freq: Tuple[int, str]
Unified freq, including freq count and unified freq unit. The freq unit should be '[month|week|day|minute]'.
Example:
.. code-block::
print(Freq.parse("day"))
(1, "day" )
print(Freq.parse("2mon"))
(2, "month")
print(Freq.parse("10w"))
(10, "week")
"""
freq = freq.lower()
match_obj = re.match("^([0-9]*)(month|mon|week|w|day|d|minute|min)$", freq)
if match_obj is None:
raise ValueError(
"freq format is not supported, the freq should be like (n)month/mon, (n)week/w, (n)day/d, (n)minute/min"
)
_count = int(match_obj.group(1)) if match_obj.group(1) else 1
_freq = match_obj.group(2)
_freq_format_dict = {
"month": Freq.NORM_FREQ_MONTH,
"mon": Freq.NORM_FREQ_MONTH,
"week": Freq.NORM_FREQ_WEEK,
"w": Freq.NORM_FREQ_WEEK,
"day": Freq.NORM_FREQ_DAY,
"d": Freq.NORM_FREQ_DAY,
"minute": Freq.NORM_FREQ_MINUTE,
"min": Freq.NORM_FREQ_MINUTE,
}
return _count, _freq_format_dict[_freq]
|
Parse freq into a unified format
Parameters
----------
freq : str
Raw freq, supported freq should match the re '^([0-9]*)(month|mon|week|w|day|d|minute|min)$'
Returns
-------
freq: Tuple[int, str]
Unified freq, including freq count and unified freq unit. The freq unit should be '[month|week|day|minute]'.
Example:
.. code-block::
print(Freq.parse("day"))
(1, "day" )
print(Freq.parse("2mon"))
(2, "month")
print(Freq.parse("10w"))
(10, "week")
|
parse
|
python
|
microsoft/qlib
|
qlib/utils/time.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/time.py
|
MIT
|
def get_min_delta(left_frq: str, right_freq: str):
"""Calculate freq delta
Parameters
----------
left_frq: str
right_freq: str
    Returns
    -------
    int
        the freq delta in minutes (left minus right)
"""
minutes_map = {
Freq.NORM_FREQ_MINUTE: 1,
Freq.NORM_FREQ_DAY: 60 * 24,
Freq.NORM_FREQ_WEEK: 7 * 60 * 24,
        Freq.NORM_FREQ_MONTH: 30 * 60 * 24,  # approximate a month as 30 days
}
left_freq = Freq(left_frq)
left_minutes = left_freq.count * minutes_map[left_freq.base]
right_freq = Freq(right_freq)
right_minutes = right_freq.count * minutes_map[right_freq.base]
return left_minutes - right_minutes
|
Calculate freq delta
Parameters
----------
left_frq: str
right_freq: str
Returns
-------
int
    the freq delta in minutes (left minus right)
|
get_min_delta
|
python
|
microsoft/qlib
|
qlib/utils/time.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/time.py
|
MIT
|
def get_recent_freq(base_freq: Union[str, "Freq"], freq_list: List[Union[str, "Freq"]]) -> Optional["Freq"]:
"""Get the closest freq to base_freq from freq_list
Parameters
----------
base_freq
freq_list
Returns
-------
    Optional[Freq]
        the closest freq if found, else None
"""
base_freq = Freq(base_freq)
# use the nearest freq greater than 0
min_freq = None
for _freq in freq_list:
_min_delta = Freq.get_min_delta(base_freq, _freq)
if _min_delta < 0:
continue
if min_freq is None:
min_freq = (_min_delta, str(_freq))
continue
        min_freq = min_freq if min_freq[0] <= _min_delta else (_min_delta, str(_freq))
return min_freq[1] if min_freq else None
|
Get the closest freq to base_freq from freq_list
Parameters
----------
base_freq
freq_list
Returns
-------
Optional[Freq]
    the closest freq if found, else None
|
get_recent_freq
|
python
|
microsoft/qlib
|
qlib/utils/time.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/time.py
|
MIT
|
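A small sketch, assuming `get_recent_freq` is exposed as a static method on `Freq` in `qlib.utils.time` (the `Freq.get_min_delta` call above suggests this): candidates finer than needed have a positive delta and are kept, coarser ones are skipped.

from qlib.utils.time import Freq

print(Freq.get_recent_freq("day", ["1min", "day", "week"]))   # -> day
print(Freq.get_recent_freq("2day", ["1min", "day", "week"]))  # -> day (week is too coarse)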
def get_day_min_idx_range(start: str, end: str, freq: str, region: str) -> Tuple[int, int]:
"""
get the min-bar index in a day for a time range (both left and right is closed) given a fixed frequency
Parameters
----------
start : str
e.g. "9:30"
end : str
e.g. "14:30"
    freq : str
        e.g. "1min"
    region : str
        Region, for example, "cn", "us"
Returns
-------
Tuple[int, int]:
The index of start and end in the calendar. Both left and right are **closed**
"""
start = pd.Timestamp(start).time()
end = pd.Timestamp(end).time()
freq = Freq(freq)
in_day_cal = get_min_cal(region=region)[:: freq.count]
left_idx = bisect.bisect_left(in_day_cal, start)
right_idx = bisect.bisect_right(in_day_cal, end) - 1
return left_idx, right_idx
|
get the min-bar index in a day for a time range (both left and right is closed) given a fixed frequency
Parameters
----------
start : str
e.g. "9:30"
end : str
e.g. "14:30"
freq : str
    e.g. "1min"
region : str
    Region, for example, "cn", "us"
Returns
-------
Tuple[int, int]:
The index of start and end in the calendar. Both left and right are **closed**
|
get_day_min_idx_range
|
python
|
microsoft/qlib
|
qlib/utils/time.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/time.py
|
MIT
|
def cal_sam_minute(x: pd.Timestamp, sam_minutes: int, region: str = REG_CN) -> pd.Timestamp:
"""
align the minute-level data to a down sampled calendar
    e.g. align 10:38 to 10:35 at the 5-minute level (10:30 at the 10-minute level)
Parameters
----------
x : pd.Timestamp
datetime to be aligned
sam_minutes : int
align to `sam_minutes` minute-level calendar
region: str
Region, for example, "cn", "us"
Returns
-------
pd.Timestamp:
        the aligned datetime
"""
cal = get_min_cal(C.min_data_shift, region)[::sam_minutes]
idx = bisect.bisect_right(cal, x.time()) - 1
_date, new_time = x.date(), cal[idx]
return concat_date_time(_date, new_time)
|
align the minute-level data to a down sampled calendar
e.g. align 10:38 to 10:35 at the 5-minute level (10:30 at the 10-minute level)
Parameters
----------
x : pd.Timestamp
datetime to be aligned
sam_minutes : int
align to `sam_minutes` minute-level calendar
region: str
Region, for example, "cn", "us"
Returns
-------
pd.Timestamp:
    the aligned datetime
|
cal_sam_minute
|
python
|
microsoft/qlib
|
qlib/utils/time.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/time.py
|
MIT
|
def epsilon_change(date_time: pd.Timestamp, direction: str = "backward") -> pd.Timestamp:
"""
    change the time by an infinitesimal amount (implemented as one second).
Parameters
----------
date_time : pd.Timestamp
the original time
direction : str
        the direction in which the time is shifted
- "backward" for going to history
- "forward" for going to the future
Returns
-------
pd.Timestamp:
the shifted time
"""
if direction == "backward":
return date_time - pd.Timedelta(seconds=1)
elif direction == "forward":
return date_time + pd.Timedelta(seconds=1)
else:
raise ValueError("Wrong input")
|
change the time by an infinitesimal amount (implemented as one second).
Parameters
----------
date_time : pd.Timestamp
the original time
direction : str
    the direction in which the time is shifted
- "backward" for going to history
- "forward" for going to the future
Returns
-------
pd.Timestamp:
the shifted time
|
epsilon_change
|
python
|
microsoft/qlib
|
qlib/utils/time.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/time.py
|
MIT
|
def get_period_list(first: int, last: int, quarterly: bool) -> List[int]:
"""
This method will be used in PIT database.
    It returns all the possible values between `first` and `last` (both `first` and `last` are included)
Parameters
----------
quarterly : bool
will it return quarterly index or yearly index.
Returns
-------
List[int]
the possible index between [first, last]
"""
if not quarterly:
assert all(1900 <= x <= 2099 for x in (first, last)), "invalid arguments"
return list(range(first, last + 1))
else:
assert all(190000 <= x <= 209904 for x in (first, last)), "invalid arguments"
res = []
for year in range(first // 100, last // 100 + 1):
for q in range(1, 5):
period = year * 100 + q
if first <= period <= last:
                    res.append(period)
return res
|
This method will be used in PIT database.
It returns all the possible values between `first` and `last` (both `first` and `last` are included)
Parameters
----------
quarterly : bool
will it return quarterly index or yearly index.
Returns
-------
List[int]
the possible index between [first, last]
|
get_period_list
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
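Two quick examples of the period indices, one yearly and one quarterly (import path assumed to be `qlib.utils`, where the function is defined):

from qlib.utils import get_period_list

print(get_period_list(2018, 2020, quarterly=False))
# [2018, 2019, 2020]
print(get_period_list(201803, 201902, quarterly=True))
# [201803, 201804, 201901, 201902]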
def read_period_data(
index_path,
data_path,
period,
cur_date_int: int,
quarterly,
last_period_index: int = None,
):
"""
At `cur_date`(e.g. 20190102), read the information at `period`(e.g. 201803).
Only the updating info before cur_date or at cur_date will be used.
Parameters
----------
period: int
        the date period represented by an integer, e.g. 201901 corresponds to the first quarter in 2019
cur_date_int: int
        the current date represented by an integer, e.g. 20190102
last_period_index: int
        an optional parameter; it is designed to avoid repeatedly accessing the .index data of the PIT database when
        sequentially observing the data (because the latest index of a specific period certainly appears after the one in the last observation).
Returns
-------
    the queried value and the byte index of its latest revision (reusable as `last_period_index` in the next call)
"""
DATA_DTYPE = "".join(
[
C.pit_record_type["date"],
C.pit_record_type["period"],
C.pit_record_type["value"],
C.pit_record_type["index"],
]
)
PERIOD_DTYPE = C.pit_record_type["period"]
INDEX_DTYPE = C.pit_record_type["index"]
NAN_VALUE = C.pit_record_nan["value"]
NAN_INDEX = C.pit_record_nan["index"]
# find the first index of linked revisions
if last_period_index is None:
with open(index_path, "rb") as fi:
(first_year,) = struct.unpack(PERIOD_DTYPE, fi.read(struct.calcsize(PERIOD_DTYPE)))
all_periods = np.fromfile(fi, dtype=INDEX_DTYPE)
offset = get_period_offset(first_year, period, quarterly)
_next = all_periods[offset]
else:
_next = last_period_index
# load data following the `_next` link
prev_value = NAN_VALUE
prev_next = _next
with open(data_path, "rb") as fd:
while _next != NAN_INDEX:
fd.seek(_next)
date, period, value, new_next = struct.unpack(DATA_DTYPE, fd.read(struct.calcsize(DATA_DTYPE)))
if date > cur_date_int:
break
prev_next = _next
_next = new_next
prev_value = value
return prev_value, prev_next
|
At `cur_date` (e.g. 20190102), read the information at `period` (e.g. 201803).
Only the updating info before or at `cur_date` will be used.
Parameters
----------
index_path :
    path to the `.index` file of the PIT database
data_path :
    path to the `.data` file of the PIT database
period : int
    date period represented by an integer, e.g. 201901 corresponds to the first quarter of 2019
cur_date_int : int
    date represented by an integer, e.g. 20190102
quarterly : bool
    whether the data is quarterly (True) or yearly (False)
last_period_index : int
    an optional parameter; it is designed to avoid repeatedly accessing the `.index` data of the PIT
    database when observing the data sequentially (because the latest index of a specific period of
    data certainly appears after the one found in the last observation).
Returns
-------
the queried value and the byte index of the record it was read from
(which can be passed as `last_period_index` in the next sequential call)
|
read_period_data
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
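A hedged usage sketch for `read_period_data`; the file paths below are placeholders for illustration, not actual repository files:

# Hypothetical call: read the value reported for 2018Q3 (201803) as it was
# known on 2019-01-02, following the PIT revision chain.
value, last_index = read_period_data(
    index_path="~/.qlib/pit/sh600000/roewa_q.index",  # placeholder path
    data_path="~/.qlib/pit/sh600000/roewa_q.data",    # placeholder path
    period=201803,
    cur_date_int=20190102,
    quarterly=True,
)
# `last_index` can be passed as `last_period_index` on the next sequential read.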
def np_ffill(arr: np.ndarray):
    """
    Forward-fill a 1D numpy array (leading NaNs are kept as NaN).
    Parameters
    ----------
    arr : np.ndarray
        Input numpy 1D array
    """
    mask = np.isnan(arr.astype(float))  # np.isnan only works on float dtypes, hence the cast
    # get fill index: keep each valid position's own index, else fall back to the last valid index
    idx = np.where(~mask, np.arange(mask.shape[0]), 0)
    np.maximum.accumulate(idx, out=idx)
    return arr[idx]
|
Forward-fill a 1D numpy array (leading NaNs are kept as NaN).
Parameters
----------
arr : np.ndarray
    Input numpy 1D array
|
np_ffill
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
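A minimal usage sketch for `np_ffill` (example data is hypothetical):

import numpy as np

arr = np.array([np.nan, 1.0, np.nan, np.nan, 4.0])
np_ffill(arr)
# array([nan,  1.,  1.,  1.,  4.])  -- the leading NaN has no prior value to fill from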
def lower_bound(data, val, level=0):
    """Lower bound for a list of multi-field records: return the first index
    whose `level`-th field is >= `val`.
    For a single-field list, use `bisect.bisect_left` instead.
    """
    left = 0
    right = len(data)
    while left < right:
        mid = (left + right) // 2
        if val <= data[mid][level]:
            right = mid
        else:
            left = mid + 1
    return left
|
Lower bound for a list of multi-field records: return the first index
whose `level`-th field is >= `val`.
For a single-field list, use `bisect.bisect_left` instead.
|
lower_bound
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
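A short sketch with hypothetical records sorted on field 0:

data = [(20190101, "a"), (20190102, "b"), (20190102, "c"), (20190104, "d")]
lower_bound(data, 20190102)  # 1 -- first index with data[i][0] >= 20190102
lower_bound(data, 20190103)  # 3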
def upper_bound(data, val, level=0):
    """Upper bound for a list of multi-field records: return the first index
    whose `level`-th field is > `val`.
    For a single-field list, use `bisect.bisect_right` instead.
    """
    left = 0
    right = len(data)
    while left < right:
        mid = (left + right) // 2
        if val >= data[mid][level]:
            left = mid + 1
        else:
            right = mid
    return left
|
Upper bound for a list of multi-field records: return the first index
whose `level`-th field is > `val`.
For a single-field list, use `bisect.bisect_right` instead.
|
upper_bound
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
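Continuing the hypothetical records from the `lower_bound` sketch above:

data = [(20190101, "a"), (20190102, "b"), (20190102, "c"), (20190104, "d")]
upper_bound(data, 20190102)  # 3 -- first index with data[i][0] > 20190102
upper_bound(data, 20190104)  # 4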
def compare_dict_value(src_data: dict, dst_data: dict):
    """Compare dict values
    :param src_data: the source dict
    :param dst_data: the dict to compare against
    :return: the changed lines of the character-level diff between the JSON dumps of the two dicts
    """

    class DateEncoder(json.JSONEncoder):
        # FIXME: this encoder is only guaranteed to be accurate to the day;
        # there may be a bug at minute resolution
        def default(self, o):
            if isinstance(o, (datetime.datetime, datetime.date)):
                return o.strftime("%Y-%m-%d %H:%M:%S")
            return json.JSONEncoder.default(self, o)

    src_data = json.dumps(src_data, indent=4, sort_keys=True, cls=DateEncoder)
    dst_data = json.dumps(dst_data, indent=4, sort_keys=True, cls=DateEncoder)
    diff = difflib.ndiff(src_data, dst_data)
    changes = [line for line in diff if line.startswith("+ ") or line.startswith("- ")]
    return changes
|
Compare dict values
:param src_data: the source dict
:param dst_data: the dict to compare against
:return: the changed lines of the character-level diff between the JSON dumps of the two dicts
|
compare_dict_value
|
python
|
microsoft/qlib
|
qlib/utils/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/utils/__init__.py
|
MIT
|
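A brief usage sketch for `compare_dict_value` (the dicts are hypothetical):

import datetime

src = {"start_time": datetime.date(2020, 1, 1), "n": 1}
dst = {"start_time": datetime.date(2020, 1, 2), "n": 1}
compare_dict_value(src, dst)
# changed characters of the diff between the two JSON dumps, e.g. ['- 1', '+ 2']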