code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def quantile_loss(self, a, b):
"""Returns quantile loss for specified quantiles.
Args:
a: Targets
b: Predictions
"""
quantiles_used = set(self.quantiles)
loss = 0.0
# `valid_quantiles` and `output_size` are free names from the enclosing scope
# in the original source; the instance attributes are used here so the method stands alone.
for i, quantile in enumerate(self.quantiles):
if quantile in quantiles_used:
loss += utils.tensorflow_quantile_loss(
a[Ellipsis, self.output_size * i : self.output_size * (i + 1)],
b[Ellipsis, self.output_size * i : self.output_size * (i + 1)],
quantile,
)
return loss
|
Returns quantile loss for specified quantiles.
Args:
a: Targets
b: Predictions
|
quantile_loss
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/tft_model.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
|
MIT
|
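The slicing above assumes the network emits all quantile forecasts side by side along the last axis. A minimal numpy sketch of that stacked layout (shapes are illustrative, not taken from the source):

import numpy as np

output_size, quantiles = 1, [0.1, 0.5, 0.9]
# (batch, time, output_size * n_quantiles): each quantile owns one slice
preds = np.random.rand(4, 6, output_size * len(quantiles))
for i, q in enumerate(quantiles):
    block = preds[..., output_size * i : output_size * (i + 1)]
    print(q, block.shape)  # -> (4, 6, 1) per quantile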
def fit(self, train_df=None, valid_df=None):
"""Fits deep neural network for given training and validation data.
Args:
train_df: DataFrame for training data
valid_df: DataFrame for validation data
"""
print("*** Fitting {} ***".format(self.name))
# Add relevant callbacks
callbacks = [
tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=self.early_stopping_patience, min_delta=1e-4),
tf.keras.callbacks.ModelCheckpoint(
filepath=self.get_keras_saved_path(self._temp_folder),
monitor="val_loss",
save_best_only=True,
save_weights_only=True,
),
tf.keras.callbacks.TerminateOnNaN(),
]
print("Getting batched_data")
if train_df is None:
print("Using cached training data")
train_data = TFTDataCache.get("train")
else:
train_data = self._batch_data(train_df)
if valid_df is None:
print("Using cached validation data")
valid_data = TFTDataCache.get("valid")
else:
valid_data = self._batch_data(valid_df)
print("Using keras standard fit")
def _unpack(data):
return data["inputs"], data["outputs"], self._get_active_locations(data["active_entries"])
# Unpack without sample weights
data, labels, active_flags = _unpack(train_data)
val_data, val_labels, val_flags = _unpack(valid_data)
all_callbacks = callbacks
self.model.fit(
x=data,
y=np.concatenate([labels, labels, labels], axis=-1),
sample_weight=active_flags,
epochs=self.num_epochs,
batch_size=self.minibatch_size,
validation_data=(val_data, np.concatenate([val_labels, val_labels, val_labels], axis=-1), val_flags),
callbacks=all_callbacks,
shuffle=True,
use_multiprocessing=True,
workers=self.n_multiprocessing_workers,
)
# Load best checkpoint again
tmp_checkpoint = self.get_keras_saved_path(self._temp_folder)
if os.path.exists(tmp_checkpoint):
self.load(self._temp_folder, use_keras_loadings=True)
else:
print("Cannot load from {}, skipping ...".format(self._temp_folder))
|
Fits deep neural network for given training and validation data.
Args:
train_df: DataFrame for training data
valid_df: DataFrame for validation data
|
fit
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/tft_model.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
|
MIT
|
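The triple concatenation of labels in fit mirrors the stacked quantile outputs: the same target is tiled once per quantile head so the loss can slice it back apart. A minimal sketch, assuming three quantiles and a 1-D target:

import numpy as np

labels = np.random.rand(32, 6, 1)           # (batch, horizon, target_dim)
y = np.concatenate([labels] * 3, axis=-1)   # one copy per quantile head
assert y.shape == (32, 6, 3)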
def evaluate(self, data=None, eval_metric="loss"):
"""Applies evaluation metric to the training data.
Args:
data: Dataframe for evaluation
eval_metric: Evaluation metic to return, based on model definition.
Returns:
Computed evaluation loss.
"""
if data is None:
print("Using cached validation data")
raw_data = TFTDataCache.get("valid")
else:
raw_data = self._batch_data(data)
inputs = raw_data["inputs"]
outputs = raw_data["outputs"]
active_entries = self._get_active_locations(raw_data["active_entries"])
metric_values = self.model.evaluate(
x=inputs,
y=np.concatenate([outputs, outputs, outputs], axis=-1),
sample_weight=active_entries,
workers=16,
use_multiprocessing=True,
)
metrics = pd.Series(metric_values, self.model.metrics_names)
return metrics[eval_metric]
|
Applies evaluation metric to the provided data (cached validation data by default).
Args:
data: Dataframe for evaluation
eval_metric: Evaluation metric to return, based on model definition.
Returns:
Computed evaluation loss.
|
evaluate
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/tft_model.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
|
MIT
|
def predict(self, df, return_targets=False):
"""Computes predictions for a given input dataset.
Args:
df: Input dataframe
return_targets: Whether to also return outputs aligned with predictions to
facilitate evaluation
Returns:
Dictionary mapping quantile names (e.g. "p50") to prediction dataframes, plus a "targets" entry when return_targets is True.
"""
data = self._batch_data(df)
inputs = data["inputs"]
time = data["time"]
identifier = data["identifier"]
outputs = data["outputs"]
combined = self.model.predict(inputs, workers=16, use_multiprocessing=True, batch_size=self.minibatch_size)
# Format output_csv
if self.output_size != 1:
raise NotImplementedError("Current version only supports 1D targets!")
def format_outputs(prediction):
"""Returns formatted dataframes for prediction."""
flat_prediction = pd.DataFrame(
prediction[:, :, 0], columns=["t+{}".format(i) for i in range(self.time_steps - self.num_encoder_steps)]
)
cols = list(flat_prediction.columns)
flat_prediction["forecast_time"] = time[:, self.num_encoder_steps - 1, 0]
flat_prediction["identifier"] = identifier[:, 0, 0]
# Arrange in order
return flat_prediction[["forecast_time", "identifier"] + cols]
# Extract predictions for each quantile into different entries
process_map = {
"p{}".format(int(q * 100)): combined[Ellipsis, i * self.output_size : (i + 1) * self.output_size]
for i, q in enumerate(self.quantiles)
}
if return_targets:
# Add targets if relevant
process_map["targets"] = outputs
return {k: format_outputs(process_map[k]) for k in process_map}
|
Computes predictions for a given input dataset.
Args:
df: Input dataframe
return_targets: Whether to also return outputs aligned with predictions to
facilitate evaluation
Returns:
Dictionary mapping quantile names (e.g. "p50") to prediction dataframes, plus a "targets" entry when return_targets is True.
|
predict
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/tft_model.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
|
MIT
|
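One way to consume the returned dictionary, simulated here with hand-built frames so the snippet runs standalone (column names follow format_outputs above):

import pandas as pd

p50 = pd.DataFrame({"forecast_time": [1], "identifier": ["A"], "t+0": [0.5], "t+1": [0.7]})
targets = pd.DataFrame({"forecast_time": [1], "identifier": ["A"], "t+0": [0.4], "t+1": [0.9]})
horizon_cols = [c for c in p50.columns if c.startswith("t+")]
mae = (p50[horizon_cols] - targets[horizon_cols]).abs().mean()
print(mae)  # per-horizon mean absolute error of the median forecast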
def get_attention(self, df):
"""Computes TFT attention weights for a given dataset.
Args:
df: Input dataframe
Returns:
Dictionary of numpy arrays for temporal attention weights and variable
selection weights, along with their identifiers and time indices
"""
data = self._batch_data(df)
inputs = data["inputs"]
identifiers = data["identifier"]
time = data["time"]
def get_batch_attention_weights(input_batch):
"""Returns weights for a given minibatch of data."""
input_placeholder = self._input_placeholder
attention_weights = {}
for k in self._attention_components:
attention_weight = tf.keras.backend.get_session().run(
self._attention_components[k], {input_placeholder: input_batch.astype(np.float32)}
)
attention_weights[k] = attention_weight
return attention_weights
# Compute number of batches
batch_size = self.minibatch_size
n = inputs.shape[0]
num_batches = n // batch_size
if n - (num_batches * batch_size) > 0:
num_batches += 1
# Split up inputs into batches
batched_inputs = [inputs[i * batch_size : (i + 1) * batch_size, Ellipsis] for i in range(num_batches)]
# Get attention weights, while avoiding large memory increases
attention_by_batch = [get_batch_attention_weights(batch) for batch in batched_inputs]
attention_weights = {}
for k in self._attention_components:
attention_weights[k] = []
for batch_weights in attention_by_batch:
attention_weights[k].append(batch_weights[k])
if len(attention_weights[k][0].shape) == 4:
tmp = np.concatenate(attention_weights[k], axis=1)
else:
tmp = np.concatenate(attention_weights[k], axis=0)
del attention_weights[k]
gc.collect()
attention_weights[k] = tmp
attention_weights["identifiers"] = identifiers[:, 0, 0]
attention_weights["time"] = time[:, :, 0]
return attention_weights
|
Computes TFT attention weights for a given dataset.
Args:
df: Input dataframe
Returns:
Dictionary of numpy arrays for temporal attention weights and variable
selection weights, along with their identifiers and time indices
|
get_attention
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/tft_model.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
|
MIT
|
def get_batch_attention_weights(input_batch):
"""Returns weights for a given minibatch of data."""
input_placeholder = self._input_placeholder
attention_weights = {}
for k in self._attention_components:
attention_weight = tf.keras.backend.get_session().run(
self._attention_components[k], {input_placeholder: input_batch.astype(np.float32)}
)
attention_weights[k] = attention_weight
return attention_weights
|
Returns weights for a given minibatch of data.
|
get_batch_attention_weights
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/tft_model.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
|
MIT
|
def reset_temp_folder(self):
"""Deletes and recreates folder with temporary Keras training outputs."""
print("Resetting temp folder...")
utils.create_folder_if_not_exist(self._temp_folder)  # ensure it exists so rmtree below cannot fail
shutil.rmtree(self._temp_folder)
os.makedirs(self._temp_folder)
|
Deletes and recreates folder with temporary Keras training outputs.
|
reset_temp_folder
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/tft_model.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
|
MIT
|
def save(self, model_folder):
"""Saves optimal TFT weights.
Args:
model_folder: Location to serialize model.
"""
# Allows for direct serialisation of tensorflow variables to avoid spurious
# issue with Keras that leads to different performance evaluation results
# when model is reloaded (https://github.com/keras-team/keras/issues/4875).
utils.save(tf.keras.backend.get_session(), model_folder, cp_name=self.name, scope=self.name)
|
Saves optimal TFT weights.
Args:
model_folder: Location to serialize model.
|
save
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/tft_model.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
|
MIT
|
def load(self, model_folder, use_keras_loadings=False):
"""Loads TFT weights.
Args:
model_folder: Folder containing serialized models.
use_keras_loadings: Whether to load from Keras checkpoint.
"""
if use_keras_loadings:
# Loads temporary Keras model saved during training.
serialisation_path = self.get_keras_saved_path(model_folder)
print("Loading model from {}".format(serialisation_path))
self.model.load_weights(serialisation_path)
else:
# Loads tensorflow graph for optimal models.
utils.load(tf.keras.backend.get_session(), model_folder, cp_name=self.name, scope=self.name)
|
Loads TFT weights.
Args:
model_folder: Folder containing serialized models.
use_keras_loadings: Whether to load from Keras checkpoint.
|
load
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/tft_model.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
|
MIT
|
def get_hyperparm_choices(cls):
"""Returns hyperparameter ranges for random search."""
return {
"dropout_rate": [0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 0.9],
"hidden_layer_size": [10, 20, 40, 80, 160, 240, 320],
"minibatch_size": [64, 128, 256],
"learning_rate": [1e-4, 1e-3, 1e-2],
"max_gradient_norm": [0.01, 1.0, 100.0],
"num_heads": [1, 4],
"stack_size": [1],
}
|
Returns hyperparameter ranges for random search.
|
get_hyperparm_choices
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/tft_model.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/tft_model.py
|
MIT
|
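A single random-search draw over this grid is just an independent choice per key; a minimal sketch with the grid inlined so it runs standalone:

import random

choices = {
    "dropout_rate": [0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 0.9],
    "hidden_layer_size": [10, 20, 40, 80, 160, 240, 320],
    "minibatch_size": [64, 128, 256],
    "learning_rate": [1e-4, 1e-3, 1e-2],
    "max_gradient_norm": [0.01, 1.0, 100.0],
    "num_heads": [1, 4],
    "stack_size": [1],
}
params = {k: random.choice(v) for k, v in choices.items()}  # one trial's hyperparameters
print(params)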
def get_single_col_by_input_type(input_type, column_definition):
"""Returns name of single column.
Args:
input_type: Input type of column to extract
column_definition: Column definition list for experiment
"""
l = [tup[0] for tup in column_definition if tup[2] == input_type]
if len(l) != 1:
raise ValueError("Invalid number of columns for {}".format(input_type))
return l[0]
|
Returns name of single column.
Args:
input_type: Input type of column to extract
column_definition: Column definition list for experiment
|
get_single_col_by_input_type
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/utils.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/utils.py
|
MIT
|
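A toy invocation, using string stand-ins for qlib's InputTypes enum members (the real definitions live elsewhere in the TFT code):

TARGET, ID = "target", "id"  # hypothetical stand-ins for the enum
column_definition = [
    ("close", "real", TARGET),
    ("instrument", "categorical", ID),
]
names = [tup[0] for tup in column_definition if tup[2] == TARGET]
assert names == ["close"]  # exactly one column carries the TARGET input type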
def tensorflow_quantile_loss(y, y_pred, quantile):
"""Computes quantile loss for tensorflow.
Standard quantile loss as defined in the "Training Procedure" section of
the main TFT paper
Args:
y: Targets
y_pred: Predictions
quantile: Quantile to use for loss calculations (between 0 & 1)
Returns:
Tensor for quantile loss.
"""
# Checks quantile
if quantile < 0 or quantile > 1:
raise ValueError("Illegal quantile value={}! Values should be between 0 and 1.".format(quantile))
prediction_underflow = y - y_pred
q_loss = quantile * tf.maximum(prediction_underflow, 0.0) + (1.0 - quantile) * tf.maximum(
-prediction_underflow, 0.0
)
return tf.reduce_sum(q_loss, axis=-1)
|
Computes quantile loss for tensorflow.
Standard quantile loss as defined in the "Training Procedure" section of
the main TFT paper
Args:
y: Targets
y_pred: Predictions
quantile: Quantile to use for loss calculations (between 0 & 1)
Returns:
Tensor for quantile loss.
|
tensorflow_quantile_loss
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/utils.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/utils.py
|
MIT
|
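For reference, the pinball loss the function implements, with target y, prediction \hat{y} and quantile q:

QL(y, \hat{y}, q) = q \cdot \max(y - \hat{y}, 0) + (1 - q) \cdot \max(\hat{y} - y, 0)

summed over the last axis by tf.reduce_sum.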
def numpy_normalised_quantile_loss(y, y_pred, quantile):
"""Computes normalised quantile loss for numpy arrays.
Uses the q-Risk metric as defined in the "Training Procedure" section of the
main TFT paper.
Args:
y: Targets
y_pred: Predictions
quantile: Quantile to use for loss calculations (between 0 & 1)
Returns:
Float for normalised quantile loss.
"""
prediction_underflow = y - y_pred
weighted_errors = quantile * np.maximum(prediction_underflow, 0.0) + (1.0 - quantile) * np.maximum(
-prediction_underflow, 0.0
)
quantile_loss = weighted_errors.mean()
normaliser = np.abs(y).mean()  # np.abs works for both numpy arrays and pandas objects
return 2 * quantile_loss / normaliser
|
Computes normalised quantile loss for numpy arrays.
Uses the q-Risk metric as defined in the "Training Procedure" section of the
main TFT paper.
Args:
y: Targets
y_pred: Predictions
quantile: Quantile to use for loss calculations (between 0 & 1)
Returns:
Float for normalised quantile loss.
|
numpy_normalised_quantile_loss
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/utils.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/utils.py
|
MIT
|
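A worked q-Risk at q = 0.5: both errors have magnitude 0.1 and weight 0.5, so the mean weighted error is 0.05; the normaliser is mean(|y|) = 1.5, giving 2 * 0.05 / 1.5 ≈ 0.0667.

import numpy as np

y = np.array([1.0, 2.0])
y_pred = np.array([1.1, 1.9])
q = 0.5
under = y - y_pred
loss = (q * np.maximum(under, 0.0) + (1 - q) * np.maximum(-under, 0.0)).mean()
print(2 * loss / np.abs(y).mean())  # ~0.0667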
def get_default_tensorflow_config(tf_device="gpu", gpu_id=0):
"""Creates tensorflow config for graphs to run on CPU or GPU.
Specifies whether to run graph on gpu or cpu and which GPU ID to use for multi
GPU machines.
Args:
tf_device: 'cpu' or 'gpu'
gpu_id: GPU ID to use if relevant
Returns:
Tensorflow config.
"""
if tf_device == "cpu":
os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # for training on cpu
tf_config = tf.ConfigProto(log_device_placement=False, device_count={"GPU": 0})
else:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
print("Selecting GPU ID={}".format(gpu_id))
tf_config = tf.ConfigProto(log_device_placement=False)
tf_config.gpu_options.allow_growth = True
return tf_config
|
Creates tensorflow config for graphs to run on CPU or GPU.
Specifies whether to run graph on gpu or cpu and which GPU ID to use for multi
GPU machines.
Args:
tf_device: 'cpu' or 'gpu'
gpu_id: GPU ID to use if relevant
Returns:
Tensorflow config.
|
get_default_tensorflow_config
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/utils.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/utils.py
|
MIT
|
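Typical TF 1.x usage of the returned config, assuming the utils module above is importable as `utils`:

import tensorflow as tf  # TF 1.x graph-mode API, as used throughout this file

tf_config = utils.get_default_tensorflow_config(tf_device="cpu")
sess = tf.Session(config=tf_config)
tf.keras.backend.set_session(sess)  # route Keras through the configured session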
def save(tf_session, model_folder, cp_name, scope=None):
"""Saves Tensorflow graph to checkpoint.
Saves all trainable variables under a given variable scope to checkpoint.
Args:
tf_session: Session containing graph
model_folder: Folder to save models
cp_name: Name of Tensorflow checkpoint
scope: Variable scope containing variables to save
"""
# Save model
if scope is None:
saver = tf.train.Saver()
else:
var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
saver = tf.train.Saver(var_list=var_list, max_to_keep=100000)
save_path = saver.save(tf_session, os.path.join(model_folder, "{0}.ckpt".format(cp_name)))
print("Model saved to: {0}".format(save_path))
|
Saves Tensorflow graph to checkpoint.
Saves all trainable variables under a given variable scope to checkpoint.
Args:
tf_session: Session containing graph
model_folder: Folder to save models
cp_name: Name of Tensorflow checkpoint
scope: Variable scope containing variables to save
|
save
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/utils.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/utils.py
|
MIT
|
def load(tf_session, model_folder, cp_name, scope=None, verbose=False):
"""Loads Tensorflow graph from checkpoint.
Args:
tf_session: Session to load graph into
model_folder: Folder containing serialised model
cp_name: Name of Tensorflow checkpoint
scope: Variable scope to use.
verbose: Whether to print additional debugging information.
"""
# Load model proper
load_path = os.path.join(model_folder, "{0}.ckpt".format(cp_name))
print("Loading model from {0}".format(load_path))
print_weights_in_checkpoint(model_folder, cp_name)
initial_vars = set([v.name for v in tf.get_default_graph().as_graph_def().node])
# Saver
if scope is None:
saver = tf.train.Saver()
else:
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
saver = tf.train.Saver(var_list=var_list, max_to_keep=100000)
# Load
saver.restore(tf_session, load_path)
all_vars = set([v.name for v in tf.get_default_graph().as_graph_def().node])
if verbose:
print("Restored {0}".format(",".join(initial_vars.difference(all_vars))))
print("Existing {0}".format(",".join(all_vars.difference(initial_vars))))
print("All {0}".format(",".join(all_vars)))
print("Done.")
|
Loads Tensorflow graph from checkpoint.
Args:
tf_session: Session to load graph into
model_folder: Folder containing serialised model
cp_name: Name of Tensorflow checkpoint
scope: Variable scope to use.
verbose: Whether to print additional debugging information.
|
load
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/utils.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/utils.py
|
MIT
|
def print_weights_in_checkpoint(model_folder, cp_name):
"""Prints all weights in Tensorflow checkpoint.
Args:
model_folder: Folder containing checkpoint
cp_name: Name of checkpoint
"""
load_path = os.path.join(model_folder, "{0}.ckpt".format(cp_name))
print_tensors_in_checkpoint_file(file_name=load_path, tensor_name="", all_tensors=True, all_tensor_names=True)
|
Prints all weights in Tensorflow checkpoint.
Args:
model_folder: Folder containing checkpoint
cp_name: Name of checkpoint
|
print_weights_in_checkpoint
|
python
|
microsoft/qlib
|
examples/benchmarks/TFT/libs/utils.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TFT/libs/utils.py
|
MIT
|
def _create_ts_slices(index, seq_len):
"""
create time series slices from pandas index
Args:
index (pd.MultiIndex): pandas multiindex with <instrument, datetime> order
seq_len (int): sequence length
"""
assert index.is_lexsorted(), "index should be sorted"
# number of dates for each code
sample_count_by_codes = pd.Series(0, index=index).groupby(level=0, group_keys=False).size().values
# start_index for each code
start_index_of_codes = np.roll(np.cumsum(sample_count_by_codes), 1)
start_index_of_codes[0] = 0
# all the [start, stop) indices of features
# features btw [start, stop) are used to predict the `stop - 1` label
slices = []
for cur_loc, cur_cnt in zip(start_index_of_codes, sample_count_by_codes):
for stop in range(1, cur_cnt + 1):
end = cur_loc + stop
start = max(end - seq_len, 0)
slices.append(slice(start, end))
slices = np.array(slices)
return slices
|
create time series slices from pandas index
Args:
index (pd.MultiIndex): pandas multiindex with <instrument, datetime> order
seq_len (int): sequence length
|
_create_ts_slices
|
python
|
microsoft/qlib
|
examples/benchmarks/TRA/src/dataset.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TRA/src/dataset.py
|
MIT
|
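A small worked example with two instruments (3 and 2 dates) and seq_len = 2; each slice ends at the bar whose label it predicts:

import numpy as np
import pandas as pd

index = pd.MultiIndex.from_tuples(
    [("A", 1), ("A", 2), ("A", 3), ("B", 1), ("B", 2)],
    names=["instrument", "datetime"],
)
counts = pd.Series(0, index=index).groupby(level=0, group_keys=False).size().values  # [3, 2]
starts = np.roll(np.cumsum(counts), 1)
starts[0] = 0  # [0, 3]
slices = []
for cur_loc, cur_cnt in zip(starts, counts):
    for stop in range(1, cur_cnt + 1):
        end = cur_loc + stop
        slices.append(slice(max(end - 2, 0), end))
print(slices)  # A: [0:1), [0:2), [1:3); B: [3:4), [3:5)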
def _get_date_parse_fn(target):
"""get date parse function
This method is used to parse date arguments as target type.
Example:
get_date_parse_fn('20120101')('2017-01-01') => '20170101'
get_date_parse_fn(20120101)('2017-01-01') => 20170101
"""
if isinstance(target, pd.Timestamp):
_fn = lambda x: pd.Timestamp(x) # Timestamp('2020-01-01')
elif isinstance(target, str) and len(target) == 8:
_fn = lambda x: str(x).replace("-", "")[:8] # '20200201'
elif isinstance(target, int):
_fn = lambda x: int(str(x).replace("-", "")[:8]) # 20200201
else:
_fn = lambda x: x
return _fn
|
get date parse function
This method is used to parse date arguments into the same type as `target`.
Example:
get_date_parse_fn('20120101')('2017-01-01') => '20170101'
get_date_parse_fn(20120101)('2017-01-01') => 20170101
|
_get_date_parse_fn
|
python
|
microsoft/qlib
|
examples/benchmarks/TRA/src/dataset.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TRA/src/dataset.py
|
MIT
|
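Usage mirrors the docstring examples, assuming _get_date_parse_fn above is in scope:

parse = _get_date_parse_fn(20120101)     # int target -> int result
assert parse("2017-01-01") == 20170101
parse = _get_date_parse_fn("20120101")   # 8-char string target -> compact string
assert parse("2017-01-01") == "20170101"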
def shoot_infs(inp_tensor):
"""Replaces inf by maximum of tensor"""
mask_inf = torch.isinf(inp_tensor)
ind_inf = torch.nonzero(mask_inf, as_tuple=False)
if len(ind_inf) > 0:
for ind in ind_inf:
if len(ind) == 2:
inp_tensor[ind[0], ind[1]] = 0
elif len(ind) == 1:
inp_tensor[ind[0]] = 0
m = torch.max(inp_tensor)
for ind in ind_inf:
if len(ind) == 2:
inp_tensor[ind[0], ind[1]] = m
elif len(ind) == 1:
inp_tensor[ind[0]] = m
return inp_tensor
|
Replaces inf values with the maximum of the remaining tensor values
|
shoot_infs
|
python
|
microsoft/qlib
|
examples/benchmarks/TRA/src/model.py
|
https://github.com/microsoft/qlib/blob/master/examples/benchmarks/TRA/src/model.py
|
MIT
|
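A quick check of the two-pass replacement (zero out infs, then overwrite them with the maximum of the remaining values), assuming shoot_infs above is in scope:

import torch

t = torch.tensor([1.0, float("inf"), 3.0])
print(shoot_infs(t))  # tensor([1., 3., 3.]) -- inf replaced by the max finite value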
def get_data(self):
"""use dataset to get highreq data"""
self._init_qlib()
self._prepare_calender_cache()
dataset = init_instance_by_config(self.task["dataset"])
xtrain, xtest = dataset.prepare(["train", "test"])
print(xtrain, xtest)
dataset_backtest = init_instance_by_config(self.task["dataset_backtest"])
backtest_train, backtest_test = dataset_backtest.prepare(["train", "test"])
print(backtest_train, backtest_test)
return
|
use dataset to get high-frequency data
|
get_data
|
python
|
microsoft/qlib
|
examples/highfreq/workflow.py
|
https://github.com/microsoft/qlib/blob/master/examples/highfreq/workflow.py
|
MIT
|
def dump_and_load_dataset(self):
"""dump and load dataset state on disk"""
self._init_qlib()
self._prepare_calender_cache()
dataset = init_instance_by_config(self.task["dataset"])
dataset_backtest = init_instance_by_config(self.task["dataset_backtest"])
##=============dump dataset=============
dataset.to_pickle(path="dataset.pkl")
dataset_backtest.to_pickle(path="dataset_backtest.pkl")
del dataset, dataset_backtest
##=============reload dataset=============
with open("dataset.pkl", "rb") as file_dataset:
dataset = pickle.load(file_dataset)
with open("dataset_backtest.pkl", "rb") as file_dataset_backtest:
dataset_backtest = pickle.load(file_dataset_backtest)
self._prepare_calender_cache()
##=============reinit dataset=============
dataset.config(
handler_kwargs={
"start_time": "2021-01-19 00:00:00",
"end_time": "2021-01-25 16:00:00",
},
segments={
"test": (
"2021-01-19 00:00:00",
"2021-01-25 16:00:00",
),
},
)
dataset.setup_data(
handler_kwargs={
"init_type": DataHandlerLP.IT_LS,
},
)
dataset_backtest.config(
handler_kwargs={
"start_time": "2021-01-19 00:00:00",
"end_time": "2021-01-25 16:00:00",
},
segments={
"test": (
"2021-01-19 00:00:00",
"2021-01-25 16:00:00",
),
},
)
dataset_backtest.setup_data(handler_kwargs={})
##=============get data=============
xtest = dataset.prepare("test")
backtest_test = dataset_backtest.prepare("test")
print(xtest, backtest_test)
return
|
dump and load dataset state on disk
|
dump_and_load_dataset
|
python
|
microsoft/qlib
|
examples/highfreq/workflow.py
|
https://github.com/microsoft/qlib/blob/master/examples/highfreq/workflow.py
|
MIT
|
def backtest_only_daily(self):
"""
This backtest is used for comparing nested execution and single-layer execution.
Due to the low quality of the daily-level and minute-level data, the two are hardly comparable,
so it is mainly used for detecting serious bugs that would make the results differ greatly.
.. code-block:: shell
[1724971:MainThread](2021-12-07 16:24:31,156) INFO - qlib.workflow - [record_temp.py:441] - Portfolio analysis record 'port_analysis_1day.pkl'
has been saved as the artifact of the Experiment 2
'The following are analysis results of benchmark return(1day).'
risk
mean 0.000651
std 0.012472
annualized_return 0.154967
information_ratio 0.805422
max_drawdown -0.160445
'The following are analysis results of the excess return without cost(1day).'
risk
mean 0.001375
std 0.006103
annualized_return 0.327204
information_ratio 3.475016
max_drawdown -0.024927
'The following are analysis results of the excess return with cost(1day).'
risk
mean 0.001184
std 0.006091
annualized_return 0.281801
information_ratio 2.998749
max_drawdown -0.029568
[1724971:MainThread](2021-12-07 16:24:31,170) INFO - qlib.workflow - [record_temp.py:466] - Indicator analysis record 'indicator_analysis_1day.
pkl' has been saved as the artifact of the Experiment 2
'The following are analysis results of indicators(1day).'
value
ffr 1.0
pa 0.0
pos 0.0
[1724971:MainThread](2021-12-07 16:24:31,188) INFO - qlib.timer - [log.py:113] - Time cost: 0.007s | waiting `async_log` Done
"""
self._init_qlib()
model = init_instance_by_config(self.task["model"])
dataset = init_instance_by_config(self.task["dataset"])
self._train_model(model, dataset)
strategy_config = {
"class": "TopkDropoutStrategy",
"module_path": "qlib.contrib.strategy.signal_strategy",
"kwargs": {
"signal": (model, dataset),
"topk": 50,
"n_drop": 5,
},
}
pa_conf = deepcopy(self.port_analysis_config)
pa_conf["strategy"] = strategy_config
pa_conf["executor"] = {
"class": "SimulatorExecutor",
"module_path": "qlib.backtest.executor",
"kwargs": {
"time_per_step": "day",
"generate_portfolio_metrics": True,
"verbose": True,
},
}
pa_conf["backtest"]["benchmark"] = self.benchmark
with R.start(experiment_name=self.exp_name, resume=True):
recorder = R.get_recorder()
par = PortAnaRecord(recorder, pa_conf)
par.generate()
|
This backtest is used for comparing nested execution and single-layer execution.
Due to the low quality of the daily-level and minute-level data, the two are hardly comparable,
so it is mainly used for detecting serious bugs that would make the results differ greatly.
.. code-block:: shell
[1724971:MainThread](2021-12-07 16:24:31,156) INFO - qlib.workflow - [record_temp.py:441] - Portfolio analysis record 'port_analysis_1day.pkl'
has been saved as the artifact of the Experiment 2
'The following are analysis results of benchmark return(1day).'
risk
mean 0.000651
std 0.012472
annualized_return 0.154967
information_ratio 0.805422
max_drawdown -0.160445
'The following are analysis results of the excess return without cost(1day).'
risk
mean 0.001375
std 0.006103
annualized_return 0.327204
information_ratio 3.475016
max_drawdown -0.024927
'The following are analysis results of the excess return with cost(1day).'
risk
mean 0.001184
std 0.006091
annualized_return 0.281801
information_ratio 2.998749
max_drawdown -0.029568
[1724971:MainThread](2021-12-07 16:24:31,170) INFO - qlib.workflow - [record_temp.py:466] - Indicator analysis record 'indicator_analysis_1day.
pkl' has been saved as the artifact of the Experiment 2
'The following are analysis results of indicators(1day).'
value
ffr 1.0
pa 0.0
pos 0.0
[1724971:MainThread](2021-12-07 16:24:31,188) INFO - qlib.timer - [log.py:113] - Time cost: 0.007s | waiting `async_log` Done
|
backtest_only_daily
|
python
|
microsoft/qlib
|
examples/nested_decision_execution/workflow.py
|
https://github.com/microsoft/qlib/blob/master/examples/nested_decision_execution/workflow.py
|
MIT
|
def __init__(
self,
provider_uri="~/.qlib/qlib_data/cn_data",
region="cn",
exp_name="rolling_exp",
task_url="mongodb://10.0.0.4:27017/", # not necessary when using TrainerR or DelayTrainerR
task_db_name="rolling_db", # not necessary when using TrainerR or DelayTrainerR
task_pool="rolling_task",
rolling_step=80,
start_time="2018-09-10",
end_time="2018-10-31",
tasks=None,
trainer="TrainerR",
):
"""
Init OnlineManagerExample.
Args:
provider_uri (str, optional): the provider uri. Defaults to "~/.qlib/qlib_data/cn_data".
region (str, optional): the stock region. Defaults to "cn".
exp_name (str, optional): the experiment name. Defaults to "rolling_exp".
task_url (str, optional): your MongoDB url. Defaults to "mongodb://10.0.0.4:27017/".
task_db_name (str, optional): database name. Defaults to "rolling_db".
task_pool (str, optional): the task pool name (a task pool is a collection in MongoDB). Defaults to "rolling_task".
rolling_step (int, optional): the step for rolling. Defaults to 80.
start_time (str, optional): the start time of simulating. Defaults to "2018-09-10".
end_time (str, optional): the end time of simulating. Defaults to "2018-10-31".
tasks (dict or list[dict]): the task configs waiting for rolling and training
"""
if tasks is None:
tasks = [CSI100_RECORD_XGBOOST_TASK_CONFIG_ONLINE, CSI100_RECORD_LGB_TASK_CONFIG_ONLINE]
self.exp_name = exp_name
self.task_pool = task_pool
self.start_time = start_time
self.end_time = end_time
mongo_conf = {
"task_url": task_url,
"task_db_name": task_db_name,
}
qlib.init(provider_uri=provider_uri, region=region, mongo=mongo_conf)
self.rolling_gen = RollingGen(
step=rolling_step, rtype=RollingGen.ROLL_SD, ds_extra_mod_func=None
) # The rolling tasks generator, ds_extra_mod_func is None because we just need to simulate to 2018-10-31 and needn't change the handler end time.
if trainer == "TrainerRM":
self.trainer = TrainerRM(self.exp_name, self.task_pool)
elif trainer == "TrainerR":
self.trainer = TrainerR(self.exp_name)
else:
# TODO: support all the trainers: TrainerR, TrainerRM, DelayTrainerR
raise NotImplementedError(f"This type of input is not supported")
self.rolling_online_manager = OnlineManager(
RollingStrategy(exp_name, task_template=tasks, rolling_gen=self.rolling_gen),
trainer=self.trainer,
begin_time=self.start_time,
)
self.tasks = tasks
|
Init OnlineManagerExample.
Args:
provider_uri (str, optional): the provider uri. Defaults to "~/.qlib/qlib_data/cn_data".
region (str, optional): the stock region. Defaults to "cn".
exp_name (str, optional): the experiment name. Defaults to "rolling_exp".
task_url (str, optional): your MongoDB url. Defaults to "mongodb://10.0.0.4:27017/".
task_db_name (str, optional): database name. Defaults to "rolling_db".
task_pool (str, optional): the task pool name (a task pool is a collection in MongoDB). Defaults to "rolling_task".
rolling_step (int, optional): the step for rolling. Defaults to 80.
start_time (str, optional): the start time of simulating. Defaults to "2018-09-10".
end_time (str, optional): the end time of simulating. Defaults to "2018-10-31".
tasks (dict or list[dict]): the task configs waiting for rolling and training
|
__init__
|
python
|
microsoft/qlib
|
examples/online_srv/online_management_simulate.py
|
https://github.com/microsoft/qlib/blob/master/examples/online_srv/online_management_simulate.py
|
MIT
|
def add_one_stock_daily_data(filepath, type, exchange_place, arc, date):
"""
exchange_place: "SZ" OR "SH"
type: "tick", "orderbook", ...
filepath: the path of csv
arc: Arctic link created by a process
"""
code = os.path.split(filepath)[-1].split(".csv")[0]
if exchange_place == "SH" and code[0] != "6":
return
if exchange_place == "SZ" and code[0] != "0" and code[:2] != "30":
return
df = pd.read_csv(filepath, encoding="gbk", dtype={"code": str})
code = os.path.split(filepath)[-1].split(".csv")[0]
def format_time(day, hms):
day = str(day)
hms = str(hms)
if hms[0] == "1": # >=10,
return (
"-".join([day[0:4], day[4:6], day[6:8]]) + " " + ":".join([hms[:2], hms[2:4], hms[4:6] + "." + hms[6:]])
)
else:
return (
"-".join([day[0:4], day[4:6], day[6:8]]) + " " + ":".join([hms[:1], hms[1:3], hms[3:5] + "." + hms[5:]])
)
## Discard the entire row if a malformed timestamp is encountered.
timestamp = list(zip(list(df["date"]), list(df["time"])))
error_index_list = []
for index, t in enumerate(timestamp):
try:
pd.Timestamp(format_time(t[0], t[1]))
except Exception:
error_index_list.append(index) ## The row number of the error line
# TODO: write these to logs
if len(error_index_list) > 0:
print("error: {}, {}".format(filepath, len(error_index_list)))
df = df.drop(error_index_list)
timestamp = list(zip(list(df["date"]), list(df["time"]))) ## The cleaned timestamp
# generate timestamp
pd_timestamp = pd.DatetimeIndex(
[pd.Timestamp(format_time(timestamp[i][0], timestamp[i][1])) for i in range(len(df["date"]))]
)
df = df.drop(columns=["date", "time", "name", "code", "wind_code"])
# df = pd.DataFrame(data=df.to_dict("list"), index=pd_timestamp)
df["date"] = pd.to_datetime(pd_timestamp)
df.set_index("date", inplace=True)
if str.lower(type) == "orderqueue":
## extract ab1~ab50
df["ab"] = [
",".join([str(int(row["ab" + str(i + 1)])) for i in range(0, row["ab_items"])])
for timestamp, row in df.iterrows()
]
df = df.drop(columns=["ab" + str(i) for i in range(1, 51)])
type = get_library_name(type)
# arc.initialize_library(type, lib_type=CHUNK_STORE)
lib = arc[type]
symbol = "".join([exchange_place, code])
if symbol in lib.list_symbols():
print("update {0}, date={1}".format(symbol, date))
if df.empty:
return error_index_list
lib.update(symbol, df, chunk_size="D")
else:
print("write {0}, date={1}".format(symbol, date))
lib.write(symbol, df, chunk_size="D")
return error_index_list
|
exchange_place: "SZ" OR "SH"
type: "tick", "orderbook", ...
filepath: the path of csv
arc: Arctic link created by a process
|
add_one_stock_daily_data
|
python
|
microsoft/qlib
|
examples/orderbook_data/create_dataset.py
|
https://github.com/microsoft/qlib/blob/master/examples/orderbook_data/create_dataset.py
|
MIT
|
def __init__(self, provider_uri: Union[str, Path, dict], mount_path: Union[str, Path, dict]):
"""
The relation of `provider_uri` and `mount_path`
- `mount_path` is used only if provider_uri is an NFS path
- otherwise, provider_uri will be used for accessing data
"""
self.provider_uri = provider_uri
self.mount_path = mount_path
|
The relation of `provider_uri` and `mount_path`
- `mount_path` is used only if provider_uri is an NFS path
- otherwise, provider_uri will be used for accessing data
|
__init__
|
python
|
microsoft/qlib
|
qlib/config.py
|
https://github.com/microsoft/qlib/blob/master/qlib/config.py
|
MIT
|
def get_data_uri(self, freq: Optional[Union[str, Freq]] = None) -> Path:
"""
please refer to DataPathManager's __init__ and class docstring
"""
if freq is not None:
freq = str(freq) # converting Freq to string
if freq is None or freq not in self.provider_uri:
freq = QlibConfig.DEFAULT_FREQ
_provider_uri = self.provider_uri[freq]
if self.get_uri_type(_provider_uri) == QlibConfig.LOCAL_URI:
return Path(_provider_uri)
elif self.get_uri_type(_provider_uri) == QlibConfig.NFS_URI:
if "win" in platform.system().lower():
# windows, mount_path is the drive
_path = str(self.mount_path[freq])
return Path(f"{_path}:\\") if ":" not in _path else Path(_path)
return Path(self.mount_path[freq])
else:
raise NotImplementedError(f"This type of uri is not supported")
|
please refer to DataPathManager's __init__ and class docstring
|
get_data_uri
|
python
|
microsoft/qlib
|
qlib/config.py
|
https://github.com/microsoft/qlib/blob/master/qlib/config.py
|
MIT
|
def set(self, default_conf: str = "client", **kwargs):
"""
configure qlib based on the input parameters
The configuration will act like a dictionary.
Normally, it simply replaces values according to the keys.
However, sometimes it is hard for users to set the config when the configuration is nested and complicated
So this API provides some special parameters for users to set the keys in a more convenient way.
- region: REG_CN, REG_US
- several region-related config will be changed
Parameters
----------
default_conf : str
the default config template chosen by user: "server", "client"
"""
from .utils import set_log_with_config, get_module_logger, can_use_cache # pylint: disable=C0415
self.reset()
_logging_config = kwargs.get("logging_config", self.logging_config)
# set global config
if _logging_config:
set_log_with_config(_logging_config)
logger = get_module_logger("Initialization", kwargs.get("logging_level", self.logging_level))
logger.info(f"default_conf: {default_conf}.")
self.set_mode(default_conf)
self.set_region(kwargs.get("region", self["region"] if "region" in self else REG_CN))
for k, v in kwargs.items():
if k not in self:
logger.warning("Unrecognized config %s" % k)
self[k] = v
self.resolve_path()
if not (self["expression_cache"] is None and self["dataset_cache"] is None):
# check redis
if not can_use_cache():
log_str = ""
# check expression cache
if self.is_depend_redis(self["expression_cache"]):
log_str += self["expression_cache"]
self["expression_cache"] = None
# check dataset cache
if self.is_depend_redis(self["dataset_cache"]):
log_str += f" and {self['dataset_cache']}" if log_str else self["dataset_cache"]
self["dataset_cache"] = None
if log_str:
logger.warning(
f"redis connection failed(host={self['redis_host']} port={self['redis_port']}), "
f"{log_str} will not be used!"
)
|
configure qlib based on the input parameters
The configuration will act like a dictionary.
Normally, it simply replaces values according to the keys.
However, sometimes it is hard for users to set the config when the configuration is nested and complicated
So this API provides some special parameters for users to set the keys in a more convenient way.
- region: REG_CN, REG_US
- several region-related config will be changed
Parameters
----------
default_conf : str
the default config template chosen by user: "server", "client"
|
set
|
python
|
microsoft/qlib
|
qlib/config.py
|
https://github.com/microsoft/qlib/blob/master/qlib/config.py
|
MIT
|
def get_kernels(self, freq: str):
"""get number of processors given frequency"""
if isinstance(self["kernels"], Callable):
return self["kernels"](freq)
return self["kernels"]
|
get number of processors given frequency
|
get_kernels
|
python
|
microsoft/qlib
|
qlib/config.py
|
https://github.com/microsoft/qlib/blob/master/qlib/config.py
|
MIT
|
def __call__(self, module_name, level: Optional[int] = None) -> QlibLogger:
"""
Get a logger for a specific module.
:param module_name: str
Logic module name.
:param level: int
:return: Logger
Logger object.
"""
if level is None:
level = C.logging_level
if not module_name.startswith("qlib."):
# Add a prefix of qlib. when the requested ``module_name`` doesn't start with ``qlib.``.
# If the module_name is already qlib.xxx, we do not format here. Otherwise, it will become qlib.qlib.xxx.
module_name = "qlib.{}".format(module_name)
# Get logger.
module_logger = self._loggers.setdefault(module_name, QlibLogger(module_name))
module_logger.setLevel(level)
return module_logger
|
Get a logger for a specific module.
:param module_name: str
Logic module name.
:param level: int
:return: Logger
Logger object.
|
__call__
|
python
|
microsoft/qlib
|
qlib/log.py
|
https://github.com/microsoft/qlib/blob/master/qlib/log.py
|
MIT
|
def set_time_mark(cls):
"""
Set a time mark with current time, and this time mark will push into a stack.
:return: float
A timestamp for current time.
"""
_time = time()
cls.time_marks.append(_time)
return _time
|
Set a time mark with current time, and this time mark will push into a stack.
:return: float
A timestamp for current time.
|
set_time_mark
|
python
|
microsoft/qlib
|
qlib/log.py
|
https://github.com/microsoft/qlib/blob/master/qlib/log.py
|
MIT
|
def log_cost_time(cls, info="Done"):
"""
Get last time mark from stack, calculate time diff with current time, and log time diff and info.
:param info: str
Info that will be logged into stdout.
"""
cost_time = time() - cls.time_marks.pop()
cls.timer_logger.info("Time cost: {0:.3f}s | {1}".format(cost_time, info))
|
Get last time mark from stack, calculate time diff with current time, and log time diff and info.
:param info: str
Info that will be logged into stdout.
|
log_cost_time
|
python
|
microsoft/qlib
|
qlib/log.py
|
https://github.com/microsoft/qlib/blob/master/qlib/log.py
|
MIT
|
def logt(cls, name="", show_start=False):
"""logt.
Log the time of the inside code
Parameters
----------
name : str
label for the timed code block
show_start : bool
whether to also log when the block begins
"""
if show_start:
cls.timer_logger.info(f"{name} Begin")
cls.set_time_mark()
try:
yield None
finally:
pass
cls.log_cost_time(info=f"{name} Done")
|
logt.
Log the time of the inside code
Parameters
----------
name : str
label for the timed code block
show_start : bool
whether to also log when the block begins
|
logt
|
python
|
microsoft/qlib
|
qlib/log.py
|
https://github.com/microsoft/qlib/blob/master/qlib/log.py
|
MIT
|
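In the qlib source, logt is decorated as a classmethod contextmanager on TimeInspector (the decorators are not shown in this extract); typical usage:

from qlib.log import TimeInspector

with TimeInspector.logt("load data", show_start=True):
    total = sum(range(1_000_000))  # any block to be timed
# logs "load data Begin" on entry and "Time cost: ...s | load data Done" on exit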
def set_global_logger_level(level: int, return_orig_handler_level: bool = False):
"""set qlib.xxx logger handlers level
Parameters
----------
level: int
logger level
return_orig_handler_level: bool
return origin handler level map
Examples
---------
.. code-block:: python
import qlib
import logging
from qlib.log import get_module_logger, set_global_logger_level
qlib.init()
tmp_logger_01 = get_module_logger("tmp_logger_01", level=logging.INFO)
tmp_logger_01.info("1. tmp_logger_01 info show")
global_level = logging.WARNING + 1
set_global_logger_level(global_level)
tmp_logger_02 = get_module_logger("tmp_logger_02", level=logging.INFO)
tmp_logger_02.log(msg="2. tmp_logger_02 log show", level=global_level)
tmp_logger_01.info("3. tmp_logger_01 info do not show")
"""
_handler_level_map = {}
qlib_logger = logging.root.manager.loggerDict.get("qlib", None) # pylint: disable=E1101
if qlib_logger is not None:
for _handler in qlib_logger.handlers:
_handler_level_map[_handler] = _handler.level
_handler.level = level
return _handler_level_map if return_orig_handler_level else None
|
set qlib.xxx logger handlers level
Parameters
----------
level: int
logger level
return_orig_handler_level: bool
return origin handler level map
Examples
---------
.. code-block:: python
import qlib
import logging
from qlib.log import get_module_logger, set_global_logger_level
qlib.init()
tmp_logger_01 = get_module_logger("tmp_logger_01", level=logging.INFO)
tmp_logger_01.info("1. tmp_logger_01 info show")
global_level = logging.WARNING + 1
set_global_logger_level(global_level)
tmp_logger_02 = get_module_logger("tmp_logger_02", level=logging.INFO)
tmp_logger_02.log(msg="2. tmp_logger_02 log show", level=global_level)
tmp_logger_01.info("3. tmp_logger_01 info do not show")
|
set_global_logger_level
|
python
|
microsoft/qlib
|
qlib/log.py
|
https://github.com/microsoft/qlib/blob/master/qlib/log.py
|
MIT
|
def set_global_logger_level_cm(level: int):
"""set qlib.xxx logger handlers level to use contextmanager
Parameters
----------
level: int
logger level
Examples
---------
.. code-block:: python
import qlib
import logging
from qlib.log import get_module_logger, set_global_logger_level_cm
qlib.init()
tmp_logger_01 = get_module_logger("tmp_logger_01", level=logging.INFO)
tmp_logger_01.info("1. tmp_logger_01 info show")
global_level = logging.WARNING + 1
with set_global_logger_level_cm(global_level):
tmp_logger_02 = get_module_logger("tmp_logger_02", level=logging.INFO)
tmp_logger_02.log(msg="2. tmp_logger_02 log show", level=global_level)
tmp_logger_01.info("3. tmp_logger_01 info do not show")
tmp_logger_01.info("4. tmp_logger_01 info show")
"""
_handler_level_map = set_global_logger_level(level, return_orig_handler_level=True)
try:
yield
finally:
for _handler, _level in _handler_level_map.items():
_handler.level = _level
|
set qlib.xxx logger handlers level to use contextmanager
Parameters
----------
level: int
logger level
Examples
---------
.. code-block:: python
import qlib
import logging
from qlib.log import get_module_logger, set_global_logger_level_cm
qlib.init()
tmp_logger_01 = get_module_logger("tmp_logger_01", level=logging.INFO)
tmp_logger_01.info("1. tmp_logger_01 info show")
global_level = logging.WARNING + 1
with set_global_logger_level_cm(global_level):
tmp_logger_02 = get_module_logger("tmp_logger_02", level=logging.INFO)
tmp_logger_02.log(msg="2. tmp_logger_02 log show", level=global_level)
tmp_logger_01.info("3. tmp_logger_01 info do not show")
tmp_logger_01.info("4. tmp_logger_01 info show")
|
set_global_logger_level_cm
|
python
|
microsoft/qlib
|
qlib/log.py
|
https://github.com/microsoft/qlib/blob/master/qlib/log.py
|
MIT
|
def init(default_conf="client", **kwargs):
"""
Parameters
----------
default_conf: str
the default value is client. Accepted values: client/server.
**kwargs :
clear_mem_cache: bool
the default value is True;
whether the memory cache will be cleared.
It is often used to improve performance when init is called multiple times.
skip_if_reg: bool
the default value is False;
When using the recorder, skip_if_reg can be set to True to avoid losing the recorder.
"""
from .config import C # pylint: disable=C0415
from .data.cache import H # pylint: disable=C0415
logger = get_module_logger("Initialization")
skip_if_reg = kwargs.pop("skip_if_reg", False)
if skip_if_reg and C.registered:
# if we reinitialize Qlib during running an experiment `R.start`.
# it will result in loss of the recorder
logger.warning("Skip initialization because `skip_if_reg is True`")
return
clear_mem_cache = kwargs.pop("clear_mem_cache", True)
if clear_mem_cache:
H.clear()
C.set(default_conf, **kwargs)
get_module_logger.setLevel(C.logging_level)
# mount nfs
for _freq, provider_uri in C.provider_uri.items():
mount_path = C["mount_path"][_freq]
# check path if server/local
uri_type = C.dpm.get_uri_type(provider_uri)
if uri_type == C.LOCAL_URI:
if not Path(provider_uri).exists():
if C["auto_mount"]:
logger.error(
f"Invalid provider uri: {provider_uri}, please check if a valid provider uri has been set. This path does not exist."
)
else:
logger.warning(f"auto_path is False, please make sure {mount_path} is mounted")
elif uri_type == C.NFS_URI:
_mount_nfs_uri(provider_uri, C.dpm.get_data_uri(_freq), C["auto_mount"])
else:
raise NotImplementedError(f"This type of URI is not supported")
C.register()
if "flask_server" in C:
logger.info(f"flask_server={C['flask_server']}, flask_port={C['flask_port']}")
logger.info("qlib successfully initialized based on %s settings." % default_conf)
data_path = {_freq: C.dpm.get_data_uri(_freq) for _freq in C.dpm.provider_uri.keys()}
logger.info(f"data_path={data_path}")
|
Parameters
----------
default_conf: str
the default value is client. Accepted values: client/server.
**kwargs :
clear_mem_cache: bool
the default value is True;
whether the memory cache will be cleared.
It is often used to improve performance when init is called multiple times.
skip_if_reg: bool
the default value is False;
When using the recorder, skip_if_reg can be set to True to avoid losing the recorder.
|
init
|
python
|
microsoft/qlib
|
qlib/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/__init__.py
|
MIT
|
def init_from_yaml_conf(conf_path, **kwargs):
"""init_from_yaml_conf
:param conf_path: A path to the qlib config in yml format
"""
if conf_path is None:
config = {}
else:
with open(conf_path) as f:
yaml = YAML(typ="safe", pure=True)
config = yaml.load(f)
config.update(kwargs)
default_conf = config.pop("default_conf", "client")
init(default_conf, **config)
|
init_from_yaml_conf
:param conf_path: A path to the qlib config in yml format
|
init_from_yaml_conf
|
python
|
microsoft/qlib
|
qlib/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/__init__.py
|
MIT
|
def get_project_path(config_name="config.yaml", cur_path: Union[Path, str, None] = None) -> Path:
"""
If users are building a project that follows the pattern below.
- Qlib is a sub folder in project path
- There is a file named `config.yaml` in qlib.
For example:
If your project file system structure follows such a pattern
<project_path>/
- config.yaml
- ...some folders...
- qlib/
This function will return <project_path>
NOTE: link is not supported here.
This method is often used when
- users want to use a relative config path instead of hard-coding the qlib config path in code
Raises
------
FileNotFoundError:
If project path is not found
"""
if cur_path is None:
cur_path = Path(__file__).absolute().resolve()
cur_path = Path(cur_path)
while True:
if (cur_path / config_name).exists():
return cur_path
if cur_path == cur_path.parent:
raise FileNotFoundError("We can't find the project path")
cur_path = cur_path.parent
|
If users are building a project that follows the pattern below.
- Qlib is a sub folder in project path
- There is a file named `config.yaml` in qlib.
For example:
If your project file system structure follows such a pattern
<project_path>/
- config.yaml
- ...some folders...
- qlib/
This function will return <project_path>
NOTE: link is not supported here.
This method is often used when
- users want to use a relative config path instead of hard-coding the qlib config path in code
Raises
------
FileNotFoundError:
If project path is not found
|
get_project_path
|
python
|
microsoft/qlib
|
qlib/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/__init__.py
|
MIT
|
def auto_init(**kwargs):
"""
This function will init qlib automatically with following priority
- Find the project configuration and init qlib
- The parsing process will be affected by the `conf_type` of the configuration file
- Init qlib with default config
- Skip initialization if already initialized
:**kwargs: it may contain the following parameters
cur_path: the start path to find the project path
Here are two examples of the configuration
Example 1)
If you want to create a new project-specific config based on a shared configuration, you can use `conf_type: ref`
.. code-block:: yaml
conf_type: ref
qlib_cfg: '<shared_yaml_config_path>' # this can be null to reference no config from other files
# the following configs in `qlib_cfg_update` are project-specific
qlib_cfg_update:
exp_manager:
class: "MLflowExpManager"
module_path: "qlib.workflow.expm"
kwargs:
uri: "file://<your mlflow experiment path>"
default_exp_name: "Experiment"
Example 2)
If you want to create a simple standalone config, you can use the following config (a.k.a. `conf_type: origin`)
.. code-block:: yaml
exp_manager:
class: "MLflowExpManager"
module_path: "qlib.workflow.expm"
kwargs:
uri: "file://<your mlflow experiment path>"
default_exp_name: "Experiment"
"""
kwargs["skip_if_reg"] = kwargs.get("skip_if_reg", True)
try:
pp = get_project_path(cur_path=kwargs.pop("cur_path", None))
except FileNotFoundError:
init(**kwargs)
else:
logger = get_module_logger("Initialization")
conf_pp = pp / "config.yaml"
with conf_pp.open() as f:
yaml = YAML(typ="safe", pure=True)
conf = yaml.load(f)
conf_type = conf.get("conf_type", "origin")
if conf_type == "origin":
# The type of config is just like original qlib config
init_from_yaml_conf(conf_pp, **kwargs)
elif conf_type == "ref":
# This config type is more convenient in the following scenarios:
# - There is a shared configuration file, and you don't want to edit it in place.
# - The shared configuration may be updated later, and you don't want to copy it.
# - You have some customized config.
qlib_conf_path = conf.get("qlib_cfg", None)
# merge the arguments
qlib_conf_update = conf.get("qlib_cfg_update", {})
for k, v in kwargs.items():
if k in qlib_conf_update:
logger.warning(f"`qlib_conf_update` from conf_pp is override by `kwargs` on key '{k}'")
qlib_conf_update.update(kwargs)
init_from_yaml_conf(qlib_conf_path, **qlib_conf_update)
logger.info(f"Auto load project config: {conf_pp}")
|
This function will init qlib automatically with following priority
- Find the project configuration and init qlib
- The parsing process will be affected by the `conf_type` of the configuration file
- Init qlib with default config
- Skip initialization if already initialized
:**kwargs: it may contain the following parameters
cur_path: the start path to find the project path
Here are two examples of the configuration
Example 1)
If you want to create a new project-specific config based on a shared configuration, you can use `conf_type: ref`
.. code-block:: yaml
conf_type: ref
qlib_cfg: '<shared_yaml_config_path>' # this can be null to reference no config from other files
# the following configs in `qlib_cfg_update` are project-specific
qlib_cfg_update:
exp_manager:
class: "MLflowExpManager"
module_path: "qlib.workflow.expm"
kwargs:
uri: "file://<your mlflow experiment path>"
default_exp_name: "Experiment"
Example 2)
If you want to create a simple standalone config, you can use the following config (a.k.a. `conf_type: origin`)
.. code-block:: yaml
exp_manager:
class: "MLflowExpManager"
module_path: "qlib.workflow.expm"
kwargs:
uri: "file://<your mlflow experiment path>"
default_exp_name: "Experiment"
|
auto_init
|
python
|
microsoft/qlib
|
qlib/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/__init__.py
|
MIT
|
def __init__(
self,
init_cash: float = 1e9,
position_dict: dict = {},
freq: str = "day",
benchmark_config: dict = {},
pos_type: str = "Position",
port_metr_enabled: bool = True,
) -> None:
"""the trade account of backtest.
Parameters
----------
init_cash : float, optional
initial cash, by default 1e9
position_dict : Dict[
stock_id,
Union[
int, # it is equal to {"amount": int}
{"amount": int, "price"(optional): float},
]
]
initial stocks with parameters amount and price,
if there is no price key in the dict of stocks, it will be filled by _fill_stock_value.
by default {}.
"""
self._pos_type = pos_type
self._port_metr_enabled = port_metr_enabled
self.benchmark_config: dict = {} # avoid no attribute error
self.init_vars(init_cash, position_dict, freq, benchmark_config)
|
The trade account used in backtest.
Parameters
----------
init_cash : float, optional
initial cash, by default 1e9
position_dict : Dict[
stock_id,
Union[
int, # it is equal to {"amount": int}
{"amount": int, "price"(optional): float},
]
]
initial stocks with parameters amount and price,
if there is no price key in the dict of stocks, it will be filled by _fill_stock_value.
by default {}.
|
__init__
|
python
|
microsoft/qlib
|
qlib/backtest/account.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/account.py
|
MIT
|
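Both accepted value shapes for position_dict, per the docstring above (instrument codes are illustrative):

position_dict = {
    "SH600000": 1000,                            # bare int == {"amount": 1000}
    "SZ000001": {"amount": 500, "price": 10.2},  # price is optional; filled by _fill_stock_value if absent
}
# account = Account(init_cash=1e6, position_dict=position_dict)  # hypothetical usage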
def reset(
self, freq: str | None = None, benchmark_config: dict | None = None, port_metr_enabled: bool | None = None
) -> None:
"""reset freq and report of account
Parameters
----------
freq : str, optional
frequency of account & report, by default None
benchmark_config : {}, optional
benchmark config of report, by default None
port_metr_enabled : bool, optional
whether to enable portfolio metrics, by default None (keep the current setting)
"""
if freq is not None:
self.freq = freq
if benchmark_config is not None:
self.benchmark_config = benchmark_config
if port_metr_enabled is not None:
self._port_metr_enabled = port_metr_enabled
self.reset_report(self.freq, self.benchmark_config)
|
reset freq and report of account
Parameters
----------
freq : str, optional
frequency of account & report, by default None
benchmark_config : {}, optional
benchmark config of report, by default None
port_metr_enabled : bool, optional
whether to enable portfolio metrics, by default None (keep the current setting)
|
reset
|
python
|
microsoft/qlib
|
qlib/backtest/account.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/account.py
|
MIT
|
def update_current_position(
self,
trade_start_time: pd.Timestamp,
trade_end_time: pd.Timestamp,
trade_exchange: Exchange,
) -> None:
"""
Update the current position to keep rtn consistent with earning at the end of the bar, and update each stock's holding bar count
"""
# update price for stock in the position and the profit from changed_price
# NOTE: updating position does not only serve portfolio metrics, it also serve the strategy
assert self.current_position is not None
if not self.current_position.skip_update():
stock_list = self.current_position.get_stock_list()
for code in stock_list:
# if suspended, no new price to be updated, profit is 0
if trade_exchange.check_stock_suspended(code, trade_start_time, trade_end_time):
continue
bar_close = cast(float, trade_exchange.get_close(code, trade_start_time, trade_end_time))
self.current_position.update_stock_price(stock_id=code, price=bar_close)
# update holding day count
# NOTE: updating bar_count does not only serve portfolio metrics, it also serve the strategy
self.current_position.add_count_all(bar=self.freq)
|
Update the current position to keep rtn consistent with earning at the end of the bar, and update each stock's holding bar count
|
update_current_position
|
python
|
microsoft/qlib
|
qlib/backtest/account.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/account.py
|
MIT
|
def update_indicator(
self,
trade_start_time: pd.Timestamp,
trade_exchange: Exchange,
atomic: bool,
outer_trade_decision: BaseTradeDecision,
trade_info: list = [],
inner_order_indicators: List[BaseOrderIndicator] = [],
decision_list: List[Tuple[BaseTradeDecision, pd.Timestamp, pd.Timestamp]] = [],
indicator_config: dict = {},
) -> None:
"""update trade indicators and order indicators in each bar end"""
# TODO: will skipping empty decisions make it faster? `outer_trade_decision.empty():`
# indicator is trading (e.g. high-frequency order execution) related analysis
self.indicator.reset()
# aggregate the information for each order
if atomic:
self.indicator.update_order_indicators(trade_info)
else:
self.indicator.agg_order_indicators(
inner_order_indicators,
decision_list=decision_list,
outer_trade_decision=outer_trade_decision,
trade_exchange=trade_exchange,
indicator_config=indicator_config,
)
# aggregate all the order metrics in a single step
self.indicator.cal_trade_indicators(trade_start_time, self.freq, indicator_config)
# record the metrics
self.indicator.record(trade_start_time)
|
update trade indicators and order indicators in each bar end
|
update_indicator
|
python
|
microsoft/qlib
|
qlib/backtest/account.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/account.py
|
MIT
|
def update_bar_end(
self,
trade_start_time: pd.Timestamp,
trade_end_time: pd.Timestamp,
trade_exchange: Exchange,
atomic: bool,
outer_trade_decision: BaseTradeDecision,
trade_info: list = [],
inner_order_indicators: List[BaseOrderIndicator] = [],
decision_list: List[Tuple[BaseTradeDecision, pd.Timestamp, pd.Timestamp]] = [],
indicator_config: dict = {},
) -> None:
"""update account at each trading bar step
Parameters
----------
trade_start_time : pd.Timestamp
closed start time of step
trade_end_time : pd.Timestamp
closed end time of step
trade_exchange : Exchange
trading exchange, used to update current
atomic : bool
whether the trading executor is atomic, which means there is no higher-frequency trading executor inside it
- if atomic is True, calculate the indicators with trade_info
- else, aggregate indicators with inner indicators
outer_trade_decision: BaseTradeDecision
external trade decision
trade_info : List[(Order, float, float, float)], optional
trading information, by default None
- necessary if atomic is True
- list of tuple(order, trade_val, trade_cost, trade_price)
inner_order_indicators : Indicator, optional
indicators of inner executor, by default None
- necessary if atomic is False
- used to aggregate outer indicators
decision_list: List[Tuple[BaseTradeDecision, pd.Timestamp, pd.Timestamp]] = None,
The decision list of the inner level: List[Tuple[<decision>, <start_time>, <end_time>]]
indicator_config : dict, optional
config of calculating indicators, by default {}
"""
if atomic is True and trade_info is None:
raise ValueError("trade_info is necessary in atomic executor")
elif atomic is False and inner_order_indicators is None:
raise ValueError("inner_order_indicators is necessary in un-atomic executor")
# update current position and hold bar count in each bar end
self.update_current_position(trade_start_time, trade_end_time, trade_exchange)
if self.is_port_metr_enabled():
# portfolio_metrics is portfolio related analysis
self.update_portfolio_metrics(trade_start_time, trade_end_time)
self.update_hist_positions(trade_start_time)
# update indicator in each bar end
self.update_indicator(
trade_start_time=trade_start_time,
trade_exchange=trade_exchange,
atomic=atomic,
outer_trade_decision=outer_trade_decision,
trade_info=trade_info,
inner_order_indicators=inner_order_indicators,
decision_list=decision_list,
indicator_config=indicator_config,
)
|
update account at each trading bar step
Parameters
----------
trade_start_time : pd.Timestamp
closed start time of step
trade_end_time : pd.Timestamp
closed end time of step
trade_exchange : Exchange
trading exchange, used to update current
atomic : bool
whether the trading executor is atomic, which means there is no higher-frequency trading executor inside it
- if atomic is True, calculate the indicators with trade_info
- else, aggregate indicators with inner indicators
outer_trade_decision: BaseTradeDecision
external trade decision
trade_info : List[(Order, float, float, float)], optional
trading information, by default None
- necessary if atomic is True
- list of tuple(order, trade_val, trade_cost, trade_price)
inner_order_indicators : Indicator, optional
indicators of inner executor, by default None
- necessary if atomic is False
- used to aggregate outer indicators
decision_list: List[Tuple[BaseTradeDecision, pd.Timestamp, pd.Timestamp]] = None,
The decision list of the inner level: List[Tuple[<decision>, <start_time>, <end_time>]]
indicator_config : dict, optional
config of calculating indicators, by default {}
|
update_bar_end
|
python
|
microsoft/qlib
|
qlib/backtest/account.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/account.py
|
MIT
|
def get_portfolio_metrics(self) -> Tuple[pd.DataFrame, dict]:
"""get the history portfolio_metrics and positions instance"""
if self.is_port_metr_enabled():
assert self.portfolio_metrics is not None
_portfolio_metrics = self.portfolio_metrics.generate_portfolio_metrics_dataframe()
_positions = self.get_hist_positions()
return _portfolio_metrics, _positions
else:
raise ValueError("generate_portfolio_metrics should be True if you want to generate portfolio_metrics")
|
get the history portfolio_metrics and positions instance
|
get_portfolio_metrics
|
python
|
microsoft/qlib
|
qlib/backtest/account.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/account.py
|
MIT
|
def backtest_loop(
start_time: Union[pd.Timestamp, str],
end_time: Union[pd.Timestamp, str],
trade_strategy: BaseStrategy,
trade_executor: BaseExecutor,
) -> Tuple[PORT_METRIC, INDICATOR_METRIC]:
"""backtest function for the interaction of the outermost strategy and executor in the nested decision execution
please refer to the docs of `collect_data_loop`
Returns
-------
portfolio_dict: PORT_METRIC
it records the trading portfolio_metrics information
indicator_dict: INDICATOR_METRIC
it computes the trading indicator
"""
return_value: dict = {}
for _decision in collect_data_loop(start_time, end_time, trade_strategy, trade_executor, return_value):
pass
portfolio_dict = cast(PORT_METRIC, return_value.get("portfolio_dict"))
indicator_dict = cast(INDICATOR_METRIC, return_value.get("indicator_dict"))
return portfolio_dict, indicator_dict
|
backtest function for the interaction of the outermost strategy and executor in the nested decision execution
please refer to the docs of `collect_data_loop`
Returns
-------
portfolio_dict: PORT_METRIC
it records the trading portfolio_metrics information
indicator_dict: INDICATOR_METRIC
it computes the trading indicator
|
backtest_loop
|
python
|
microsoft/qlib
|
qlib/backtest/backtest.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/backtest.py
|
MIT
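A usage sketch (editor's example): it assumes qlib has been initialized and that trade_strategy and trade_executor were already built, e.g. via qlib.backtest.get_strategy_executor; their configuration is elided here.

portfolio_dict, indicator_dict = backtest_loop(
    start_time="2020-01-01",
    end_time="2020-12-31",
    trade_strategy=trade_strategy,
    trade_executor=trade_executor,
)
# portfolio_dict is keyed by frequency (e.g. "1day"); each value is the
# (DataFrame, positions) pair returned by Account.get_portfolio_metrics
for freq_key, (pm_df, positions) in portfolio_dict.items():
    print(freq_key, pm_df.shape)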
|
def collect_data_loop(
start_time: Union[pd.Timestamp, str],
end_time: Union[pd.Timestamp, str],
trade_strategy: BaseStrategy,
trade_executor: BaseExecutor,
return_value: dict | None = None,
) -> Generator[BaseTradeDecision, Optional[BaseTradeDecision], None]:
"""Generator for collecting the trade decision data for rl training
Parameters
----------
start_time : Union[pd.Timestamp, str]
closed start time for backtest
**NOTE**: This will be applied to the outermost executor's calendar.
end_time : Union[pd.Timestamp, str]
closed end time for backtest
**NOTE**: This will be applied to the outermost executor's calendar.
E.g. Executor[day](Executor[1min]), setting `end_time == 20XX0301` will include all the minutes on 20XX0301
trade_strategy : BaseStrategy
the outermost portfolio strategy
trade_executor : BaseExecutor
the outermost executor
return_value : dict
used for backtest_loop
Yields
-------
object
trade decision
"""
trade_executor.reset(start_time=start_time, end_time=end_time)
trade_strategy.reset(level_infra=trade_executor.get_level_infra())
with tqdm(total=trade_executor.trade_calendar.get_trade_len(), desc="backtest loop") as bar:
_execute_result = None
while not trade_executor.finished():
_trade_decision: BaseTradeDecision = trade_strategy.generate_trade_decision(_execute_result)
_execute_result = yield from trade_executor.collect_data(_trade_decision, level=0)
trade_strategy.post_exe_step(_execute_result)
bar.update(1)
trade_strategy.post_upper_level_exe_step()
if return_value is not None:
all_executors = trade_executor.get_all_executors()
portfolio_dict: PORT_METRIC = {}
indicator_dict: INDICATOR_METRIC = {}
for executor in all_executors:
key = "{}{}".format(*Freq.parse(executor.time_per_step))
if executor.trade_account.is_port_metr_enabled():
portfolio_dict[key] = executor.trade_account.get_portfolio_metrics()
indicator_df = executor.trade_account.get_trade_indicator().generate_trade_indicators_dataframe()
indicator_obj = executor.trade_account.get_trade_indicator()
indicator_dict[key] = (indicator_df, indicator_obj)
return_value.update({"portfolio_dict": portfolio_dict, "indicator_dict": indicator_dict})
|
Generator for collecting the trade decision data for rl training
Parameters
----------
start_time : Union[pd.Timestamp, str]
closed start time for backtest
**NOTE**: This will be applied to the outermost executor's calendar.
end_time : Union[pd.Timestamp, str]
closed end time for backtest
**NOTE**: This will be applied to the outermost executor's calendar.
E.g. Executor[day](Executor[1min]), setting `end_time == 20XX0301` will include all the minutes on 20XX0301
trade_strategy : BaseStrategy
the outermost portfolio strategy
trade_executor : BaseExecutor
the outermost executor
return_value : dict
used for backtest_loop
Yields
-------
object
trade decision
|
collect_data_loop
|
python
|
microsoft/qlib
|
qlib/backtest/backtest.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/backtest.py
|
MIT
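A sketch of consuming the generator directly, as an RL data-collection pipeline might (editor's example; strategy and executor construction is again elided):

return_value: dict = {}
for trade_decision in collect_data_loop(
    start_time="2020-01-01",
    end_time="2020-12-31",
    trade_strategy=trade_strategy,
    trade_executor=trade_executor,
    return_value=return_value,
):
    pass  # an RL pipeline would record each yielded decision here
portfolio_dict = return_value["portfolio_dict"]
indicator_dict = return_value["indicator_dict"]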
|
def create(
code: str,
amount: float,
direction: OrderDir,
start_time: Union[str, pd.Timestamp] = None,
end_time: Union[str, pd.Timestamp] = None,
) -> Order:
"""
helper to create an order
# TODO: create order for unadjusted amount order
Parameters
----------
code : str
the id of the instrument
amount : float
**adjusted trading amount**
direction : OrderDir
trading direction
start_time : Union[str, pd.Timestamp] (optional)
the start of the interval which the order belongs to
end_time : Union[str, pd.Timestamp] (optional)
the end of the interval which the order belongs to
Returns
-------
Order:
The created order
"""
# NOTE: factor is a value that belongs to the results section. Users don't have to care about it when creating orders
return Order(
stock_id=code,
amount=amount,
start_time=None if start_time is None else pd.Timestamp(start_time),
end_time=None if end_time is None else pd.Timestamp(end_time),
direction=direction,
)
|
helper to create an order
# TODO: create order for unadjusted amount order
Parameters
----------
code : str
the id of the instrument
amount : float
**adjusted trading amount**
direction : OrderDir
trading direction
start_time : Union[str, pd.Timestamp] (optional)
the start of the interval which the order belongs to
end_time : Union[str, pd.Timestamp] (optional)
the end of the interval which the order belongs to
Returns
-------
Order:
The created order
|
create
|
python
|
microsoft/qlib
|
qlib/backtest/decision.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/decision.py
|
MIT
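A usage sketch (editor's example): in the qlib source this `create` lives on the OrderHelper class in decision.py; the instrument id and times below are illustrative.

from qlib.backtest.decision import OrderDir, OrderHelper

order = OrderHelper.create(
    code="SH600000",
    amount=1000,  # **adjusted** trading amount
    direction=OrderDir.BUY,
    start_time="2020-01-02 09:30:00",
    end_time="2020-01-02 10:00:00",
)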
|
def __init__(self, start_time: str | time, end_time: str | time) -> None:
"""
This is a callable class.
**NOTE**:
- It is designed for minute-bar intra-day trading!!!!!
- Both start_time and end_time are **closed** in the range
Parameters
----------
start_time : str | time
e.g. "9:30"
end_time : str | time
e.g. "14:30"
"""
self.start_time = pd.Timestamp(start_time).time() if isinstance(start_time, str) else start_time
self.end_time = pd.Timestamp(end_time).time() if isinstance(end_time, str) else end_time
assert self.start_time < self.end_time
|
This is a callable class.
**NOTE**:
- It is designed for minute-bar intra-day trading!!!!!
- Both start_time and end_time are **closed** in the range
Parameters
----------
start_time : str | time
e.g. "9:30"
end_time : str | time
e.g. "14:30"
|
__init__
|
python
|
microsoft/qlib
|
qlib/backtest/decision.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/decision.py
|
MIT
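A usage sketch (editor's example): in the qlib source this callable class is TradeRangeByTime; both endpoints are closed, and the times below are illustrative.

from qlib.backtest.decision import TradeRangeByTime

trade_range = TradeRangeByTime("9:30", "14:30")
# typically attached to a decision so inner executors only trade inside the window,
# e.g. TradeDecisionWO(order_list, strategy, trade_range=trade_range)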
|
def __init__(self, strategy: BaseStrategy, trade_range: Union[Tuple[int, int], TradeRange, None] = None) -> None:
"""
Parameters
----------
strategy : BaseStrategy
The strategy that makes the decision
trade_range: Union[Tuple[int, int], Callable] (optional)
The index range for underlying strategy.
Here are two examples of trade_range for each type
1) Tuple[int, int]
start_index and end_index of the underlying strategy (both sides are closed)
2) TradeRange
"""
self.strategy = strategy
self.start_time, self.end_time = strategy.trade_calendar.get_step_time()
# upper strategy has no knowledge about the sub executor before `_init_sub_trading`
self.total_step: Optional[int] = None
if isinstance(trade_range, tuple):
# for Tuple[int, int]
trade_range = IdxTradeRange(*trade_range)
self.trade_range: Optional[TradeRange] = trade_range
|
Parameters
----------
strategy : BaseStrategy
The strategy who make the decision
trade_range: Union[Tuple[int, int], Callable] (optional)
The index range for underlying strategy.
Here are two examples of trade_range for each type
1) Tuple[int, int]
start_index and end_index of the underlying strategy (both sides are closed)
2) TradeRange
|
__init__
|
python
|
microsoft/qlib
|
qlib/backtest/decision.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/decision.py
|
MIT
|
def update(self, trade_calendar: TradeCalendarManager) -> Optional[BaseTradeDecision]:
"""
Called at the **start** of each step.
This function is designed for the following purposes
1) Leave a hook for the strategy that made the `self` decision to update the decision itself
2) Update some information from the inner executor calendar
Parameters
----------
trade_calendar : TradeCalendarManager
The calendar of the **inner strategy**!!!!!
Returns
-------
BaseTradeDecision:
If there is a new update, the new decision is used. If there is no update, return None (the previous decision is used, or it may be unavailable)
"""
# purpose 2)
self.total_step = trade_calendar.get_trade_len()
# purpose 1)
return self.strategy.update_trade_decision(self, trade_calendar)
|
Called at the **start** of each step.
This function is designed for the following purposes
1) Leave a hook for the strategy that made the `self` decision to update the decision itself
2) Update some information from the inner executor calendar
Parameters
----------
trade_calendar : TradeCalendarManager
The calendar of the **inner strategy**!!!!!
Returns
-------
BaseTradeDecision:
If there is a new update, the new decision is used. If there is no update, return None (the previous decision is used, or it may be unavailable)
|
update
|
python
|
microsoft/qlib
|
qlib/backtest/decision.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/decision.py
|
MIT
|
def mod_inner_decision(self, inner_trade_decision: BaseTradeDecision) -> None:
"""
This method will be called on the inner_trade_decision after it is generated.
`inner_trade_decision` will be changed **inplace**.
Motivation of the `mod_inner_decision`
- Leave a hook for outer decision to affect the decision generated by the inner strategy
- e.g. the outermost strategy generates a time range for trading, but in the original design an upper
layer can only affect the nearest inner layer. With `mod_inner_decision`, the decision can be passed
through multiple layers
Parameters
----------
inner_trade_decision : BaseTradeDecision
"""
# the base class provides a default behaviour to modify inner_trade_decision
# trade_range should be propagated when inner trade_range is not set
if inner_trade_decision.trade_range is None:
inner_trade_decision.trade_range = self.trade_range
|
This method will be called on the inner_trade_decision after it is generated.
`inner_trade_decision` will be changed **inplace**.
Motivation of the `mod_inner_decision`
- Leave a hook for outer decision to affect the decision generated by the inner strategy
- e.g. the outermost strategy generates a time range for trading, but in the original design an upper
layer can only affect the nearest inner layer. With `mod_inner_decision`, the decision can be passed
through multiple layers
Parameters
----------
inner_trade_decision : BaseTradeDecision
|
mod_inner_decision
|
python
|
microsoft/qlib
|
qlib/backtest/decision.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/decision.py
|
MIT
|
def check_stock_suspended(
self,
stock_id: str,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
) -> bool:
"""if stock is suspended(hence not tradable), True will be returned"""
# is suspended
if stock_id in self.quote.get_all_stock():
# suspended stocks are represented by a None $close
# the $close data may also contain NaN
close = self.quote.get_data(stock_id, start_time, end_time, "$close")
if close is None:
# if no close record exists
return True
elif isinstance(close, IndexData):
# **any** non-NaN $close means a trading opportunity may exist
# if all returned values are NaN, then the stock is suspended
return cast(bool, cast(IndexData, close).isna().all())
else:
# it is a single value; NaN means suspended
return np.isnan(close)
else:
# if the stock is not in the stock list, then it is not tradable and regarded as suspended
return True
|
if the stock is suspended (hence not tradable), True will be returned
|
check_stock_suspended
|
python
|
microsoft/qlib
|
qlib/backtest/exchange.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/exchange.py
|
MIT
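The suspension rule above can be demonstrated standalone (editor's example with hypothetical $close values): a range of bars counts as suspended only when every returned close is NaN.

import numpy as np
import pandas as pd

close = pd.Series([np.nan, np.nan, np.nan])  # no valid close in the range
print(bool(close.isna().all()))              # True  -> treated as suspended

close = pd.Series([np.nan, 85.7, np.nan])    # any non-NaN close -> tradable bar exists
print(bool(close.isna().all()))              # False -> not suspended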
|
def deal_order(
self,
order: Order,
trade_account: Account | None = None,
position: BasePosition | None = None,
dealt_order_amount: Dict[str, float] = defaultdict(float),
) -> Tuple[float, float, float]:
"""
Deal the order when the actual transaction happens;
the results section in `Order` will be changed.
:param order: Deal the order.
:param trade_account: Trade account to be updated after dealing the order.
:param position: position to be updated after dealing the order.
:param dealt_order_amount: the dealt order amount dict with the format of {stock_id: float}
:return: trade_val, trade_cost, trade_price
"""
# check order first.
if not self.check_order(order):
order.deal_amount = 0.0
# using np.nan instead of None to make it more convenient to show the value in format string
self.logger.debug(f"Order failed due to trading limitation: {order}")
return 0.0, 0.0, np.nan
if trade_account is not None and position is not None:
raise ValueError("trade_account and position can only choose one")
# NOTE: order will be changed in this function
trade_price, trade_val, trade_cost = self._calc_trade_info_by_order(
order,
trade_account.current_position if trade_account else position,
dealt_order_amount,
)
if trade_val > 1e-5:
# If the order can only be dealt with 0 value, there is nothing to update.
# Otherwise, it would result in
# 1) some stock with 0 value in the position
# 2) `trade_unit` of trade_cost being lost from the user's account
if trade_account:
trade_account.update_order(order=order, trade_val=trade_val, cost=trade_cost, trade_price=trade_price)
elif position:
position.update_order(order=order, trade_val=trade_val, cost=trade_cost, trade_price=trade_price)
return trade_val, trade_cost, trade_price
|
Deal the order when the actual transaction happens;
the results section in `Order` will be changed.
:param order: Deal the order.
:param trade_account: Trade account to be updated after dealing the order.
:param position: position to be updated after dealing the order.
:param dealt_order_amount: the dealt order amount dict with the format of {stock_id: float}
:return: trade_val, trade_cost, trade_price
|
deal_order
|
python
|
microsoft/qlib
|
qlib/backtest/exchange.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/exchange.py
|
MIT
|
def get_volume(
self,
stock_id: str,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
method: Optional[str] = "sum",
) -> Union[None, int, float, bool, IndexData]:
"""get the total deal volume of stock with `stock_id` between the time interval [start_time, end_time)"""
return self.quote.get_data(stock_id, start_time, end_time, field="$volume", method=method)
|
get the total deal volume of stock with `stock_id` between the time interval [start_time, end_time)
|
get_volume
|
python
|
microsoft/qlib
|
qlib/backtest/exchange.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/exchange.py
|
MIT
|
def get_factor(
self,
stock_id: str,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
) -> Optional[float]:
"""
Returns
-------
Optional[float]:
`None`: if the stock is suspended `None` may be returned
`float`: return factor if the factor exists
"""
assert start_time is not None and end_time is not None, "the time range must be given"
if stock_id not in self.quote.get_all_stock():
return None
return self.quote.get_data(stock_id, start_time, end_time, field="$factor", method="ts_data_last")
|
Returns
-------
Optional[float]:
`None`: if the stock is suspended `None` may be returned
`float`: return factor if the factor exists
|
get_factor
|
python
|
microsoft/qlib
|
qlib/backtest/exchange.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/exchange.py
|
MIT
|
def generate_amount_position_from_weight_position(
self,
weight_position: dict,
cash: float,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
direction: OrderDir = OrderDir.BUY,
) -> dict:
"""
Generates the target position according to the weight and the cash.
NOTE: All the cash will be assigned to the tradable stock.
Parameter:
weight_position : dict {stock_id : weight}; allocate cash by weight_position
among them, each weight must be in the range 0 < weight < 1
cash : cash
start_time : the start time point of the step
end_time : the end time point of the step
direction : the direction of the deal price for estimating the amount
# NOTE: this function is used for calculating target position. So the default direction is buy
"""
# calculate the total weight of tradable value
tradable_weight = 0.0
for stock_id, wp in weight_position.items():
if self.is_stock_tradable(stock_id=stock_id, start_time=start_time, end_time=end_time):
# weight_position must be greater than 0 and less than 1
if wp < 0 or wp > 1:
raise ValueError(
"weight_position is {}, " "weight_position is not in the range of (0, 1).".format(wp),
)
tradable_weight += wp
if tradable_weight - 1.0 >= 1e-5:
raise ValueError("tradable_weight is {}, can not greater than 1.".format(tradable_weight))
amount_dict = {}
for stock_id in weight_position:
if weight_position[stock_id] > 0.0 and self.is_stock_tradable(
stock_id=stock_id,
start_time=start_time,
end_time=end_time,
):
amount_dict[stock_id] = (
cash
* weight_position[stock_id]
/ tradable_weight
// self.get_deal_price(
stock_id=stock_id,
start_time=start_time,
end_time=end_time,
direction=direction,
)
)
return amount_dict
|
Generates the target position according to the weight and the cash.
NOTE: All the cash will be assigned to the tradable stock.
Parameter:
weight_position : dict {stock_id : weight}; allocate cash by weight_position
among them, each weight must be in the range 0 < weight < 1
cash : cash
start_time : the start time point of the step
end_time : the end time point of the step
direction : the direction of the deal price for estimating the amount
# NOTE: this function is used for calculating target position. So the default direction is buy
|
generate_amount_position_from_weight_position
|
python
|
microsoft/qlib
|
qlib/backtest/exchange.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/exchange.py
|
MIT
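The allocation arithmetic above, shown standalone with hypothetical prices (editor's example); `//` floors the result to a whole number of shares.

cash = 1_000_000.0
weight_position = {"SH600000": 0.6, "SH600655": 0.3}   # each weight in (0, 1)
deal_price = {"SH600000": 86.78, "SH600655": 2699.57}  # hypothetical deal prices

tradable_weight = sum(weight_position.values())        # 0.9, assuming both are tradable
amount_dict = {
    stock_id: cash * w / tradable_weight // deal_price[stock_id]
    for stock_id, w in weight_position.items()
}
print(amount_dict)  # {'SH600000': 7682.0, 'SH600655': 123.0}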
|
def get_real_deal_amount(self, current_amount: float, target_amount: float, factor: float | None = None) -> float:
"""
Calculate the real adjusted deal amount considering the trading unit
:param current_amount:
:param target_amount:
:param factor:
:return real_deal_amount; Positive deal_amount indicates buying more stock.
"""
if current_amount == target_amount:
return 0
elif current_amount < target_amount:
deal_amount = target_amount - current_amount
deal_amount = self.round_amount_by_trade_unit(deal_amount, factor)
return deal_amount
else:
if target_amount == 0:
return -current_amount
else:
deal_amount = current_amount - target_amount
deal_amount = self.round_amount_by_trade_unit(deal_amount, factor)
return -deal_amount
|
Calculate the real adjusted deal amount considering the trading unit
:param current_amount:
:param target_amount:
:param factor:
:return real_deal_amount; Positive deal_amount indicates buying more stock.
|
get_real_deal_amount
|
python
|
microsoft/qlib
|
qlib/backtest/exchange.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/exchange.py
|
MIT
|
def generate_order_for_target_amount_position(
self,
target_position: dict,
current_position: dict,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
) -> List[Order]:
"""
Note: some future information is used in this function
Parameter:
target_position : dict { stock_id : amount }
current_position : dict { stock_id : amount }
trade_unit : trade_unit
down-sampling : for amount 321 and trade_unit 100, deal_amount is 300
orders are dealt on trade_date
"""
# split buy and sell for further use
buy_order_list = []
sell_order_list = []
# three parts: kept stock_id, dropped stock_id, new stock_id
# handle kept stock_id
# because the order of a set is not fixed, the trading order of the stocks would differ between runs,
# so the backtest results of the same parameters would differ;
# so here we sort the stock_ids and then randomly shuffle their order;
# because the same random seed is used, the final stock_id order is fixed
sorted_ids = sorted(set(list(current_position.keys()) + list(target_position.keys())))
random.seed(0)
random.shuffle(sorted_ids)
for stock_id in sorted_ids:
# Do not generate order for the non-tradable stocks
if not self.is_stock_tradable(stock_id=stock_id, start_time=start_time, end_time=end_time):
continue
target_amount = target_position.get(stock_id, 0)
current_amount = current_position.get(stock_id, 0)
factor = self.get_factor(stock_id, start_time=start_time, end_time=end_time)
deal_amount = self.get_real_deal_amount(current_amount, target_amount, factor)
if deal_amount == 0:
continue
if deal_amount > 0:
# buy stock
buy_order_list.append(
Order(
stock_id=stock_id,
amount=deal_amount,
direction=Order.BUY,
start_time=start_time,
end_time=end_time,
factor=factor,
),
)
else:
# sell stock
sell_order_list.append(
Order(
stock_id=stock_id,
amount=abs(deal_amount),
direction=Order.SELL,
start_time=start_time,
end_time=end_time,
factor=factor,
),
)
# return order_list : buy + sell
return sell_order_list + buy_order_list
|
Note: some future information is used in this function
Parameter:
target_position : dict { stock_id : amount }
current_position : dict { stock_id : amount }
trade_unit : trade_unit
down-sampling : for amount 321 and trade_unit 100, deal_amount is 300
orders are dealt on trade_date
|
generate_order_for_target_amount_position
|
python
|
microsoft/qlib
|
qlib/backtest/exchange.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/exchange.py
|
MIT
|
def calculate_amount_position_value(
self,
amount_dict: dict,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
only_tradable: bool = False,
direction: OrderDir = OrderDir.SELL,
) -> float:
"""Parameter
amount_dict : {stock_id : amount}
direction : the direction of the deal price for estimating the amount
# NOTE:
This function is used for calculating current position value.
So the default direction is sell.
"""
value = 0
for stock_id in amount_dict:
if not only_tradable or (
not self.check_stock_suspended(stock_id=stock_id, start_time=start_time, end_time=end_time)
and not self.check_stock_limit(stock_id=stock_id, start_time=start_time, end_time=end_time)
):
value += (
self.get_deal_price(
stock_id=stock_id,
start_time=start_time,
end_time=end_time,
direction=direction,
)
* amount_dict[stock_id]
)
return value
|
Parameter
amount_dict : {stock_id : amount}
direction : the direction of the deal price for estimating the amount
# NOTE:
This function is used for calculating current position value.
So the default direction is sell.
|
calculate_amount_position_value
|
python
|
microsoft/qlib
|
qlib/backtest/exchange.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/exchange.py
|
MIT
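The valuation itself is a price-times-amount sum over the holdings (optionally restricted to tradable stocks); a standalone sketch with hypothetical sell-direction deal prices (editor's example):

deal_price = {"SH600000": 86.78, "SH600655": 2699.57}  # hypothetical prices
amount_dict = {"SH600000": 7682.0, "SH600655": 123.0}

value = sum(deal_price[s] * amount_dict[s] for s in amount_dict)
print(round(value, 2))  # 998691.07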
|
def _get_factor_or_raise_error(
self,
factor: float | None = None,
stock_id: str | None = None,
start_time: pd.Timestamp = None,
end_time: pd.Timestamp = None,
) -> float:
"""Please refer to the docs of get_amount_of_trade_unit"""
if factor is None:
if stock_id is not None and start_time is not None and end_time is not None:
factor = self.get_factor(stock_id=stock_id, start_time=start_time, end_time=end_time)
else:
raise ValueError(f"`factor` and (`stock_id`, `start_time`, `end_time`) can't both be None")
assert factor is not None
return factor
|
Please refer to the docs of get_amount_of_trade_unit
|
_get_factor_or_raise_error
|
python
|
microsoft/qlib
|
qlib/backtest/exchange.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/exchange.py
|
MIT
|
def get_amount_of_trade_unit(
self,
factor: float | None = None,
stock_id: str | None = None,
start_time: pd.Timestamp = None,
end_time: pd.Timestamp = None,
) -> Optional[float]:
"""
get the trade unit of amount based on **factor**
the factor can be given directly or calculated from the given time range and stock id.
`factor` has higher priority than `stock_id`, `start_time` and `end_time`
Parameters
----------
factor : float
the adjusted factor
stock_id : str
the id of the stock
start_time :
the start time of trading range
end_time :
the end time of trading range
"""
if not self.trade_w_adj_price and self.trade_unit is not None:
factor = self._get_factor_or_raise_error(
factor=factor,
stock_id=stock_id,
start_time=start_time,
end_time=end_time,
)
return self.trade_unit / factor
else:
return None
|
get the trade unit of amount based on **factor**
the factor can be given directly or calculated from the given time range and stock id.
`factor` has higher priority than `stock_id`, `start_time` and `end_time`
Parameters
----------
factor : float
the adjusted factor
stock_id : str
the id of the stock
start_time :
the start time of trading range
end_time :
the end time of trading range
|
get_amount_of_trade_unit
|
python
|
microsoft/qlib
|
qlib/backtest/exchange.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/exchange.py
|
MIT
|
def round_amount_by_trade_unit(
self,
deal_amount: float,
factor: float | None = None,
stock_id: str | None = None,
start_time: pd.Timestamp = None,
end_time: pd.Timestamp = None,
) -> float:
"""Parameter
Please refer to the docs of get_amount_of_trade_unit
deal_amount : float, adjusted amount
factor : float, adjusted factor
return : float, real amount
"""
if not self.trade_w_adj_price and self.trade_unit is not None:
# the minimal amount is 1; add 0.1 to avoid floating-point precision problems
factor = self._get_factor_or_raise_error(
factor=factor,
stock_id=stock_id,
start_time=start_time,
end_time=end_time,
)
return (deal_amount * factor + 0.1) // self.trade_unit * self.trade_unit / factor
return deal_amount
|
Parameter
Please refer to the docs of get_amount_of_trade_unit
deal_amount : float, adjusted amount
factor : float, adjusted factor
return : float, real amount
|
round_amount_by_trade_unit
|
python
|
microsoft/qlib
|
qlib/backtest/exchange.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/exchange.py
|
MIT
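The rounding formula can be checked against the document's own example (amount 321 with trade_unit 100 rounds down to 300); a standalone sketch with factor 1.0 (editor's example):

trade_unit = 100
factor = 1.0
deal_amount = 321  # adjusted amount

real_amount = (deal_amount * factor + 0.1) // trade_unit * trade_unit / factor
print(real_amount)  # 300.0 -- the +0.1 guards against float precision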
|
def _calc_trade_info_by_order(
self,
order: Order,
position: Optional[BasePosition],
dealt_order_amount: dict,
) -> Tuple[float, float, float]:
"""
Calculation of trade info
**NOTE**: Order will be changed in this function
:param order:
:param position: Position
:param dealt_order_amount: the dealt order amount dict with the format of {stock_id: float}
:return: trade_price, trade_val, trade_cost
"""
trade_price = cast(
float,
self.get_deal_price(order.stock_id, order.start_time, order.end_time, direction=order.direction),
)
total_trade_val = cast(float, self.get_volume(order.stock_id, order.start_time, order.end_time)) * trade_price
order.factor = self.get_factor(order.stock_id, order.start_time, order.end_time)
order.deal_amount = order.amount # set to full amount and clip it step by step
# Clipping amount first
# - It simulates that the order is rejected directly by the exchange due to its large size
# Another choice is placing it after rounding the order
# - It simulates that the large order is submitted, but only part of it is dealt, regardless of rounding by trading unit.
self._clip_amount_by_volume(order, dealt_order_amount)
# TODO: the adjusted cost ratio can be overestimated as deal_amount will be clipped in the next steps
trade_val = order.deal_amount * trade_price
if not total_trade_val or np.isnan(total_trade_val):
# TODO: assert trade_val == 0, f"trade_val != 0, total_trade_val: {total_trade_val}; order info: {order}"
adj_cost_ratio = self.impact_cost
else:
adj_cost_ratio = self.impact_cost * (trade_val / total_trade_val) ** 2
if order.direction == Order.SELL:
cost_ratio = self.close_cost + adj_cost_ratio
# sell
# if we don't know current position, we choose to sell all
# Otherwise, we clip the amount based on current position
if position is not None:
current_amount = (
position.get_stock_amount(order.stock_id) if position.check_stock(order.stock_id) else 0
)
if not np.isclose(order.deal_amount, current_amount):
# when not selling the last stock, rounding is necessary
order.deal_amount = self.round_amount_by_trade_unit(
min(current_amount, order.deal_amount),
order.factor,
)
# in case of negative value of cash
if position.get_cash() + order.deal_amount * trade_price < max(
order.deal_amount * trade_price * cost_ratio,
self.min_cost,
):
order.deal_amount = 0
self.logger.debug(f"Order clipped due to cash limitation: {order}")
elif order.direction == Order.BUY:
cost_ratio = self.open_cost + adj_cost_ratio
# buy
if position is not None:
cash = position.get_cash()
trade_val = order.deal_amount * trade_price
if cash < max(trade_val * cost_ratio, self.min_cost):
# cash cannot cover cost
order.deal_amount = 0
self.logger.debug(f"Order clipped due to cost higher than cash: {order}")
elif cash < trade_val + max(trade_val * cost_ratio, self.min_cost):
# The money is not enough
max_buy_amount = self._get_buy_amount_by_cash_limit(trade_price, cash, cost_ratio)
order.deal_amount = self.round_amount_by_trade_unit(
min(max_buy_amount, order.deal_amount),
order.factor,
)
self.logger.debug(f"Order clipped due to cash limitation: {order}")
else:
# The money is enough
order.deal_amount = self.round_amount_by_trade_unit(order.deal_amount, order.factor)
else:
# Unknown amount of money. Just round the amount
order.deal_amount = self.round_amount_by_trade_unit(order.deal_amount, order.factor)
else:
raise NotImplementedError("order direction {} error".format(order.direction))
trade_val = order.deal_amount * trade_price
trade_cost = max(trade_val * cost_ratio, self.min_cost)
if trade_val <= 1e-5:
# if dealing is not successful, the trade_cost should be zero.
trade_cost = 0
return trade_price, trade_val, trade_cost
|
Calculation of trade info
**NOTE**: Order will be changed in this function
:param order:
:param position: Position
:param dealt_order_amount: the dealt order amount dict with the format of {stock_id: float}
:return: trade_price, trade_val, trade_cost
|
_calc_trade_info_by_order
|
python
|
microsoft/qlib
|
qlib/backtest/exchange.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/exchange.py
|
MIT
|
def __init__(
self,
time_per_step: str,
start_time: Union[str, pd.Timestamp] = None,
end_time: Union[str, pd.Timestamp] = None,
indicator_config: dict = {},
generate_portfolio_metrics: bool = False,
verbose: bool = False,
track_data: bool = False,
trade_exchange: Exchange | None = None,
common_infra: CommonInfrastructure | None = None,
settle_type: str = BasePosition.ST_NO,
**kwargs: Any,
) -> None:
"""
Parameters
----------
time_per_step : str
trade time per trading step, used to generate the trade calendar
show_indicator: bool, optional
whether to show indicators, including:
- 'pa', the price advantage
- 'pos', the positive rate
- 'ffr', the fulfill rate
indicator_config: dict, optional
config for calculating trade indicator, including the following fields:
- 'show_indicator': whether to show indicators, optional, default by False. The indicators include
- 'pa', the price advantage
- 'pos', the positive rate
- 'ffr', the fulfill rate
- 'pa_config': config for calculating price advantage(pa), optional
- 'base_price': the base price against which the trading price advantage is measured, optional, default by 'twap'
- If 'base_price' is 'twap', the based price is the time weighted average price
- If 'base_price' is 'vwap', the based price is the volume weighted average price
- 'weight_method': weighted method when calculating total trading pa by different orders' pa in each
step, optional, default by 'mean'
- If 'weight_method' is 'mean', calculating mean value of different orders' pa
- If 'weight_method' is 'amount_weighted', calculating amount weighted average value of different
orders' pa
- If 'weight_method' is 'value_weighted', calculating value weighted average value of different
orders' pa
- 'ffr_config': config for calculating fulfill rate(ffr), optional
- 'weight_method': weighted method when calculating total trading ffr by different orders' ffr in each
step, optional, default by 'mean'
- If 'weight_method' is 'mean', calculating mean value of different orders' ffr
- If 'weight_method' is 'amount_weighted', calculating amount weighted average value of different
orders' ffr
- If 'weight_method' is 'value_weighted', calculating value weighted average value of different
orders' ffr
Example:
{
'show_indicator': True,
'pa_config': {
"agg": "twap", # "vwap"
"price": "$close", # default to use deal price of the exchange
},
'ffr_config':{
'weight_method': 'value_weighted',
}
}
generate_portfolio_metrics : bool, optional
whether to generate portfolio_metrics, by default False
verbose : bool, optional
whether to print trading info, by default False
track_data : bool, optional
whether to generate trade_decision; it will be used when training an RL agent
- If `self.track_data` is true, when making data for training, the input `trade_decision` of `execute` will
be generated by `collect_data`
- Else, `trade_decision` will not be generated
trade_exchange : Exchange
exchange that provides market info, used to generate portfolio_metrics
- If generate_portfolio_metrics is None, trade_exchange will be ignored
- Else If `trade_exchange` is None, self.trade_exchange will be set with common_infra
common_infra : CommonInfrastructure, optional:
common infrastructure for backtesting, which may include:
- trade_account : Account, optional
trade account for trading
- trade_exchange : Exchange, optional
exchange that provides market info
settle_type : str
Please refer to the docs of BasePosition.settle_start
"""
self.time_per_step = time_per_step
self.indicator_config = indicator_config
self.generate_portfolio_metrics = generate_portfolio_metrics
self.verbose = verbose
self.track_data = track_data
self._trade_exchange = trade_exchange
self.level_infra = LevelInfrastructure()
self.level_infra.reset_infra(common_infra=common_infra, executor=self)
self._settle_type = settle_type
self.reset(start_time=start_time, end_time=end_time, common_infra=common_infra)
if common_infra is None:
get_module_logger("BaseExecutor").warning(f"`common_infra` is not set for {self}")
# record deal order amount in one day
self.dealt_order_amount: Dict[str, float] = defaultdict(float)
self.deal_day = None
|
Parameters
----------
time_per_step : str
trade time per trading step, used to generate the trade calendar
show_indicator: bool, optional
whether to show indicators, including:
- 'pa', the price advantage
- 'pos', the positive rate
- 'ffr', the fulfill rate
indicator_config: dict, optional
config for calculating trade indicator, including the following fields:
- 'show_indicator': whether to show indicators, optional, default by False. The indicators include
- 'pa', the price advantage
- 'pos', the positive rate
- 'ffr', the fulfill rate
- 'pa_config': config for calculating price advantage(pa), optional
- 'base_price': the base price against which the trading price advantage is measured, optional, default by 'twap'
- If 'base_price' is 'twap', the based price is the time weighted average price
- If 'base_price' is 'vwap', the based price is the volume weighted average price
- 'weight_method': weighted method when calculating total trading pa by different orders' pa in each
step, optional, default by 'mean'
- If 'weight_method' is 'mean', calculating mean value of different orders' pa
- If 'weight_method' is 'amount_weighted', calculating amount weighted average value of different
orders' pa
- If 'weight_method' is 'value_weighted', calculating value weighted average value of different
orders' pa
- 'ffr_config': config for calculating fulfill rate(ffr), optional
- 'weight_method': weighted method when calculating total trading ffr by different orders' ffr in each
step, optional, default by 'mean'
- If 'weight_method' is 'mean', calculating mean value of different orders' ffr
- If 'weight_method' is 'amount_weighted', calculating amount weighted average value of different
orders' ffr
- If 'weight_method' is 'value_weighted', calculating value weighted average value of different
orders' ffr
Example:
{
'show_indicator': True,
'pa_config': {
"agg": "twap", # "vwap"
"price": "$close", # default to use deal price of the exchange
},
'ffr_config':{
'weight_method': 'value_weighted',
}
}
generate_portfolio_metrics : bool, optional
whether to generate portfolio_metrics, by default False
verbose : bool, optional
whether to print trading info, by default False
track_data : bool, optional
whether to generate trade_decision; it will be used when training an RL agent
- If `self.track_data` is true, when making data for training, the input `trade_decision` of `execute` will
be generated by `collect_data`
- Else, `trade_decision` will not be generated
trade_exchange : Exchange
exchange that provides market info, used to generate portfolio_metrics
- If generate_portfolio_metrics is None, trade_exchange will be ignored
- Else If `trade_exchange` is None, self.trade_exchange will be set with common_infra
common_infra : CommonInfrastructure, optional:
common infrastructure for backtesting, which may include:
- trade_account : Account, optional
trade account for trading
- trade_exchange : Exchange, optional
exchange that provides market info
settle_type : str
Please refer to the docs of BasePosition.settle_start
|
__init__
|
python
|
microsoft/qlib
|
qlib/backtest/executor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/executor.py
|
MIT
|
def reset(self, common_infra: CommonInfrastructure | None = None, **kwargs: Any) -> None:
"""
- reset `start_time` and `end_time`, used in trade calendar
- reset `common_infra`, used to reset `trade_account`, `trade_exchange`, etc.
"""
if "start_time" in kwargs or "end_time" in kwargs:
start_time = kwargs.get("start_time")
end_time = kwargs.get("end_time")
self.level_infra.reset_cal(freq=self.time_per_step, start_time=start_time, end_time=end_time)
if common_infra is not None:
self.reset_common_infra(common_infra)
|
- reset `start_time` and `end_time`, used in trade calendar
- reset `common_infra`, used to reset `trade_account`, `trade_exchange`, etc.
|
reset
|
python
|
microsoft/qlib
|
qlib/backtest/executor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/executor.py
|
MIT
|
def execute(self, trade_decision: BaseTradeDecision, level: int = 0) -> List[object]:
"""execute the trade decision and return the executed result
NOTE: this function is never used directly in the framework. Should we delete it?
Parameters
----------
trade_decision : BaseTradeDecision
level : int
the level of current executor
Returns
----------
execute_result : List[object]
the executed result for trade decision
"""
return_value: dict = {}
for _decision in self.collect_data(trade_decision, return_value=return_value, level=level):
pass
return cast(list, return_value.get("execute_result"))
|
execute the trade decision and return the executed result
NOTE: this function is never used directly in the framework. Should we delete it?
Parameters
----------
trade_decision : BaseTradeDecision
level : int
the level of current executor
Returns
----------
execute_result : List[object]
the executed result for trade decision
|
execute
|
python
|
microsoft/qlib
|
qlib/backtest/executor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/executor.py
|
MIT
|
def _collect_data(
self,
trade_decision: BaseTradeDecision,
level: int = 0,
) -> Union[Generator[Any, Any, Tuple[List[object], dict]], Tuple[List[object], dict]]:
"""
Please refer to the doc of collect_data
The only difference between `_collect_data` and `collect_data` is that some common steps are moved into
collect_data
Parameters
----------
Please refer to the doc of collect_data
Returns
-------
Tuple[List[object], dict]:
(<the executed result for trade decision>, <the extra kwargs for `self.trade_account.update_bar_end`>)
"""
|
Please refer to the doc of collect_data
The only difference between `_collect_data` and `collect_data` is that some common steps are moved into
collect_data
Parameters
----------
Please refer to the doc of collect_data
Returns
-------
Tuple[List[object], dict]:
(<the executed result for trade decision>, <the extra kwargs for `self.trade_account.update_bar_end`>)
|
_collect_data
|
python
|
microsoft/qlib
|
qlib/backtest/executor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/executor.py
|
MIT
|
def collect_data(
self,
trade_decision: BaseTradeDecision,
return_value: dict | None = None,
level: int = 0,
) -> Generator[Any, Any, List[object]]:
"""Generator for collecting the trade decision data for rl training
This function will make a step forward
Parameters
----------
trade_decision : BaseTradeDecision
level : int
the level of current executor. 0 indicates the top level
return_value : dict
the dict used to return the value by reference
e.g. {"execute_result": <the executed result>}
Returns
----------
execute_result : List[object]
the executed result for trade decision.
** NOTE!!!! **:
1) This is necessary; the return value of the generator will be used in NestedExecutor
2) Please note the executed results are not merged.
Yields
-------
object
trade decision
"""
if self.track_data:
yield trade_decision
atomic = not issubclass(self.__class__, NestedExecutor) # issubclass(A, A) is True
if atomic and trade_decision.get_range_limit(default_value=None) is not None:
raise ValueError("atomic executor doesn't support specify `range_limit`")
if self._settle_type != BasePosition.ST_NO:
self.trade_account.current_position.settle_start(self._settle_type)
obj = self._collect_data(trade_decision=trade_decision, level=level)
if isinstance(obj, GeneratorType):
yield_res = yield from obj
assert isinstance(yield_res, tuple) and len(yield_res) == 2
res, kwargs = yield_res
else:
# Some concrete executor don't have inner decisions
res, kwargs = obj
trade_start_time, trade_end_time = self.trade_calendar.get_step_time()
# Account will not be changed in this function
self.trade_account.update_bar_end(
trade_start_time,
trade_end_time,
self.trade_exchange,
atomic=atomic,
outer_trade_decision=trade_decision,
indicator_config=self.indicator_config,
**kwargs,
)
self.trade_calendar.step()
if self._settle_type != BasePosition.ST_NO:
self.trade_account.current_position.settle_commit()
if return_value is not None:
return_value.update({"execute_result": res})
return res
|
Generator for collecting the trade decision data for rl training
This function will make a step forward
Parameters
----------
trade_decision : BaseTradeDecision
level : int
the level of current executor. 0 indicates the top level
return_value : dict
the dict used to return the value by reference
e.g. {"execute_result": <the executed result>}
Returns
----------
execute_result : List[object]
the executed result for trade decision.
** NOTE!!!! **:
1) This is necessary; the return value of the generator will be used in NestedExecutor
2) Please note the executed results are not merged.
Yields
-------
object
trade decision
|
collect_data
|
python
|
microsoft/qlib
|
qlib/backtest/executor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/executor.py
|
MIT
|
def reset_common_infra(self, common_infra: CommonInfrastructure, copy_trade_account: bool = False) -> None:
"""
reset infrastructure for trading
- reset inner_strategy and inner_executor common infra
"""
# NOTE: please refer to the docs of BaseExecutor.reset_common_infra for the meaning of `copy_trade_account`
# The first level follows the `copy_trade_account` from the upper level
super(NestedExecutor, self).reset_common_infra(common_infra, copy_trade_account=copy_trade_account)
# The lower levels have to copy the trade_account
self.inner_executor.reset_common_infra(common_infra, copy_trade_account=True)
self.inner_strategy.reset_common_infra(common_infra)
|
reset infrastructure for trading
- reset inner_strategy and inner_executor common infra
|
reset_common_infra
|
python
|
microsoft/qlib
|
qlib/backtest/executor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/executor.py
|
MIT
|
def __init__(
self,
time_per_step: str,
start_time: Union[str, pd.Timestamp] = None,
end_time: Union[str, pd.Timestamp] = None,
indicator_config: dict = {},
generate_portfolio_metrics: bool = False,
verbose: bool = False,
track_data: bool = False,
common_infra: CommonInfrastructure | None = None,
trade_type: str = TT_SERIAL,
**kwargs: Any,
) -> None:
"""
Parameters
----------
trade_type: str
please refer to the doc of `TT_SERIAL` & `TT_PARAL`
"""
super(SimulatorExecutor, self).__init__(
time_per_step=time_per_step,
start_time=start_time,
end_time=end_time,
indicator_config=indicator_config,
generate_portfolio_metrics=generate_portfolio_metrics,
verbose=verbose,
track_data=track_data,
common_infra=common_infra,
**kwargs,
)
self.trade_type = trade_type
|
Parameters
----------
trade_type: str
please refer to the doc of `TT_SERIAL` & `TT_PARAL`
|
__init__
|
python
|
microsoft/qlib
|
qlib/backtest/executor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/executor.py
|
MIT
|
def _get_order_iterator(self, trade_decision: BaseTradeDecision) -> List[Order]:
"""
Parameters
----------
trade_decision : BaseTradeDecision
the trade decision given by the strategy
Returns
-------
List[Order]:
get a list orders according to `self.trade_type`
"""
orders = _retrieve_orders_from_decision(trade_decision)
if self.trade_type == self.TT_SERIAL:
# Orders will be traded one by one in a serial way
order_it = orders
elif self.trade_type == self.TT_PARAL:
# NOTE: !!!!!!!
# Assumption: there will not be orders in different trading directions in a single step of a strategy !!!!
# A parallel trading failure can only be caused by conflicts over money
# Therefore, making the buy orders go first will make sure the conflicts happen.
# It is equal to parallel trading after sorting the orders by direction
order_it = sorted(orders, key=lambda order: -order.direction)
else:
raise NotImplementedError(f"This type of input is not supported")
return order_it
|
Parameters
----------
trade_decision : BaseTradeDecision
the trade decision given by the strategy
Returns
-------
List[Order]:
get a list orders according to `self.trade_type`
|
_get_order_iterator
|
python
|
microsoft/qlib
|
qlib/backtest/executor.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/executor.py
|
MIT
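The parallel-mode sort key can be demonstrated standalone (editor's example), using qlib's convention that SELL is 0 and BUY is 1: sorting by negative direction puts buy orders first while keeping each group's relative order.

orders = [("sell_a", 0), ("buy_b", 1), ("sell_c", 0), ("buy_d", 1)]  # (name, direction)
ordered = sorted(orders, key=lambda o: -o[1])  # Python's sort is stable
print([name for name, _ in ordered])  # ['buy_b', 'buy_d', 'sell_a', 'sell_c']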
|
def get_data(
self,
stock_id: str,
start_time: Union[pd.Timestamp, str],
end_time: Union[pd.Timestamp, str],
field: Union[str],
method: Optional[str] = None,
) -> Union[None, int, float, bool, IndexData]:
"""get the specific field of stock data during start time and end_time,
and apply method to the data.
Example:
.. code-block::
$close $volume
instrument datetime
SH600000 2010-01-04 86.778313 16162960.0
2010-01-05 87.433578 28117442.0
2010-01-06 85.713585 23632884.0
2010-01-07 83.788803 20813402.0
2010-01-08 84.730675 16044853.0
SH600655 2010-01-04 2699.567383 158193.328125
2010-01-08 2612.359619 77501.406250
2010-01-11 2712.982422 160852.390625
2010-01-12 2788.688232 164587.937500
2010-01-13 2790.604004 145460.453125
this function is used in two cases:
1. method is not None. It returns int/float/bool/None.
- It will return None in one case: when the method returns None
print(get_data(stock_id="SH600000", start_time="2010-01-04", end_time="2010-01-06", field="$close", method="last"))
85.713585
2. method is None. It returns IndexData.
print(get_data(stock_id="SH600000", start_time="2010-01-04", end_time="2010-01-06", field="$close", method=None))
IndexData([86.778313, 87.433578, 85.713585], [2010-01-04, 2010-01-05, 2010-01-06])
Parameters
----------
stock_id: str
start_time : Union[pd.Timestamp, str]
closed start time for backtest
end_time : Union[pd.Timestamp, str]
closed end time for backtest
field : str
the columns of data to fetch
method : Union[str, None]
the method applied to the data.
e.g [None, "last", "all", "sum", "mean", "ts_data_last"]
Return
----------
Union[None, int, float, bool, IndexData]
it will return None in the following cases
- There is no stock data from the data source which meets the query criterion.
- The `method` returns None
"""
raise NotImplementedError(f"Please implement the `get_data` method")
|
get the specific field of stock data during start time and end_time,
and apply method to the data.
Example:
.. code-block::
$close $volume
instrument datetime
SH600000 2010-01-04 86.778313 16162960.0
2010-01-05 87.433578 28117442.0
2010-01-06 85.713585 23632884.0
2010-01-07 83.788803 20813402.0
2010-01-08 84.730675 16044853.0
SH600655 2010-01-04 2699.567383 158193.328125
2010-01-08 2612.359619 77501.406250
2010-01-11 2712.982422 160852.390625
2010-01-12 2788.688232 164587.937500
2010-01-13 2790.604004 145460.453125
this function is used in two cases:
1. method is not None. It returns int/float/bool/None.
- It will return None in one case: when the method returns None
print(get_data(stock_id="SH600000", start_time="2010-01-04", end_time="2010-01-06", field="$close", method="last"))
85.713585
2. method is None. It returns IndexData.
print(get_data(stock_id="SH600000", start_time="2010-01-04", end_time="2010-01-06", field="$close", method=None))
IndexData([86.778313, 87.433578, 85.713585], [2010-01-04, 2010-01-05, 2010-01-06])
Parameters
----------
stock_id: str
start_time : Union[pd.Timestamp, str]
closed start time for backtest
end_time : Union[pd.Timestamp, str]
closed end time for backtest
field : str
the columns of data to fetch
method : Union[str, None]
the method applied to the data.
e.g [None, "last", "all", "sum", "mean", "ts_data_last"]
Return
----------
Union[None, int, float, bool, IndexData]
it will return None in the following cases
- There is no stock data from the data source which meets the query criterion.
- The `method` returns None
|
get_data
|
python
|
microsoft/qlib
|
qlib/backtest/high_performance_ds.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/high_performance_ds.py
|
MIT
|
def __init__(self, quote_df: pd.DataFrame, freq: str, region: str = "cn") -> None:
"""NumpyQuote
Parameters
----------
quote_df : pd.DataFrame
the init dataframe from qlib.
self.data : Dict(stock_id, IndexData.DataFrame)
"""
super().__init__(quote_df=quote_df, freq=freq)
quote_dict = {}
for stock_id, stock_val in quote_df.groupby(level="instrument", group_keys=False):
quote_dict[stock_id] = idd.MultiData(stock_val.droplevel(level="instrument"))
quote_dict[stock_id].sort_index() # To support more flexible slicing, we must sort data first
self.data = quote_dict
n, unit = Freq.parse(freq)
if unit in Freq.SUPPORT_CAL_LIST:
self.freq = Freq.get_timedelta(1, unit)
else:
raise ValueError(f"{freq} is not supported in NumpyQuote")
self.region = region
|
NumpyQuote
Parameters
----------
quote_df : pd.DataFrame
the init dataframe from qlib.
self.data : Dict(stock_id, IndexData.DataFrame)
|
__init__
|
python
|
microsoft/qlib
|
qlib/backtest/high_performance_ds.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/high_performance_ds.py
|
MIT
|
def transfer(self, func: Callable, new_col: str = None) -> Optional[BaseSingleMetric]:
"""compute new metric with existing metrics.
Parameters
----------
func : Callable
the func of computing new metric.
the kwargs of func will be replaced with metric data by name in this function.
e.g.
def func(pa):
return (pa > 0).sum() / pa.count()
new_col : str, optional
New metric will be assigned in the data if new_col is not None, by default None.
Return
----------
BaseSingleMetric
new metric.
"""
func_sig = inspect.signature(func).parameters.keys()
func_kwargs = {sig: self.data[sig] for sig in func_sig}
tmp_metric = func(**func_kwargs)
if new_col is not None:
self.data[new_col] = tmp_metric
return None
else:
return tmp_metric
|
compute new metric with existing metrics.
Parameters
----------
func : Callable
the function for computing the new metric.
the kwargs of func are filled by name with the corresponding metrics in self.data in this function.
e.g.
def func(pa):
return (pa > 0).sum() / pa.count()
new_col : str, optional
New metric will be assigned in the data if new_col is not None, by default None.
Returns
----------
BaseSingleMetric
new metric.
|
transfer
|
python
|
microsoft/qlib
|
qlib/backtest/high_performance_ds.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/high_performance_ds.py
|
MIT
|
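A hedged usage sketch of transfer: assuming the indicator already stores metrics named deal_amount and amount (illustrative names), a fulfill-rate metric can be derived and optionally stored in place:

# parameters of the function are matched by name against the stored metrics
def ffr(deal_amount, amount):
    return deal_amount / amount

order_indicator.transfer(ffr, new_col="ffr")  # stored under "ffr"; returns None
ratio = order_indicator.transfer(ffr)         # not stored; returned as a metric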
def sum_all_indicators(
order_indicator: BaseOrderIndicator,
indicators: List[BaseOrderIndicator],
metrics: Union[str, List[str]],
fill_value: float = 0,
) -> None:
"""sum indicators with the same metrics.
and assign to the order_indicator(BaseOrderIndicator).
NOTE: indicators could be an empty list when the orders at the lower level all fail.
Parameters
----------
order_indicator : BaseOrderIndicator
the order indicator to assign.
indicators : List[BaseOrderIndicator]
the list of all inner indicators.
metrics : Union[str, List[str]]
all metrics that need to be summed.
fill_value : float, optional
fill np.nan with this value, by default 0.
"""
raise NotImplementedError("Please implement the 'sum_all_indicators' method")
|
sum indicators with the same metrics.
and assign to the order_indicator(BaseOrderIndicator).
NOTE: indicators could be an empty list when the orders at the lower level all fail.
Parameters
----------
order_indicator : BaseOrderIndicator
the order indicator to assign.
indicators : List[BaseOrderIndicator]
the list of all inner indicators.
metrics : Union[str, List[str]]
all metrics that need to be summed.
fill_value : float, optional
fill np.nan with this value, by default 0.
|
sum_all_indicators
|
python
|
microsoft/qlib
|
qlib/backtest/high_performance_ds.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/high_performance_ds.py
|
MIT
|
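A minimal pandas-based sketch of the intended summation semantics (not the qlib implementation), assuming each inner indicator exposes a metric as a pd.Series indexed by stock_id:

import pandas as pd

def sum_metric(series_list, fill_value=0.0):
    # sum one metric across inner indicators over the union of their indices
    if not series_list:  # all lower-level orders failed
        return pd.Series(dtype=float)
    idx = series_list[0].index
    for s in series_list[1:]:
        idx = idx.union(s.index)
    return sum(s.reindex(idx, fill_value=fill_value) for s in series_list)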
def __init__(self, cash: float = 0, position_dict: Dict[str, Union[Dict[str, float], float]] = {}) -> None:
"""Init position by cash and position_dict.
Parameters
----------
cash : float, optional
initial cash in account, by default 0
position_dict : Dict[
stock_id,
Union[
int, # it is equal to {"amount": int}
{"amount": int, "price"(optional): float},
]
]
initial stocks with parameters amount and price,
if there is no price key in the dict of stocks, it will be filled by _fill_stock_value.
by default {}.
"""
super().__init__()
# NOTE: The position dict must be copied!!!
# Otherwise the caller's initial dict would be mutated as the position changes
self.init_cash = cash
self.position = position_dict.copy()
for stock, value in self.position.items():
if isinstance(value, int):
self.position[stock] = {"amount": value}
self.position["cash"] = cash
# If the stock price information is missing, the account value will not be calculated temporarily
try:
self.position["now_account_value"] = self.calculate_value()
except KeyError:
pass
|
Init position by cash and position_dict.
Parameters
----------
cash : float, optional
initial cash in account, by default 0
position_dict : Dict[
stock_id,
Union[
int, # it is equal to {"amount": int}
{"amount": int, "price"(optional): float},
]
]
initial stocks with parameters amount and price,
if there is no price key in the dict of stocks, it will be filled by _fill_stock_value.
by default {}.
|
__init__
|
python
|
microsoft/qlib
|
qlib/backtest/position.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/position.py
|
MIT
|
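A hedged construction example showing the two accepted forms of a position_dict entry (all numbers are made up):

pos = Position(
    cash=1_000_000.0,
    position_dict={
        "SH600000": 100,                              # shorthand for {"amount": 100}
        "SH600655": {"amount": 50, "price": 2700.0},  # amount and price given explicitly
    },
)
# "SH600000" has no price yet; it can be filled later via fill_stock_value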
def fill_stock_value(self, start_time: Union[str, pd.Timestamp], freq: str, last_days: int = 30) -> None:
"""fill the stock value by the close price of latest last_days from qlib.
Parameters
----------
start_time :
the start time of backtest.
freq : str
Frequency
last_days : int, optional
the days to get the latest close price, by default 30.
"""
stock_list = []
for stock, value in self.position.items():
if not isinstance(value, dict):
continue
if value.get("price", None) is None:
stock_list.append(stock)
if len(stock_list) == 0:
return
start_time = pd.Timestamp(start_time)
# note that start time is 2020-01-01 00:00:00 if raw start time is "2020-01-01"
price_end_time = start_time
price_start_time = start_time - timedelta(days=last_days)
price_df = D.features(
stock_list,
["$close"],
price_start_time,
price_end_time,
freq=freq,
disk_cache=True,
).dropna()
price_dict = price_df.groupby(["instrument"], group_keys=False).tail(1)["$close"].to_dict()
if len(price_dict) < len(stock_list):
lack_stock = set(stock_list) - set(price_dict)
raise ValueError(f"{lack_stock} don't have close prices in qlib in the latest {last_days} days")
for stock in stock_list:
self.position[stock]["price"] = price_dict[stock]
self.position["now_account_value"] = self.calculate_value()
|
fill the stock value with the latest close price within `last_days` days from qlib.
Parameters
----------
start_time :
the start time of backtest.
freq : str
Frequency
last_days : int, optional
the days to get the latest close price, by default 30.
|
fill_stock_value
|
python
|
microsoft/qlib
|
qlib/backtest/position.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/position.py
|
MIT
|
def _init_stock(self, stock_id: str, amount: float, price: float | None = None) -> None:
"""
initialize the stock in the current position
Parameters
----------
stock_id :
the id of the stock
amount : float
the amount of the stock
price :
the price when buying the init stock
"""
self.position[stock_id] = {}
self.position[stock_id]["amount"] = amount
self.position[stock_id]["price"] = price
self.position[stock_id]["weight"] = 0 # update the weight in the end of the trade date
|
initialize the stock in the current position
Parameters
----------
stock_id :
the id of the stock
amount : float
the amount of the stock
price :
the price when buying the init stock
|
_init_stock
|
python
|
microsoft/qlib
|
qlib/backtest/position.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/position.py
|
MIT
|
def get_stock_count(self, code: str, bar: str) -> float:
"""the days the account has been hold, it may be used in some special strategies"""
if f"count_{bar}" in self.position[code]:
return self.position[code][f"count_{bar}"]
else:
return 0
|
the number of days the stock has been held; it may be used in some special strategies
|
get_stock_count
|
python
|
microsoft/qlib
|
qlib/backtest/position.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/position.py
|
MIT
|
def get_stock_amount_dict(self) -> dict:
"""generate stock amount dict {stock_id : amount of stock}"""
d = {}
stock_list = self.get_stock_list()
for stock_code in stock_list:
d[stock_code] = self.get_stock_amount(code=stock_code)
return d
|
generate stock amount dict {stock_id : amount of stock}
|
get_stock_amount_dict
|
python
|
microsoft/qlib
|
qlib/backtest/position.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/position.py
|
MIT
|
def get_stock_weight_dict(self, only_stock: bool = False) -> dict:
"""get_stock_weight_dict
generate stock weight dict {stock_id : value weight of stock in the position}
it is meaningful at the beginning or the end of each trade date
:param only_stock: If only_stock=True, the weight of each stock within the total stock value will be returned
If only_stock=False, the weight of each stock within the total assets (stock + cash) will be returned
"""
if only_stock:
position_value = self.calculate_stock_value()
else:
position_value = self.calculate_value()
d = {}
stock_list = self.get_stock_list()
for stock_code in stock_list:
d[stock_code] = self.position[stock_code]["amount"] * self.position[stock_code]["price"] / position_value
return d
|
get_stock_weight_dict
generate stock weight dict {stock_id : value weight of stock in the position}
it is meaningful at the beginning or the end of each trade date
:param only_stock: If only_stock=True, the weight of each stock within the total stock value will be returned
If only_stock=False, the weight of each stock within the total assets (stock + cash) will be returned
|
get_stock_weight_dict
|
python
|
microsoft/qlib
|
qlib/backtest/position.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/position.py
|
MIT
|
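A small numeric illustration of the weight formula above (amount * price / position_value), with made-up holdings of two stocks plus cash:

amount = {"SH600000": 100, "SH600655": 50}
price = {"SH600000": 10.0, "SH600655": 20.0}
cash = 1000.0
stock_value = sum(amount[s] * price[s] for s in amount)            # 2000.0
total_value = stock_value + cash                                   # 3000.0
w_stock = {s: amount[s] * price[s] / stock_value for s in amount}  # only_stock=True
w_total = {s: amount[s] * price[s] / total_value for s in amount}  # only_stock=False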
def get_benchmark_weight(
bench,
start_date=None,
end_date=None,
path=None,
freq="day",
):
"""get_benchmark_weight
get the stock weight distribution of the benchmark
:param bench:
:param start_date:
:param end_date:
:param path:
:param freq:
:return: The weight distribution of the benchmark described by a pandas dataframe.
Every row corresponds to a trading day.
Every column corresponds to a stock.
Every cell represents the weight of the stock in the benchmark.
"""
if not path:
path = Path(C.dpm.get_data_uri(freq)).expanduser() / "raw" / "AIndexMembers" / "weights.csv"
# TODO: the storage of weights should be implemented in a more elegant way
# TODO: The benchmark is not consistent with the filename in instruments.
bench_weight_df = pd.read_csv(path, usecols=["code", "date", "index", "weight"])
bench_weight_df = bench_weight_df[bench_weight_df["index"] == bench]
bench_weight_df["date"] = pd.to_datetime(bench_weight_df["date"])
if start_date is not None:
bench_weight_df = bench_weight_df[bench_weight_df.date >= start_date]
if end_date is not None:
bench_weight_df = bench_weight_df[bench_weight_df.date <= end_date]
bench_stock_weight = bench_weight_df.pivot_table(index="date", columns="code", values="weight") / 100.0
return bench_stock_weight
|
get_benchmark_weight
get the stock weight distribution of the benchmark
:param bench:
:param start_date:
:param end_date:
:param path:
:param freq:
:return: The weight distribution of the benchmark described by a pandas dataframe.
Every row corresponds to a trading day.
Every column corresponds to a stock.
Every cell represents the weight of the stock in the benchmark.
|
get_benchmark_weight
|
python
|
microsoft/qlib
|
qlib/backtest/profit_attribution.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/profit_attribution.py
|
MIT
|
def get_stock_weight_df(positions):
"""get_stock_weight_df
:param positions: positions from the backtest result.
:return: A weight distribution for the position
"""
stock_weight = []
index = []
for date in sorted(positions.keys()):
pos = positions[date]
if isinstance(pos, dict):
pos = Position(position_dict=pos)
index.append(date)
stock_weight.append(pos.get_stock_weight_dict(only_stock=True))
return pd.DataFrame(stock_weight, index=index)
|
get_stock_weight_df
:param positions: positions from the backtest result.
:return: A weight distribution for the position
|
get_stock_weight_df
|
python
|
microsoft/qlib
|
qlib/backtest/profit_attribution.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/profit_attribution.py
|
MIT
|
def decompose_portofolio_weight(stock_weight_df, stock_group_df):
"""decompose_portofolio_weight
'''
:param stock_weight_df: a pandas dataframe to describe the portofolio by weight.
every row corresponds to a day
every column corresponds to a stock.
Here is an example below.
code SH600004 SH600006 SH600017 SH600022 SH600026 SH600037 \
date
2016-01-05 0.001543 0.001570 0.002732 0.001320 0.003000 NaN
2016-01-06 0.001538 0.001569 0.002770 0.001417 0.002945 NaN
....
:param stock_group_df: a pandas dataframe to describe the stock group.
every row corresponds to a day
every column corresponds to a stock.
the value in each cell represents the group id.
Here is an example of stock_group_df for industry. The values are industry codes.
instrument SH600000 SH600004 SH600005 SH600006 SH600007 SH600008 \
datetime
2016-01-05 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
2016-01-06 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
...
:return: Two dicts will be returned: the group_weight and the stock_weight_in_group.
The key is the group. The value is a Series or DataFrame describing the weight of the group or the weights of the stocks within the group.
"""
all_group = np.unique(stock_group_df.values.flatten())
all_group = all_group[~np.isnan(all_group)]
group_weight = {}
stock_weight_in_group = {}
for group_key in all_group:
group_mask = stock_group_df == group_key
group_weight[group_key] = stock_weight_df[group_mask].sum(axis=1)
stock_weight_in_group[group_key] = stock_weight_df[group_mask].divide(group_weight[group_key], axis=0)
return group_weight, stock_weight_in_group
|
decompose_portofolio_weight
:param stock_weight_df: a pandas dataframe describing the portfolio by weight.
every row corresponds to a day
every column corresponds to a stock.
Here is an example below.
code SH600004 SH600006 SH600017 SH600022 SH600026 SH600037
date
2016-01-05 0.001543 0.001570 0.002732 0.001320 0.003000 NaN
2016-01-06 0.001538 0.001569 0.002770 0.001417 0.002945 NaN
....
:param stock_group_df: a pandas dataframe to describe the stock group.
every row corresponds to a day
every column corresponds to a stock.
the value in each cell represents the group id.
Here is an example of stock_group_df for industry. The values are industry codes.
instrument SH600000 SH600004 SH600005 SH600006 SH600007 SH600008
datetime
2016-01-05 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
2016-01-06 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
...
:return: Two dicts will be returned: the group_weight and the stock_weight_in_group.
The key is the group. The value is a Series or DataFrame describing the weight of the group or the weights of the stocks within the group.
|
decompose_portofolio_weight
|
python
|
microsoft/qlib
|
qlib/backtest/profit_attribution.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/profit_attribution.py
|
MIT
|
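A toy numeric sketch of the decomposition above, assuming two stocks assigned to two groups (all values are made up):

import pandas as pd

dates = pd.to_datetime(["2016-01-05", "2016-01-06"])
stock_weight_df = pd.DataFrame({"S1": [0.6, 0.5], "S2": [0.4, 0.5]}, index=dates)
stock_group_df = pd.DataFrame({"S1": [1.0, 1.0], "S2": [2.0, 2.0]}, index=dates)

group_weight, stock_weight_in_group = decompose_portofolio_weight(stock_weight_df, stock_group_df)
# group_weight[1.0]          -> Series [0.6, 0.5]: total weight of group 1 per day
# stock_weight_in_group[1.0] -> S1 carries weight 1.0 within group 1 on both days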
def decompose_portofolio(stock_weight_df, stock_group_df, stock_ret_df):
"""
:param stock_weight_df: a pandas dataframe describing the portfolio by weight.
every row corresponds to a day
every column corresponds to a stock.
Here is an example below.
code SH600004 SH600006 SH600017 SH600022 SH600026 SH600037 \
date
2016-01-05 0.001543 0.001570 0.002732 0.001320 0.003000 NaN
2016-01-06 0.001538 0.001569 0.002770 0.001417 0.002945 NaN
2016-01-07 0.001555 0.001546 0.002772 0.001393 0.002904 NaN
2016-01-08 0.001564 0.001527 0.002791 0.001506 0.002948 NaN
2016-01-11 0.001597 0.001476 0.002738 0.001493 0.003043 NaN
....
:param stock_group_df: a pandas dataframe to describe the stock group.
every row corresponds to a day
every column corresponds to a stock.
the value in each cell represents the group id.
Here is an example of stock_group_df for industry. The values are industry codes.
instrument SH600000 SH600004 SH600005 SH600006 SH600007 SH600008 \
datetime
2016-01-05 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
2016-01-06 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
2016-01-07 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
2016-01-08 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
2016-01-11 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
...
:param stock_ret_df: a pandas dataframe to describe the stock return.
every row corresponds to a day
every column corresponds to a stock.
the value in each cell represents the return of the stock.
Here is an example of stock_ret_df.
instrument SH600000 SH600004 SH600005 SH600006 SH600007 SH600008 \
datetime
2016-01-05 0.007795 0.022070 0.099099 0.024707 0.009473 0.016216
2016-01-06 -0.032597 -0.075205 -0.098361 -0.098985 -0.099707 -0.098936
2016-01-07 -0.001142 0.022544 0.100000 0.004225 0.000651 0.047226
2016-01-08 -0.025157 -0.047244 -0.038567 -0.098177 -0.099609 -0.074408
2016-01-11 0.023460 0.004959 -0.034384 0.018663 0.014461 0.010962
...
:return: It will decompose the portfolio into the group weight and group return.
"""
all_group = np.unique(stock_group_df.values.flatten())
all_group = all_group[~np.isnan(all_group)]
group_weight, stock_weight_in_group = decompose_portofolio_weight(stock_weight_df, stock_group_df)
group_ret = {}
for group_key, val in stock_weight_in_group.items():
stock_weight_in_group_start_date = min(val.index)
stock_weight_in_group_end_date = max(val.index)
temp_stock_ret_df = stock_ret_df[
(stock_ret_df.index >= stock_weight_in_group_start_date)
& (stock_ret_df.index <= stock_weight_in_group_end_date)
]
group_ret[group_key] = (temp_stock_ret_df * val).sum(axis=1)
# If no weight is assigned, then the return of group will be np.nan
group_ret[group_key][group_weight[group_key] == 0.0] = np.nan
group_weight_df = pd.DataFrame(group_weight)
group_ret_df = pd.DataFrame(group_ret)
return group_weight_df, group_ret_df
|
:param stock_weight_df: a pandas dataframe describing the portfolio by weight.
every row corresponds to a day
every column corresponds to a stock.
Here is an example below.
code SH600004 SH600006 SH600017 SH600022 SH600026 SH600037
date
2016-01-05 0.001543 0.001570 0.002732 0.001320 0.003000 NaN
2016-01-06 0.001538 0.001569 0.002770 0.001417 0.002945 NaN
2016-01-07 0.001555 0.001546 0.002772 0.001393 0.002904 NaN
2016-01-08 0.001564 0.001527 0.002791 0.001506 0.002948 NaN
2016-01-11 0.001597 0.001476 0.002738 0.001493 0.003043 NaN
....
:param stock_group_df: a pandas dataframe to describe the stock group.
every row corresponds to a day
every column corresponds to a stock.
the value in each cell represents the group id.
Here is an example of stock_group_df for industry. The values are industry codes.
instrument SH600000 SH600004 SH600005 SH600006 SH600007 SH600008
datetime
2016-01-05 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
2016-01-06 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
2016-01-07 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
2016-01-08 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
2016-01-11 801780.0 801170.0 801040.0 801880.0 801180.0 801160.0
...
:param stock_ret_df: a pandas dataframe to describe the stock return.
every row corresponds to a day
every column corresponds to a stock.
the value in each cell represents the return of the stock.
Here is an example of stock_ret_df.
instrument SH600000 SH600004 SH600005 SH600006 SH600007 SH600008
datetime
2016-01-05 0.007795 0.022070 0.099099 0.024707 0.009473 0.016216
2016-01-06 -0.032597 -0.075205 -0.098361 -0.098985 -0.099707 -0.098936
2016-01-07 -0.001142 0.022544 0.100000 0.004225 0.000651 0.047226
2016-01-08 -0.025157 -0.047244 -0.038567 -0.098177 -0.099609 -0.074408
2016-01-11 0.023460 0.004959 -0.034384 0.018663 0.014461 0.010962
...
:return: It will decompose the portfolio into the group weight and group return.
|
decompose_portofolio
|
python
|
microsoft/qlib
|
qlib/backtest/profit_attribution.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/profit_attribution.py
|
MIT
|
def get_daily_bin_group(bench_values, stock_values, group_n):
"""get_daily_bin_group
Group the values of the stocks of benchmark into several bins in a day.
Put the stocks into these bins.
:param bench_values: A series containing the values of the stocks in the benchmark.
The index is the stock code.
:param stock_values: A series containing the values of the stocks in your portfolio.
The index is the stock code.
:param group_n: the number of bins to produce.
:return: A series with the same size and index as the stock_value.
The value in the series is the group id of the bins.
The No.1 bin contains the biggest values.
"""
stock_group = stock_values.copy()
# get the bin split points based on the daily proportion of benchmark
split_points = np.percentile(bench_values[~bench_values.isna()], np.linspace(0, 100, group_n + 1))
# Widen the biggest upper bound and smallest lower bound so that all values fall into a bin
split_points[0], split_points[-1] = -np.inf, np.inf
for i, (lb, up) in enumerate(zip(split_points, split_points[1:])):
stock_group.loc[stock_values[(stock_values >= lb) & (stock_values < up)].index] = group_n - i
return stock_group
|
get_daily_bin_group
Group the values of the stocks of benchmark into several bins in a day.
Put the stocks into these bins.
:param bench_values: A series containing the values of the stocks in the benchmark.
The index is the stock code.
:param stock_values: A series containing the values of the stocks in your portfolio.
The index is the stock code.
:param group_n: the number of bins to produce.
:return: A series with the same size and index as the stock_value.
The value in the series is the group id of the bins.
The No.1 bin contains the biggest values.
|
get_daily_bin_group
|
python
|
microsoft/qlib
|
qlib/backtest/profit_attribution.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/profit_attribution.py
|
MIT
|
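A hedged usage sketch with made-up values, splitting the benchmark values into two bins:

import pandas as pd

bench_values = pd.Series([1.0, 2.0, 3.0, 4.0], index=["B1", "B2", "B3", "B4"])
stock_values = pd.Series([1.5, 3.5], index=["S1", "S2"])

groups = get_daily_bin_group(bench_values, stock_values, group_n=2)
# the split point is the benchmark median (2.5); bin No.1 holds the biggest values,
# so S2 (3.5) falls into group 1.0 and S1 (1.5) into group 2.0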
def brinson_pa(
positions,
bench="SH000905",
group_field="industry",
group_method="category",
group_n=None,
deal_price="vwap",
freq="day",
):
"""brinson profit attribution
:param positions: The position produced by the backtest class
:param bench: The benchmark for comparing. TODO: if no benchmark is set, the equal-weighted is used.
:param group_field: The field used to set the group for assets allocation.
`industry` and `market_value` are often used.
:param group_method: 'category' or 'bins'. The method used to set the group for assets allocation
`bins` will split the value into `group_n` bins and each bin represents a group
:param group_n: the number of bins. Only used when group_method == 'bins'.
:return:
A dataframe with four columns: RAA(excess Return of Assets Allocation), RSS(excess Return of Stock Selection), RIN(excess Return of the INteraction), RTotal(Total excess Return)
Every row corresponds to a trading day, the value corresponds to the next return for this trading day
The middle info of brinson profit attribution
"""
# group_method will decide how to group the group_field.
dates = sorted(positions.keys())
start_date, end_date = min(dates), max(dates)
bench_stock_weight = get_benchmark_weight(bench, start_date, end_date, freq=freq)
# The fields used for allocation must be Qlib expression fields, so prefix them with "$" if needed
if not group_field.startswith("$"):
group_field = "$" + group_field
if not deal_price.startswith("$"):
deal_price = "$" + deal_price
# FIXME: In the current version, some attributes (such as market_value) of some
# suspended stocks are NaN. So we have to fetch more data to forward-fill the NaN
shift_start_date = start_date - datetime.timedelta(days=250)
instruments = D.list_instruments(
D.instruments(market="all"),
start_time=shift_start_date,
end_time=end_date,
as_list=True,
freq=freq,
)
stock_df = D.features(
instruments,
[group_field, deal_price],
start_time=shift_start_date,
end_time=end_date,
freq=freq,
)
stock_df.columns = [group_field, "deal_price"]
stock_group_field = stock_df[group_field].unstack().T
# FIXME: some attributes of some suspended stocks are NaN.
stock_group_field = stock_group_field.fillna(method="ffill")
stock_group_field = stock_group_field.loc[start_date:end_date]
stock_group = get_stock_group(stock_group_field, bench_stock_weight, group_method, group_n)
deal_price_df = stock_df["deal_price"].unstack().T
deal_price_df = deal_price_df.fillna(method="ffill")
# NOTE:
# The return will be slightly different from the return in the report.
# Here the positions are adjusted at the end of the trading day with close prices
stock_ret = (deal_price_df - deal_price_df.shift(1)) / deal_price_df.shift(1)
stock_ret = stock_ret.shift(-1).loc[start_date:end_date]
port_stock_weight_df = get_stock_weight_df(positions)
# decomposing the portofolio
port_group_weight_df, port_group_ret_df = decompose_portofolio(port_stock_weight_df, stock_group, stock_ret)
bench_group_weight_df, bench_group_ret_df = decompose_portofolio(bench_stock_weight, stock_group, stock_ret)
# if the group return of the portofolio is NaN, replace it with the market
# value
mod_port_group_ret_df = port_group_ret_df.copy()
mod_port_group_ret_df[mod_port_group_ret_df.isna()] = bench_group_ret_df
Q1 = (bench_group_weight_df * bench_group_ret_df).sum(axis=1)
Q2 = (port_group_weight_df * bench_group_ret_df).sum(axis=1)
Q3 = (bench_group_weight_df * mod_port_group_ret_df).sum(axis=1)
Q4 = (port_group_weight_df * mod_port_group_ret_df).sum(axis=1)
return (
pd.DataFrame(
{
"RAA": Q2 - Q1, # The excess profit from the assets allocation
"RSS": Q3 - Q1, # The excess profit from the stocks selection
# The excess profit from the interaction of assets allocation and stocks selection
"RIN": Q4 - Q3 - Q2 + Q1,
"RTotal": Q4 - Q1, # The totoal excess profit
},
),
{
"port_group_ret": port_group_ret_df,
"port_group_weight": port_group_weight_df,
"bench_group_ret": bench_group_ret_df,
"bench_group_weight": bench_group_weight_df,
"stock_group": stock_group,
"bench_stock_weight": bench_stock_weight,
"port_stock_weight": port_stock_weight_df,
"stock_ret": stock_ret,
},
)
|
brinson profit attribution
:param positions: The position produced by the backtest class
:param bench: The benchmark for comparing. TODO: if no benchmark is set, the equal-weighted is used.
:param group_field: The field used to set the group for assets allocation.
`industry` and `market_value` are often used.
:param group_method: 'category' or 'bins'. The method used to set the group for assets allocation
`bins` will split the value into `group_n` bins and each bin represents a group
:param group_n: the number of bins. Only used when group_method == 'bins'.
:return:
A dataframe with four columns: RAA(excess Return of Assets Allocation), RSS(excess Return of Stock Selection), RIN(excess Return of the INteraction), RTotal(Total excess Return)
Every row corresponds to a trading day, the value corresponds to the next return for this trading day
The middle info of brinson profit attribution
|
brinson_pa
|
python
|
microsoft/qlib
|
qlib/backtest/profit_attribution.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/profit_attribution.py
|
MIT
|
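For reference, the four quantities computed above follow the standard Brinson decomposition. With benchmark/portfolio group weights $w^B_g, w^P_g$ and group returns $r^B_g, r^P_g$:

$$Q_1 = \sum_g w^B_g r^B_g, \quad Q_2 = \sum_g w^P_g r^B_g, \quad Q_3 = \sum_g w^B_g r^P_g, \quad Q_4 = \sum_g w^P_g r^P_g$$

$$\mathrm{RAA} = Q_2 - Q_1, \quad \mathrm{RSS} = Q_3 - Q_1, \quad \mathrm{RIN} = Q_4 - Q_3 - Q_2 + Q_1, \quad \mathrm{RTotal} = Q_4 - Q_1$$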
def load_portfolio_metrics(self, path: str) -> None:
"""load pm from a file
should have format like
columns = ['account', 'return', 'total_turnover', 'turnover', 'cost', 'total_cost', 'value', 'cash', 'bench']
:param
path: str/ pathlib.Path()
"""
with pathlib.Path(path).open("rb") as f:
r = pd.read_csv(f, index_col=0)
r.index = pd.DatetimeIndex(r.index)
index = r.index
self.init_vars()
for trade_start_time in index:
self.update_portfolio_metrics_record(
trade_start_time=trade_start_time,
account_value=r.loc[trade_start_time]["account"],
cash=r.loc[trade_start_time]["cash"],
return_rate=r.loc[trade_start_time]["return"],
total_turnover=r.loc[trade_start_time]["total_turnover"],
turnover_rate=r.loc[trade_start_time]["turnover"],
total_cost=r.loc[trade_start_time]["total_cost"],
cost_rate=r.loc[trade_start_time]["cost"],
stock_value=r.loc[trade_start_time]["value"],
bench_value=r.loc[trade_start_time]["bench"],
)
|
load portfolio metrics from a file
The file should have a format like
columns = ['account', 'return', 'total_turnover', 'turnover', 'cost', 'total_cost', 'value', 'cash', 'bench']
:param
path: str/ pathlib.Path()
|
load_portfolio_metrics
|
python
|
microsoft/qlib
|
qlib/backtest/report.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/report.py
|
MIT
|
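A hedged sketch of the expected CSV layout (all numbers are made up; the unnamed first column is the datetime index of trade start times):

,account,return,total_turnover,turnover,cost,total_cost,value,cash,bench
2020-01-02,1.000e9,0.0012,2.0e7,0.020,1.0e4,3.0e4,9.00e8,1.00e8,0.0010
2020-01-03,1.001e9,-0.0005,1.5e7,0.015,8.0e3,3.8e4,9.10e8,9.10e7,-0.0004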
def _get_base_vol_pri(
self,
inst: str,
trade_start_time: pd.Timestamp,
trade_end_time: pd.Timestamp,
direction: OrderDir,
decision: BaseTradeDecision,
trade_exchange: Exchange,
pa_config: dict = {},
) -> Tuple[Optional[float], Optional[float]]:
"""
Get the base volume and price information
All the base price values are derived from this function
"""
agg = pa_config.get("agg", "twap").lower()
price = pa_config.get("price", "deal_price").lower()
if decision.trade_range is not None:
trade_start_time, trade_end_time = decision.trade_range.clip_time_range(
start_time=trade_start_time,
end_time=trade_end_time,
)
if price == "deal_price":
price_s = trade_exchange.get_deal_price(
inst,
trade_start_time,
trade_end_time,
direction=direction,
method=None,
)
else:
raise NotImplementedError(f"price type {price!r} is not supported")
# if there is no stock data during the time period
if price_s is None:
return None, None
if isinstance(price_s, (int, float, np.number)):
price_s = idd.SingleData(price_s, [trade_start_time])
elif isinstance(price_s, idd.SingleData):
pass
else:
raise NotImplementedError(f"price data of type {type(price_s)} is not supported")
# NOTE: there are some zeros in the trading price. These cases are known meaningless
# for aligning the previous logic, remove it.
# remove zero and negative values.
assert isinstance(price_s, idd.SingleData)
price_s = price_s.loc[(price_s > 1e-08).data.astype(bool)]
# NOTE ~(price_s < 1e-08) is different from price_s >= 1e-8
# ~(np.nan < 1e-8) -> ~(False) -> True
# if price_s is empty
if price_s.empty:
return None, None
assert isinstance(price_s, idd.SingleData)
if agg == "vwap":
volume_s = trade_exchange.get_volume(inst, trade_start_time, trade_end_time, method=None)
if isinstance(volume_s, (int, float, np.number)):
volume_s = idd.SingleData(volume_s, [trade_start_time])
assert isinstance(volume_s, idd.SingleData)
volume_s = volume_s.reindex(price_s.index)
elif agg == "twap":
volume_s = idd.SingleData(1, price_s.index)
else:
raise NotImplementedError(f"agg method {agg!r} is not supported")
assert isinstance(volume_s, idd.SingleData)
base_volume = volume_s.sum()
base_price = (price_s * volume_s).sum() / base_volume
return base_price, base_volume
|
Get the base volume and price information
All the base price values are derived from this function
|
_get_base_vol_pri
|
python
|
microsoft/qlib
|
qlib/backtest/report.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/report.py
|
MIT
|
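A small numeric illustration of the two agg modes above (made-up prices and volumes over two bars):

import numpy as np

prices = np.array([10.0, 12.0])
volumes = np.array([100.0, 300.0])

twap = prices.mean()                             # 11.0 (agg="twap": unit volume per bar)
vwap = (prices * volumes).sum() / volumes.sum()  # 11.5 (agg="vwap": volume-weighted)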
def _agg_base_price(
self,
inner_order_indicators: List[BaseOrderIndicator],
decision_list: List[Tuple[BaseTradeDecision, pd.Timestamp, pd.Timestamp]],
trade_exchange: Exchange,
pa_config: dict = {},
) -> None:
"""
# NOTE:!!!!
# Strong assumption!!!!!!
# the correctness of the base_price relies on the **same** exchange being used
Parameters
----------
inner_order_indicators : List[BaseOrderIndicator]
the indicators of the accounts of the inner executors
decision_list: List[Tuple[BaseTradeDecision, pd.Timestamp, pd.Timestamp]],
a list of decisions corresponding to inner_order_indicators
trade_exchange : Exchange
for retrieving trading price
pa_config : dict
For example
{
"agg": "twap", # "vwap"
"price": "$close", # TODO: this is not supported now!!!!!
# default to use deal price of the exchange
}
"""
# TODO: I think there are potentials to be optimized
trade_dir = self.order_indicator.get_index_data("trade_dir")
if len(trade_dir) > 0:
bp_all, bv_all = [], []
# <step, inst, (base_volume | base_price)>
for oi, (dec, start, end) in zip(inner_order_indicators, decision_list):
bp_s = oi.get_index_data("base_price").reindex(trade_dir.index)
bv_s = oi.get_index_data("base_volume").reindex(trade_dir.index)
bp_new, bv_new = {}, {}
for pr, v, (inst, direction) in zip(bp_s.data, bv_s.data, zip(trade_dir.index, trade_dir.data)):
if np.isnan(pr):
bp_tmp, bv_tmp = self._get_base_vol_pri(
inst,
start,
end,
decision=dec,
direction=direction,
trade_exchange=trade_exchange,
pa_config=pa_config,
)
if (bp_tmp is not None) and (bv_tmp is not None):
bp_new[inst], bv_new[inst] = bp_tmp, bv_tmp
else:
bp_new[inst], bv_new[inst] = pr, v
bp_new = idd.SingleData(bp_new)
bv_new = idd.SingleData(bv_new)
bp_all.append(bp_new)
bv_all.append(bv_new)
bp_all_multi_data = idd.concat(bp_all, axis=1)
bv_all_multi_data = idd.concat(bv_all, axis=1)
base_volume = bv_all_multi_data.sum(axis=1)
self.order_indicator.assign("base_volume", base_volume.to_dict())
self.order_indicator.assign(
"base_price",
((bp_all_multi_data * bv_all_multi_data).sum(axis=1) / base_volume).to_dict(),
)
|
# NOTE:!!!!
# Strong assumption!!!!!!
# the correctness of the base_price relies on the **same** exchange being used
Parameters
----------
inner_order_indicators : List[BaseOrderIndicator]
the indicators of the accounts of the inner executors
decision_list: List[Tuple[BaseTradeDecision, pd.Timestamp, pd.Timestamp]],
a list of decisions corresponding to inner_order_indicators
trade_exchange : Exchange
for retrieving trading price
pa_config : dict
For example
{
"agg": "twap", # "vwap"
"price": "$close", # TODO: this is not supported now!!!!!
# default to use deal price of the exchange
}
|
_agg_base_price
|
python
|
microsoft/qlib
|
qlib/backtest/report.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/report.py
|
MIT
|
def get_signal(self, start_time: pd.Timestamp, end_time: pd.Timestamp) -> Union[pd.Series, pd.DataFrame, None]:
"""
get the signal at the end of the decision step (from `start_time` to `end_time`)
Returns
-------
Union[pd.Series, pd.DataFrame, None]:
returns None if there is no signal on the specific day
"""
|
get the signal at the end of the decision step (from `start_time` to `end_time`)
Returns
-------
Union[pd.Series, pd.DataFrame, None]:
returns None if there is no signal on the specific day
|
get_signal
|
python
|
microsoft/qlib
|
qlib/backtest/signal.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/signal.py
|
MIT
|
def _update_model(self) -> None:
"""
When using online data, update model in each bar as the following steps:
- update dataset with online data, the dataset should support online update
- make the latest prediction scores of the new bar
- update the pred score into the latest prediction
"""
# TODO: this method is not included in the framework and could be refactored later
raise NotImplementedError("_update_model is not implemented!")
|
When using online data, update model in each bar as the following steps:
- update dataset with online data, the dataset should support online update
- make the latest prediction scores of the new bar
- update the pred score into the latest prediction
|
_update_model
|
python
|
microsoft/qlib
|
qlib/backtest/signal.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/signal.py
|
MIT
|
def create_signal_from(
obj: Union[Signal, Tuple[BaseModel, Dataset], List, Dict, Text, pd.Series, pd.DataFrame],
) -> Signal:
"""
create signal from diverse information
This method will choose the right method to create a signal based on `obj`
Please refer to the code below.
"""
if isinstance(obj, Signal):
return obj
elif isinstance(obj, (tuple, list)):
return ModelSignal(*obj)
elif isinstance(obj, (dict, str)):
return init_instance_by_config(obj)
elif isinstance(obj, (pd.DataFrame, pd.Series)):
return SignalWCache(signal=obj)
else:
raise NotImplementedError(f"signal of type {type(obj)} is not supported")
|
create signal from diverse information
This method will choose the right method to create a signal based on `obj`
Please refer to the code below.
|
create_signal_from
|
python
|
microsoft/qlib
|
qlib/backtest/signal.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/signal.py
|
MIT
|
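Hedged usage sketches of the dispatch above, assuming pred is a prediction pd.Series and model/dataset are fitted qlib objects:

sig = create_signal_from(pred)              # pd.Series/pd.DataFrame -> SignalWCache
sig = create_signal_from((model, dataset))  # (BaseModel, Dataset)   -> ModelSignal
sig = create_signal_from(                   # dict/str -> init_instance_by_config
    {"class": "SignalWCache", "module_path": "qlib.backtest.signal", "kwargs": {"signal": pred}}
)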
def __init__(
self,
freq: str,
start_time: Union[str, pd.Timestamp] = None,
end_time: Union[str, pd.Timestamp] = None,
level_infra: LevelInfrastructure | None = None,
) -> None:
"""
Parameters
----------
freq : str
frequency of trading calendar, also trade time per trading step
start_time : Union[str, pd.Timestamp], optional
closed start of the trading calendar, by default None
If `start_time` is None, it must be reset before trading.
end_time : Union[str, pd.Timestamp], optional
closed end of the trade time range, by default None
If `end_time` is None, it must be reset before trading.
"""
self.level_infra = level_infra
self.reset(freq=freq, start_time=start_time, end_time=end_time)
|
Parameters
----------
freq : str
frequency of trading calendar, also trade time per trading step
start_time : Union[str, pd.Timestamp], optional
closed start of the trading calendar, by default None
If `start_time` is None, it must be reset before trading.
end_time : Union[str, pd.Timestamp], optional
closed end of the trade time range, by default None
If `end_time` is None, it must be reset before trading.
|
__init__
|
python
|
microsoft/qlib
|
qlib/backtest/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/utils.py
|
MIT
|
def reset(
self,
freq: str,
start_time: Union[str, pd.Timestamp] = None,
end_time: Union[str, pd.Timestamp] = None,
) -> None:
"""
Please refer to the docs of `__init__`
Reset the trade calendar
- self.trade_len : The total count of trading steps
- self.trade_step : The number of trading steps finished, self.trade_step can be
[0, 1, 2, ..., self.trade_len - 1]
"""
self.freq = freq
self.start_time = pd.Timestamp(start_time) if start_time else None
self.end_time = pd.Timestamp(end_time) if end_time else None
_calendar = Cal.calendar(freq=freq, future=True)
assert isinstance(_calendar, np.ndarray)
self._calendar = _calendar
_, _, _start_index, _end_index = Cal.locate_index(start_time, end_time, freq=freq, future=True)
self.start_index = _start_index
self.end_index = _end_index
self.trade_len = _end_index - _start_index + 1
self.trade_step = 0
|
Please refer to the docs of `__init__`
Reset the trade calendar
- self.trade_len : The total count of trading steps
- self.trade_step : The number of trading steps finished, self.trade_step can be
[0, 1, 2, ..., self.trade_len - 1]
|
reset
|
python
|
microsoft/qlib
|
qlib/backtest/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/utils.py
|
MIT
|
def get_step_time(self, trade_step: int | None = None, shift: int = 0) -> Tuple[pd.Timestamp, pd.Timestamp]:
"""
Get the left and right endpoints of the trade_step'th trading interval
About the endpoints:
- Qlib uses the closed interval in time-series data selection, which has the same performance as
pandas.Series.loc
# - The returned right endpoint should have 1 second subtracted because of the closed interval representation in
# Qlib.
# Note: Qlib supports decision execution down to minutely frequency, so 1 second is less than any trading time
# interval.
Parameters
----------
trade_step : int, optional
the number of trading step finished, by default None to indicate current step
shift : int, optional
shift bars, by default 0
Returns
-------
Tuple[pd.Timestamp, pd.Timestamp]
- If shift == 0, return the trading time range
- If shift > 0, return the trading time range of the earlier shift bars
- If shift < 0, return the trading time range of the later shift bar
"""
if trade_step is None:
trade_step = self.get_trade_step()
calendar_index = self.start_index + trade_step - shift
return self._calendar[calendar_index], epsilon_change(self._calendar[calendar_index + 1])
|
Get the left and right endpoints of the trade_step'th trading interval
About the endpoints:
- Qlib uses the closed interval in time-series data selection, which has the same performance as
pandas.Series.loc
# - The returned right endpoint should have 1 second subtracted because of the closed interval representation in
# Qlib.
# Note: Qlib supports decision execution down to minutely frequency, so 1 second is less than any trading time
# interval.
Parameters
----------
trade_step : int, optional
the number of trading step finished, by default None to indicate current step
shift : int, optional
shift bars, by default 0
Returns
-------
Tuple[pd.Timestamp, pd.Timestamp]
- If shift == 0, return the trading time range
- If shift > 0, return the trading time range of the earlier shift bars
- If shift < 0, return the trading time range of the later shift bar
|
get_step_time
|
python
|
microsoft/qlib
|
qlib/backtest/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/utils.py
|
MIT
|
def get_range_idx(self, start_time: pd.Timestamp, end_time: pd.Timestamp) -> Tuple[int, int]:
"""
get the range index which involves start_time~end_time (both sides are closed)
Parameters
----------
start_time : pd.Timestamp
end_time : pd.Timestamp
Returns
-------
Tuple[int, int]:
the index of the range. **the left and right are closed**
"""
left = int(np.searchsorted(self._calendar, start_time, side="right") - 1)
right = int(np.searchsorted(self._calendar, end_time, side="right") - 1)
left -= self.start_index
right -= self.start_index
def clip(idx: int) -> int:
return min(max(0, idx), self.trade_len - 1)
return clip(left), clip(right)
|
get the range index which involves start_time~end_time (both sides are closed)
Parameters
----------
start_time : pd.Timestamp
end_time : pd.Timestamp
Returns
-------
Tuple[int, int]:
the index of the range. **the left and right are closed**
|
get_range_idx
|
python
|
microsoft/qlib
|
qlib/backtest/utils.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/utils.py
|
MIT
|
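A worked sketch of the searchsorted logic above on a tiny daily calendar (dates are illustrative; start_index assumed 0):

import numpy as np

calendar = np.array(["2020-01-02", "2020-01-03", "2020-01-06"], dtype="datetime64[D]")
start, end = np.datetime64("2020-01-03"), np.datetime64("2020-01-06")

left = int(np.searchsorted(calendar, start, side="right") - 1)   # 1
right = int(np.searchsorted(calendar, end, side="right") - 1)    # 2
# after subtracting start_index and clipping to [0, trade_len - 1],
# (left, right) indexes the trading steps covering start~end, both closed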
def create_account_instance(
start_time: Union[pd.Timestamp, str],
end_time: Union[pd.Timestamp, str],
benchmark: Optional[str],
account: Union[float, int, dict],
pos_type: str = "Position",
) -> Account:
"""
# TODO: it is very strange to pass benchmark_config into the account (maybe for the report)
# There should be a post-step to process the report.
Parameters
----------
start_time
start time of the benchmark
end_time
end time of the benchmark
benchmark : Optional[str]
the benchmark for reporting
account : Union[
float,
{
"cash": float,
"stock1": Union[
int, # it is equal to {"amount": int}
{"amount": int, "price"(optional): float},
]
},
]
information describing how to create the account
For `float`:
Using Account with only initial cash
For `dict`:
key "cash" means initial cash.
key "stock1" means the information of first stock with amount and price(optional).
...
pos_type: str
Position type.
"""
if isinstance(account, (int, float)):
init_cash = account
position_dict = {}
elif isinstance(account, dict):
init_cash = account.pop("cash")
position_dict = account
else:
raise ValueError("account must be in (int, float, dict)")
return Account(
init_cash=init_cash,
position_dict=position_dict,
pos_type=pos_type,
benchmark_config=(
{}
if benchmark is None
else {
"benchmark": benchmark,
"start_time": start_time,
"end_time": end_time,
}
),
)
|
# TODO: it is very strange to pass benchmark_config into the account (maybe for the report)
# There should be a post-step to process the report.
Parameters
----------
start_time
start time of the benchmark
end_time
end time of the benchmark
benchmark : Optional[str]
the benchmark for reporting
account : Union[
float,
{
"cash": float,
"stock1": Union[
int, # it is equal to {"amount": int}
{"amount": int, "price"(optional): float},
]
},
]
information describing how to create the account
For `float`:
Using Account with only initial cash
For `dict`:
key "cash" means initial cash.
key "stock1" means the information of first stock with amount and price(optional).
...
pos_type: str
Position type.
|
create_account_instance
|
python
|
microsoft/qlib
|
qlib/backtest/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/__init__.py
|
MIT
|
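Hedged construction sketches for the two accepted account forms (all numbers are illustrative):

# cash-only account
acc = create_account_instance("2020-01-01", "2020-12-31", "SH000300", account=1e9)

# cash plus initial holdings
acc = create_account_instance(
    "2020-01-01", "2020-12-31", "SH000300",
    account={
        "cash": 1e6,
        "SH600000": 100,                              # amount only
        "SH600655": {"amount": 50, "price": 2700.0},  # amount and price
    },
)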
def backtest(
start_time: Union[pd.Timestamp, str],
end_time: Union[pd.Timestamp, str],
strategy: Union[str, dict, object, Path],
executor: Union[str, dict, object, Path],
benchmark: str = "SH000300",
account: Union[float, int, dict] = 1e9,
exchange_kwargs: dict = {},
pos_type: str = "Position",
) -> Tuple[PORT_METRIC, INDICATOR_METRIC]:
"""initialize the strategy and executor, then backtest function for the interaction of the outermost strategy and
executor in the nested decision execution
Parameters
----------
start_time : Union[pd.Timestamp, str]
closed start time for backtest
**NOTE**: This will be applied to the outermost executor's calendar.
end_time : Union[pd.Timestamp, str]
closed end time for backtest
**NOTE**: This will be applied to the outermost executor's calendar.
E.g. Executor[day](Executor[1min]), setting `end_time == 20XX0301` will include all the minutes on 20XX0301
strategy : Union[str, dict, object, Path]
for initializing outermost portfolio strategy. Please refer to the docs of init_instance_by_config for more
information.
executor : Union[str, dict, object, Path]
for initializing the outermost executor.
benchmark: str
the benchmark for reporting.
account : Union[float, int, dict]
information for describing how to create the account
For `float` or `int`:
Using Account with only initial cash
For `dict`:
Using Account with initial cash and stocks (see `create_account_instance` for the expected keys)
exchange_kwargs : dict
the kwargs for initializing Exchange
pos_type : str
the type of Position.
Returns
-------
portfolio_dict: PORT_METRIC
it records the trading portfolio_metrics information
indicator_dict: INDICATOR_METRIC
it computes the trading indicator
It is organized in a dict format
"""
trade_strategy, trade_executor = get_strategy_executor(
start_time,
end_time,
strategy,
executor,
benchmark,
account,
exchange_kwargs,
pos_type=pos_type,
)
return backtest_loop(start_time, end_time, trade_strategy, trade_executor)
|
initialize the strategy and executor, then run the backtest loop for the interaction of the outermost strategy
and executor in the nested decision execution
Parameters
----------
start_time : Union[pd.Timestamp, str]
closed start time for backtest
**NOTE**: This will be applied to the outermost executor's calendar.
end_time : Union[pd.Timestamp, str]
closed end time for backtest
**NOTE**: This will be applied to the outermost executor's calendar.
E.g. Executor[day](Executor[1min]), setting `end_time == 20XX0301` will include all the minutes on 20XX0301
strategy : Union[str, dict, object, Path]
for initializing outermost portfolio strategy. Please refer to the docs of init_instance_by_config for more
information.
executor : Union[str, dict, object, Path]
for initializing the outermost executor.
benchmark: str
the benchmark for reporting.
account : Union[float, int, dict]
information for describing how to create the account
For `float` or `int`:
Using Account with only initial cash
For `dict`:
Using Account with initial cash and stocks (see `create_account_instance` for the expected keys)
exchange_kwargs : dict
the kwargs for initializing Exchange
pos_type : str
the type of Position.
Returns
-------
portfolio_dict: PORT_METRIC
it records the trading portfolio_metrics information
indicator_dict: INDICATOR_METRIC
it computes the trading indicator
It is organized in a dict format
|
backtest
|
python
|
microsoft/qlib
|
qlib/backtest/__init__.py
|
https://github.com/microsoft/qlib/blob/master/qlib/backtest/__init__.py
|
MIT
|
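A hedged end-to-end sketch. The configs below use qlib's TopkDropoutStrategy and SimulatorExecutor as plausible choices (not the only ones), and pred stands in for a prediction series produced elsewhere:

portfolio_dict, indicator_dict = backtest(
    start_time="2020-01-01",
    end_time="2020-12-31",
    strategy={
        "class": "TopkDropoutStrategy",
        "module_path": "qlib.contrib.strategy",
        "kwargs": {"signal": pred, "topk": 50, "n_drop": 5},
    },
    executor={
        "class": "SimulatorExecutor",
        "module_path": "qlib.backtest.executor",
        "kwargs": {"time_per_step": "day", "generate_portfolio_metrics": True},
    },
    benchmark="SH000300",
    account=1e9,
)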