# Repo: BayesFlow
# File: BayesFlow-master/bayesflow/diagnostics.py
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.lines import Line2D
from scipy.stats import binom, median_abs_deviation
from sklearn.metrics import confusion_matrix, r2_score
logging.basicConfig()
from bayesflow.computational_utilities import expected_calibration_error, simultaneous_ecdf_bands
from bayesflow.helper_functions import check_posterior_prior_shapes
def plot_recovery(
post_samples,
prior_samples,
point_agg=np.median,
uncertainty_agg=median_abs_deviation,
param_names=None,
fig_size=None,
label_fontsize=16,
title_fontsize=18,
metric_fontsize=16,
tick_fontsize=12,
add_corr=True,
add_r2=True,
color="#8f2727",
n_col=None,
n_row=None,
):
"""Creates and plots publication-ready recovery plot with true vs. point estimate + uncertainty.
The point estimate can be controlled with the ``point_agg`` argument, and the uncertainty estimate
can be controlled with the ``uncertainty_agg`` argument.
This plot yields similar information as the "posterior z-score", but allows for generic
point and uncertainty estimates:
https://betanalpha.github.io/assets/case_studies/principled_bayesian_workflow.html
    Important: Posterior aggregates play no special role in Bayesian inference and should only
    be used heuristically. For instance, in the case of multi-modal posteriors, common point
    estimates, such as the mean, (geometric) median, or maximum a posteriori (MAP), carry little meaning.
Parameters
----------
post_samples : np.ndarray of shape (n_data_sets, n_post_draws, n_params)
The posterior draws obtained from n_data_sets
prior_samples : np.ndarray of shape (n_data_sets, n_params)
The prior draws (true parameters) obtained for generating the n_data_sets
point_agg : callable, optional, default: np.median
The function to apply to the posterior draws to get a point estimate for each marginal.
The default computes the marginal median for each marginal posterior as a robust
point estimate.
uncertainty_agg : callable or None, optional, default: scipy.stats.median_abs_deviation
The function to apply to the posterior draws to get an uncertainty estimate.
        If ``None`` is provided, a simple scatter plot using only ``point_agg`` will be plotted.
param_names : list or None, optional, default: None
The parameter names for nice plot titles. Inferred if None
fig_size : tuple or None, optional, default : None
The figure size passed to the matplotlib constructor. Inferred if None.
    label_fontsize : int, optional, default: 16
        The font size of the y-label text
    title_fontsize : int, optional, default: 18
        The font size of the title text
metric_fontsize : int, optional, default: 16
The font size of the goodness-of-fit metric (if provided)
tick_fontsize : int, optional, default: 12
The font size of the axis ticklabels
add_corr : bool, optional, default: True
A flag for adding correlation between true and estimates to the plot
add_r2 : bool, optional, default: True
A flag for adding R^2 between true and estimates to the plot
    color : str, optional, default: '#8f2727'
        The color for the true vs. estimated scatter points and errorbars
    n_col : int or None, optional, default: None
        The number of columns for the subplots. Inferred if None
    n_row : int or None, optional, default: None
        The number of rows for the subplots. Inferred if None
Returns
-------
f : plt.Figure - the figure instance for optional saving
Raises
------
ShapeError
        If there is a deviation from the expected shapes of ``post_samples`` and ``prior_samples``.
"""
# Sanity check
check_posterior_prior_shapes(post_samples, prior_samples)
# Compute point estimates and uncertainties
est = point_agg(post_samples, axis=1)
if uncertainty_agg is not None:
u = uncertainty_agg(post_samples, axis=1)
# Determine n params and param names if None given
n_params = prior_samples.shape[-1]
if param_names is None:
param_names = [f"$\\theta_{{{i}}}$" for i in range(1, n_params + 1)]
# Determine number of rows and columns for subplots based on inputs
if n_row is None and n_col is None:
n_row = int(np.ceil(n_params / 6))
n_col = int(np.ceil(n_params / n_row))
elif n_row is None and n_col is not None:
n_row = int(np.ceil(n_params / n_col))
elif n_row is not None and n_col is None:
n_col = int(np.ceil(n_params / n_row))
# Initialize figure
if fig_size is None:
fig_size = (int(4 * n_col), int(4 * n_row))
f, axarr = plt.subplots(n_row, n_col, figsize=fig_size)
# turn axarr into 1D list
if n_col > 1 or n_row > 1:
axarr_it = axarr.flat
else:
# for 1x1, axarr is not a list -> turn it into one for use with enumerate
axarr_it = [axarr]
for i, ax in enumerate(axarr_it):
if i >= n_params:
break
# Add scatter and errorbars
if uncertainty_agg is not None:
_ = ax.errorbar(prior_samples[:, i], est[:, i], yerr=u[:, i], fmt="o", alpha=0.5, color=color)
else:
_ = ax.scatter(prior_samples[:, i], est[:, i], alpha=0.5, color=color)
        # Make the plots square to avoid visual distortions
lower = min(prior_samples[:, i].min(), est[:, i].min())
upper = max(prior_samples[:, i].max(), est[:, i].max())
eps = (upper - lower) * 0.1
ax.set_xlim([lower - eps, upper + eps])
ax.set_ylim([lower - eps, upper + eps])
ax.plot(
[ax.get_xlim()[0], ax.get_xlim()[1]],
[ax.get_ylim()[0], ax.get_ylim()[1]],
color="black",
alpha=0.9,
linestyle="dashed",
)
# Add optional metrics and title
if add_r2:
r2 = r2_score(prior_samples[:, i], est[:, i])
ax.text(
0.1,
0.9,
"$R^2$ = {:.3f}".format(r2),
horizontalalignment="left",
verticalalignment="center",
transform=ax.transAxes,
size=metric_fontsize,
)
if add_corr:
corr = np.corrcoef(prior_samples[:, i], est[:, i])[0, 1]
ax.text(
0.1,
0.8,
"$r$ = {:.3f}".format(corr),
horizontalalignment="left",
verticalalignment="center",
transform=ax.transAxes,
size=metric_fontsize,
)
ax.set_title(param_names[i], fontsize=title_fontsize)
# Prettify
sns.despine(ax=ax)
ax.grid(alpha=0.5)
ax.tick_params(axis="both", which="major", labelsize=tick_fontsize)
ax.tick_params(axis="both", which="minor", labelsize=tick_fontsize)
# Only add x-labels to the bottom row
bottom_row = axarr if n_row == 1 else axarr[0] if n_col == 1 else axarr[n_row - 1, :]
for _ax in bottom_row:
_ax.set_xlabel("Ground truth", fontsize=label_fontsize)
    # Only add y-labels to the left-most column
if n_row == 1: # if there is only one row, the ax array is 1D
axarr[0].set_ylabel("Estimated", fontsize=label_fontsize)
# If there is more than one row, the ax array is 2D
else:
for _ax in axarr[:, 0]:
_ax.set_ylabel("Estimated", fontsize=label_fontsize)
# Remove unused axes entirely
for _ax in axarr_it[n_params:]:
_ax.remove()
f.tight_layout()
return f
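def _example_plot_recovery():
    """Illustrative usage sketch, not part of the original module: calls
    ``plot_recovery`` on synthetic draws with hypothetical shapes and names."""
    rng = np.random.default_rng(2023)
    prior_samples = rng.normal(size=(100, 3))  # (n_data_sets, n_params)
    # Fake posterior draws centered near the true parameters
    post_samples = prior_samples[:, None, :] + rng.normal(scale=0.5, size=(100, 500, 3))
    fig = plot_recovery(post_samples, prior_samples, param_names=["$a$", "$b$", "$c$"])
    fig.savefig("recovery.png")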
def plot_z_score_contraction(
post_samples,
prior_samples,
param_names=None,
fig_size=None,
label_fontsize=16,
title_fontsize=18,
tick_fontsize=12,
color="#8f2727",
n_col=None,
n_row=None,
):
"""Implements a graphical check for global model sensitivity by plotting the posterior
z-score over the posterior contraction for each set of posterior samples in ``post_samples``
according to [1].
- The definition of the posterior z-score is:
post_z_score = (posterior_mean - true_parameters) / posterior_std
And the score is adequate if it centers around zero and spreads roughly in the interval [-3, 3]
- The definition of posterior contraction is:
post_contraction = 1 - (posterior_variance / prior_variance)
    In other words, the posterior contraction is a proxy for the reduction in uncertainty gained by
    replacing the prior with the posterior. The ideal posterior contraction tends to 1.
    Contraction near zero indicates that the posterior variance is almost identical to
    the prior variance for the particular marginal parameter distribution.
    Note: Means and variances will be estimated via their sample-based estimators.
[1] Schad, D. J., Betancourt, M., & Vasishth, S. (2021).
Toward a principled Bayesian workflow in cognitive science.
Psychological methods, 26(1), 103.
Also available at https://arxiv.org/abs/1904.12765
Parameters
----------
post_samples : np.ndarray of shape (n_data_sets, n_post_draws, n_params)
The posterior draws obtained from n_data_sets
prior_samples : np.ndarray of shape (n_data_sets, n_params)
The prior draws (true parameters) obtained for generating the n_data_sets
param_names : list or None, optional, default: None
The parameter names for nice plot titles. Inferred if None
fig_size : tuple or None, optional, default : None
The figure size passed to the matplotlib constructor. Inferred if None.
    label_fontsize : int, optional, default: 16
        The font size of the y-label text
    title_fontsize : int, optional, default: 18
        The font size of the title text
tick_fontsize : int, optional, default: 12
The font size of the axis ticklabels
    color : str, optional, default: '#8f2727'
        The color for the z-score vs. contraction scatter points
    n_col : int or None, optional, default: None
        The number of columns for the subplots. Inferred if None
    n_row : int or None, optional, default: None
        The number of rows for the subplots. Inferred if None
Returns
-------
f : plt.Figure - the figure instance for optional saving
Raises
------
ShapeError
        If there is a deviation from the expected shapes of ``post_samples`` and ``prior_samples``.
"""
# Sanity check for shape integrity
check_posterior_prior_shapes(post_samples, prior_samples)
# Estimate posterior means and stds
post_means = post_samples.mean(axis=1)
post_stds = post_samples.std(axis=1, ddof=1)
post_vars = post_samples.var(axis=1, ddof=1)
# Estimate prior variance
prior_vars = prior_samples.var(axis=0, keepdims=True, ddof=1)
# Compute contraction
post_cont = 1 - (post_vars / prior_vars)
# Compute posterior z score
z_score = (post_means - prior_samples) / post_stds
# Determine number of params and param names if None given
n_params = prior_samples.shape[-1]
if param_names is None:
param_names = [f"$\\theta_{{{i}}}$" for i in range(1, n_params + 1)]
# Determine number of rows and columns for subplots based on inputs
if n_row is None and n_col is None:
n_row = int(np.ceil(n_params / 6))
n_col = int(np.ceil(n_params / n_row))
elif n_row is None and n_col is not None:
n_row = int(np.ceil(n_params / n_col))
elif n_row is not None and n_col is None:
n_col = int(np.ceil(n_params / n_row))
# Initialize figure
if fig_size is None:
fig_size = (int(4 * n_col), int(4 * n_row))
f, axarr = plt.subplots(n_row, n_col, figsize=fig_size)
# turn axarr into 1D list
if n_col > 1 or n_row > 1:
axarr_it = axarr.flat
else:
# for 1x1, axarr is not a list -> turn it into one for use with enumerate
axarr_it = [axarr]
# Loop and plot
for i, ax in enumerate(axarr_it):
if i >= n_params:
break
ax.scatter(post_cont[:, i], z_score[:, i], color=color, alpha=0.5)
ax.set_title(param_names[i], fontsize=title_fontsize)
sns.despine(ax=ax)
ax.grid(alpha=0.5)
ax.tick_params(axis="both", which="major", labelsize=tick_fontsize)
ax.tick_params(axis="both", which="minor", labelsize=tick_fontsize)
ax.set_xlim([-0.05, 1.05])
# Only add x-labels to the bottom row
bottom_row = axarr if n_row == 1 else axarr[0] if n_col == 1 else axarr[n_row - 1, :]
for _ax in bottom_row:
_ax.set_xlabel("Posterior contraction", fontsize=label_fontsize)
    # Only add y-labels to the left-most column
if n_row == 1: # if there is only one row, the ax array is 1D
axarr[0].set_ylabel("Posterior z-score", fontsize=label_fontsize)
# If there is more than one row, the ax array is 2D
else:
for _ax in axarr[:, 0]:
_ax.set_ylabel("Posterior z-score", fontsize=label_fontsize)
# Remove unused axes entirely
for _ax in axarr_it[n_params:]:
_ax.remove()
f.tight_layout()
return f
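def _example_plot_z_score_contraction():
    """Illustrative usage sketch, not part of the original module: a well-behaved
    approximator should show contraction near 1 and z-scores centered around 0."""
    rng = np.random.default_rng(2023)
    prior_samples = rng.normal(size=(200, 2))  # prior std of roughly 1 per parameter
    # Posteriors that contract around the true values with std 0.2
    post_samples = prior_samples[:, None, :] + rng.normal(scale=0.2, size=(200, 1000, 2))
    fig = plot_z_score_contraction(post_samples, prior_samples)
    fig.savefig("z_score_contraction.png")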
def plot_sbc_ecdf(
post_samples,
prior_samples,
difference=False,
stacked=False,
fig_size=None,
param_names=None,
label_fontsize=16,
legend_fontsize=14,
title_fontsize=18,
tick_fontsize=12,
rank_ecdf_color="#a34f4f",
fill_color="grey",
**kwargs,
):
"""Creates the empirical CDFs for each marginal rank distribution and plots it against
a uniform ECDF. ECDF simultaneous bands are drawn using simulations from the uniform,
as proposed by [1].
For models with many parameters, use `stacked=True` to obtain an idea of the overall calibration
of a posterior approximator.
[1] Säilynoja, T., Bürkner, P. C., & Vehtari, A. (2022). Graphical test for discrete uniformity and
its applications in goodness-of-fit evaluation and multiple sample comparison. Statistics and Computing,
32(2), 1-21. https://arxiv.org/abs/2103.10522
Parameters
----------
post_samples : np.ndarray of shape (n_data_sets, n_post_draws, n_params)
The posterior draws obtained from n_data_sets
prior_samples : np.ndarray of shape (n_data_sets, n_params)
The prior draws obtained for generating n_data_sets
difference : bool, optional, default: False
If `True`, plots the ECDF difference. Enables a more dynamic visualization range.
stacked : bool, optional, default: False
If `True`, all ECDFs will be plotted on the same plot. If `False`, each ECDF will
have its own subplot, similar to the behavior of `plot_sbc_histograms`.
param_names : list or None, optional, default: None
The parameter names for nice plot titles. Inferred if None. Only relevant if `stacked=False`.
fig_size : tuple or None, optional, default: None
The figure size passed to the matplotlib constructor. Inferred if None.
    label_fontsize : int, optional, default: 16
        The font size of the x-label and y-label texts
    legend_fontsize : int, optional, default: 14
        The font size of the legend text
    title_fontsize : int, optional, default: 18
        The font size of the title text. Only relevant if `stacked=False`
tick_fontsize : int, optional, default: 12
The font size of the axis ticklabels
rank_ecdf_color : str, optional, default: '#a34f4f'
The color to use for the rank ECDFs
fill_color : str, optional, default: 'grey'
The color of the fill arguments.
**kwargs : dict, optional, default: {}
Keyword arguments can be passed to control the behavior of ECDF simultaneous band computation
through the ``ecdf_bands_kwargs`` dictionary. See `simultaneous_ecdf_bands` for keyword arguments
Returns
-------
f : plt.Figure - the figure instance for optional saving
Raises
------
ShapeError
        If there is a deviation from the expected shapes of `post_samples` and `prior_samples`.
"""
# Sanity checks
check_posterior_prior_shapes(post_samples, prior_samples)
# Store reference to number of parameters
n_params = post_samples.shape[-1]
# Compute fractional ranks (using broadcasting)
ranks = np.sum(post_samples < prior_samples[:, np.newaxis, :], axis=1) / post_samples.shape[1]
# Prepare figure
if stacked:
n_row, n_col = 1, 1
f, ax = plt.subplots(1, 1, figsize=fig_size)
else:
# Determine n_subplots dynamically
n_row = int(np.ceil(n_params / 6))
n_col = int(np.ceil(n_params / n_row))
# Determine fig_size dynamically, if None
if fig_size is None:
fig_size = (int(5 * n_col), int(5 * n_row))
# Initialize figure
f, ax = plt.subplots(n_row, n_col, figsize=fig_size)
# Plot individual ecdf of parameters
for j in range(ranks.shape[-1]):
ecdf_single = np.sort(ranks[:, j])
xx = ecdf_single
yy = np.arange(1, xx.shape[-1] + 1) / float(xx.shape[-1])
# Difference, if specified
if difference:
yy -= xx
if stacked:
if j == 0:
ax.plot(xx, yy, color=rank_ecdf_color, alpha=0.95, label="Rank ECDFs")
else:
ax.plot(xx, yy, color=rank_ecdf_color, alpha=0.95)
else:
ax.flat[j].plot(xx, yy, color=rank_ecdf_color, alpha=0.95, label="Rank ECDF")
# Compute uniform ECDF and bands
alpha, z, L, H = simultaneous_ecdf_bands(post_samples.shape[0], **kwargs.pop("ecdf_bands_kwargs", {}))
# Difference, if specified
if difference:
L -= z
H -= z
ylab = "ECDF difference"
else:
ylab = "ECDF"
# Add simultaneous bounds
if stacked:
titles = [None]
axes = [ax]
else:
axes = ax.flat
if param_names is None:
titles = [f"$\\theta_{{{i}}}$" for i in range(1, n_params + 1)]
else:
titles = param_names
for _ax, title in zip(axes, titles):
_ax.fill_between(z, L, H, color=fill_color, alpha=0.2, label=rf"{int((1-alpha) * 100)}$\%$ Confidence Bands")
# Prettify plot
sns.despine(ax=_ax)
_ax.grid(alpha=0.35)
_ax.legend(fontsize=legend_fontsize)
_ax.set_title(title, fontsize=title_fontsize)
_ax.tick_params(axis="both", which="major", labelsize=tick_fontsize)
_ax.tick_params(axis="both", which="minor", labelsize=tick_fontsize)
# Only add x-labels to the bottom row
if stacked:
bottom_row = [ax]
else:
bottom_row = ax if n_row == 1 else ax[-1, :]
for _ax in bottom_row:
_ax.set_xlabel("Fractional rank statistic", fontsize=label_fontsize)
    # Only add y-labels to the left-most column
if n_row == 1: # if there is only one row, the ax array is 1D
axes[0].set_ylabel(ylab, fontsize=label_fontsize)
else: # if there is more than one row, the ax array is 2D
for _ax in ax[:, 0]:
_ax.set_ylabel(ylab, fontsize=label_fontsize)
# Remove unused axes entirely
for _ax in axes[n_params:]:
_ax.remove()
f.tight_layout()
return f
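def _example_plot_sbc_ecdf():
    """Illustrative usage sketch, not part of the original module: when the posterior
    equals the prior (uninformative data), fractional ranks are uniform by construction
    and the ECDFs should stay inside the simultaneous bands."""
    rng = np.random.default_rng(2023)
    prior_samples = rng.normal(size=(300, 4))
    post_samples = rng.normal(size=(300, 250, 4))
    fig = plot_sbc_ecdf(post_samples, prior_samples, difference=True, stacked=True)
    fig.savefig("sbc_ecdf.png")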
def plot_sbc_histograms(
post_samples,
prior_samples,
param_names=None,
fig_size=None,
num_bins=None,
binomial_interval=0.99,
label_fontsize=16,
title_fontsize=18,
tick_fontsize=12,
hist_color="#a34f4f",
):
"""Creates and plots publication-ready histograms of rank statistics for simulation-based calibration
(SBC) checks according to [1].
Any deviation from uniformity indicates miscalibration and thus poor convergence
of the networks or poor combination between generative model / networks.
[1] Talts, S., Betancourt, M., Simpson, D., Vehtari, A., & Gelman, A. (2018).
Validating Bayesian inference algorithms with simulation-based calibration.
arXiv preprint arXiv:1804.06788.
Parameters
----------
post_samples : np.ndarray of shape (n_data_sets, n_post_draws, n_params)
The posterior draws obtained from n_data_sets
prior_samples : np.ndarray of shape (n_data_sets, n_params)
The prior draws obtained for generating n_data_sets
param_names : list or None, optional, default: None
The parameter names for nice plot titles. Inferred if None
fig_size : tuple or None, optional, default : None
The figure size passed to the matplotlib constructor. Inferred if None
    num_bins : int or None, optional, default: None
        The number of bins to use for each marginal histogram. Determined automatically if None
    binomial_interval : float in (0, 1), optional, default: 0.99
        The width of the confidence interval for the binomial distribution
    label_fontsize : int, optional, default: 16
        The font size of the y-label text
    title_fontsize : int, optional, default: 18
        The font size of the title text
tick_fontsize : int, optional, default: 12
The font size of the axis ticklabels
hist_color : str, optional, default '#a34f4f'
The color to use for the histogram body
Returns
-------
f : plt.Figure - the figure instance for optional saving
Raises
------
ShapeError
        If there is a deviation from the expected shapes of `post_samples` and `prior_samples`.
"""
# Sanity check
check_posterior_prior_shapes(post_samples, prior_samples)
# Determine the ratio of simulations to prior draws
n_sim, n_draws, n_params = post_samples.shape
ratio = int(n_sim / n_draws)
# Log a warning if N/B ratio recommended by Talts et al. (2018) < 20
if ratio < 20:
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.info(
f"The ratio of simulations / posterior draws should be > 20 "
+ f"for reliable variance reduction, but your ratio is {ratio}.\
Confidence intervals might be unreliable!"
)
# Set n_bins automatically, if nothing provided
if num_bins is None:
num_bins = int(ratio / 2)
# Attempt a fix if a single bin is determined so plot still makes sense
if num_bins == 1:
num_bins = 5
# Determine n params and param names if None given
if param_names is None:
param_names = [f"$\\theta_{{{i}}}$" for i in range(1, n_params + 1)]
# Determine n_subplots dynamically
n_row = int(np.ceil(n_params / 6))
n_col = int(np.ceil(n_params / n_row))
# Initialize figure
if fig_size is None:
fig_size = (int(5 * n_col), int(5 * n_row))
f, axarr = plt.subplots(n_row, n_col, figsize=fig_size)
# Compute ranks (using broadcasting)
ranks = np.sum(post_samples < prior_samples[:, np.newaxis, :], axis=1)
# Compute confidence interval and mean
N = int(prior_samples.shape[0])
# uniform distribution expected -> for all bins: equal probability
# p = 1 / num_bins that a rank lands in that bin
endpoints = binom.interval(binomial_interval, N, 1 / num_bins)
mean = N / num_bins # corresponds to binom.mean(N, 1 / num_bins)
# Plot marginal histograms in a loop
if n_row > 1:
ax = axarr.flat
else:
ax = axarr
for j in range(len(param_names)):
ax[j].axhspan(endpoints[0], endpoints[1], facecolor="gray", alpha=0.3)
ax[j].axhline(mean, color="gray", zorder=0, alpha=0.9)
sns.histplot(ranks[:, j], kde=False, ax=ax[j], color=hist_color, bins=num_bins, alpha=0.95)
ax[j].set_title(param_names[j], fontsize=title_fontsize)
ax[j].spines["right"].set_visible(False)
ax[j].spines["top"].set_visible(False)
ax[j].get_yaxis().set_ticks([])
ax[j].set_ylabel("")
ax[j].tick_params(axis="both", which="major", labelsize=tick_fontsize)
ax[j].tick_params(axis="both", which="minor", labelsize=tick_fontsize)
# Only add x-labels to the bottom row
bottom_row = axarr if n_row == 1 else axarr[0] if n_col == 1 else axarr[n_row - 1, :]
for _ax in bottom_row:
_ax.set_xlabel("Rank statistic", fontsize=label_fontsize)
    # Remove unused axes entirely (flatten first, since axarr may be 2D)
    for _ax in np.atleast_1d(axarr).flatten()[n_params:]:
        _ax.remove()
f.tight_layout()
return f
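def _example_plot_sbc_histograms():
    """Illustrative usage sketch, not part of the original module: uses a
    simulations-to-draws ratio of 20, as recommended by Talts et al. (2018)."""
    rng = np.random.default_rng(2023)
    n_sim, n_draws, n_params = 2000, 100, 3
    prior_samples = rng.normal(size=(n_sim, n_params))
    post_samples = rng.normal(size=(n_sim, n_draws, n_params))  # calibrated by construction
    fig = plot_sbc_histograms(post_samples, prior_samples)
    fig.savefig("sbc_histograms.png")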
def plot_posterior_2d(
posterior_draws,
prior=None,
prior_draws=None,
param_names=None,
height=3,
label_fontsize=14,
legend_fontsize=16,
tick_fontsize=12,
post_color="#8f2727",
prior_color="gray",
post_alpha=0.9,
prior_alpha=0.7,
):
"""Generates a bivariate pairplot given posterior draws and optional prior or prior draws.
posterior_draws : np.ndarray of shape (n_post_draws, n_params)
The posterior draws obtained for a SINGLE observed data set.
prior : bayesflow.forward_inference.Prior instance or None, optional, default: None
The optional prior object having an input-output signature as given by ayesflow.forward_inference.Prior
prior_draws : np.ndarray of shape (n_prior_draws, n_params) or None, optonal (default: None)
The optional prior draws obtained from the prior. If both prior and prior_draws are provided, prior_draws
will be used.
param_names : list or None, optional, default: None
The parameter names for nice plot titles. Inferred if None
height : float, optional, default: 3
The height of the pairplot
label_fontsize : int, optional, default: 14
The font size of the x and y-label texts (parameter names)
legend_fontsize : int, optional, default: 16
The font size of the legend text
tick_fontsize : int, optional, default: 12
The font size of the axis ticklabels
post_color : str, optional, default: '#8f2727'
The color for the posterior histograms and KDEs
    prior_color : str, optional, default: 'gray'
        The color for the optional prior histograms and KDEs
    post_alpha : float in [0, 1], optional, default: 0.9
        The opacity of the posterior plots
    prior_alpha : float in [0, 1], optional, default: 0.7
        The opacity of the prior plots
Returns
-------
f : plt.Figure - the figure instance for optional saving
Raises
------
AssertionError
If the shape of posterior_draws is not 2-dimensional.
"""
# Ensure correct shape
    assert (
        len(posterior_draws.shape) == 2
    ), "Shape of `posterior_draws` for a single data set should be 2-dimensional!"
# Obtain n_draws and n_params
n_draws, n_params = posterior_draws.shape
# If prior object is given and no draws, obtain draws
if prior is not None and prior_draws is None:
draws = prior(n_draws)
if type(draws) is dict:
prior_draws = draws["prior_draws"]
else:
prior_draws = draws
    # Otherwise, keep prior_draws as is (either filled or None)
# Attempt to determine parameter names
if param_names is None:
if hasattr(prior, "param_names"):
if prior.param_names is not None:
param_names = prior.param_names
else:
param_names = [f"$\\theta_{{{i}}}$" for i in range(1, n_params + 1)]
else:
param_names = [f"$\\theta_{{{i}}}$" for i in range(1, n_params + 1)]
# Pack posterior draws into a dataframe
posterior_draws_df = pd.DataFrame(posterior_draws, columns=param_names)
# Add posterior
g = sns.PairGrid(posterior_draws_df, height=height)
g.map_diag(sns.histplot, fill=True, color=post_color, alpha=post_alpha, kde=True)
g.map_lower(sns.kdeplot, fill=True, color=post_color, alpha=post_alpha)
# Add prior, if given
if prior_draws is not None:
prior_draws_df = pd.DataFrame(prior_draws, columns=param_names)
g.data = prior_draws_df
g.map_diag(sns.histplot, fill=True, color=prior_color, alpha=prior_alpha, kde=True, zorder=-1)
g.map_lower(sns.kdeplot, fill=True, color=prior_color, alpha=prior_alpha, zorder=-1)
# Add legend, if prior also given
if prior_draws is not None or prior is not None:
handles = [
Line2D(xdata=[], ydata=[], color=post_color, lw=3, alpha=post_alpha),
Line2D(xdata=[], ydata=[], color=prior_color, lw=3, alpha=prior_alpha),
]
g.fig.legend(handles, ["Posterior", "Prior"], fontsize=legend_fontsize, loc="center right")
# Remove upper axis
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
g.axes[i, j].axis("off")
# Modify tick sizes
for i, j in zip(*np.tril_indices_from(g.axes, 1)):
g.axes[i, j].tick_params(axis="both", which="major", labelsize=tick_fontsize)
g.axes[i, j].tick_params(axis="both", which="minor", labelsize=tick_fontsize)
# Add nice labels
for i, param_name in enumerate(param_names):
g.axes[i, 0].set_ylabel(param_name, fontsize=label_fontsize)
g.axes[len(param_names) - 1, i].set_xlabel(param_name, fontsize=label_fontsize)
# Add grids
for i in range(n_params):
for j in range(n_params):
g.axes[i, j].grid(alpha=0.5)
g.tight_layout()
return g.fig
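def _example_plot_posterior_2d():
    """Illustrative usage sketch, not part of the original module: overlays a
    synthetic posterior pairplot on draws from a hypothetical standard normal prior."""
    rng = np.random.default_rng(2023)
    posterior_draws = rng.normal(loc=0.5, scale=0.3, size=(1000, 2))
    prior_draws = rng.normal(size=(1000, 2))
    fig = plot_posterior_2d(posterior_draws, prior_draws=prior_draws, param_names=["$a$", "$b$"])
    fig.savefig("posterior_2d.png")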
def plot_losses(
train_losses,
val_losses=None,
fig_size=None,
train_color="#8f2727",
val_color="black",
lw_train=2,
lw_val=3,
grid_alpha=0.5,
legend_fontsize=14,
label_fontsize=14,
title_fontsize=16,
):
"""A generic helper function to plot the losses of a series of training epochs and runs.
Parameters
----------
train_losses : pd.DataFrame
The (plottable) history as returned by a train_[...] method of a ``Trainer`` instance.
Alternatively, you can just pass a data frame of validation losses instead of train losses,
if you only want to plot the validation loss.
val_losses : pd.DataFrame or None, optional, default: None
The (plottable) validation history as returned by a train_[...] method of a ``Trainer`` instance.
If left ``None``, only train losses are plotted. Should have the same number of columns
as ``train_losses``.
fig_size : tuple or None, optional, default: None
The figure size passed to the ``matplotlib`` constructor. Inferred if ``None``
train_color : str, optional, default: '#8f2727'
The color for the train loss trajectory
val_color : str, optional, default: black
The color for the optional validation loss trajectory
lw_train : int, optional, default: 2
The linewidth for the training loss curve
lw_val : int, optional, default: 3
The linewidth for the validation loss curve
grid_alpha : float, optional, default 0.5
The opacity factor for the background gridlines
legend_fontsize : int, optional, default: 14
The font size of the legend text
label_fontsize : int, optional, default: 14
The font size of the y-label text
title_fontsize : int, optional, default: 16
The font size of the title text
Returns
-------
f : plt.Figure - the figure instance for optional saving
Raises
------
AssertionError
If the number of columns in ``train_losses`` does not match the
number of columns in ``val_losses``.
"""
# Determine the number of rows for plot
n_row = len(train_losses.columns)
# Initialize figure
if fig_size is None:
fig_size = (16, int(4 * n_row))
f, axarr = plt.subplots(n_row, 1, figsize=fig_size)
# Get the number of steps as an array
train_step_index = np.arange(1, len(train_losses) + 1)
if val_losses is not None:
val_step = int(np.floor(len(train_losses) / len(val_losses)))
val_step_index = train_step_index[(val_step - 1) :: val_step]
# If unequal length due to some reason, attempt a fix
if val_step_index.shape[0] > val_losses.shape[0]:
val_step_index = val_step_index[: val_losses.shape[0]]
# Loop through loss entries and populate plot
looper = [axarr] if n_row == 1 else axarr.flat
for i, ax in enumerate(looper):
# Plot train curve
ax.plot(train_step_index, train_losses.iloc[:, i], color=train_color, lw=lw_train, alpha=0.9, label="Training")
# Plot optional val curve
if val_losses is not None:
if i < val_losses.shape[1]:
ax.plot(
val_step_index,
val_losses.iloc[:, i],
linestyle="--",
marker="o",
color=val_color,
lw=lw_val,
label="Validation",
)
# Schmuck
ax.set_xlabel("Training step #", fontsize=label_fontsize)
ax.set_ylabel("Loss value", fontsize=label_fontsize)
sns.despine(ax=ax)
ax.grid(alpha=grid_alpha)
ax.set_title(train_losses.columns[i], fontsize=title_fontsize)
# Only add legend if there is a validation curve
if val_losses is not None:
ax.legend(fontsize=legend_fontsize)
f.tight_layout()
return f
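def _example_plot_losses():
    """Illustrative usage sketch, not part of the original module: the data frames
    below merely mimic the plottable histories returned by a ``Trainer`` instance."""
    rng = np.random.default_rng(2023)
    steps = np.linspace(0, 3, 1000)
    train_losses = pd.DataFrame({"Loss": np.exp(-steps) + rng.normal(scale=0.05, size=1000)})
    val_losses = pd.DataFrame({"Loss": np.exp(-np.linspace(0, 3, 10))})
    fig = plot_losses(train_losses, val_losses)
    fig.savefig("losses.png")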
def plot_prior2d(prior, param_names=None, n_samples=2000, height=2.5, color="#8f2727", **kwargs):
"""Creates pairplots for a given joint prior.
Parameters
----------
prior : callable
The prior object which takes a single integer argument and generates random draws.
    param_names : list of str or None, optional, default: None
        An optional list of strings which will be used as parameter names in the plot titles. Inferred if None
    n_samples : int, optional, default: 2000
        The number of random draws from the joint prior
height : float, optional, default: 2.5
The height of the pair plot
    color : str, optional, default: '#8f2727'
The color of the plot
**kwargs : dict, optional
Additional keyword arguments passed to the sns.PairGrid constructor
Returns
-------
f : plt.Figure - the figure instance for optional saving
"""
# Generate prior draws
prior_samples = prior(n_samples)
# Handle dict type
if type(prior_samples) is dict:
prior_samples = prior_samples["prior_draws"]
# Get latent dimensionality and prepare titles
dim = prior_samples.shape[-1]
# Convert samples to a pandas data frame
if param_names is None:
titles = [f"Prior Param. {i}" for i in range(1, dim + 1)]
else:
titles = [f"Prior {p}" for p in param_names]
data_to_plot = pd.DataFrame(prior_samples, columns=titles)
# Generate plots
g = sns.PairGrid(data_to_plot, height=height, **kwargs)
g.map_diag(sns.histplot, fill=True, color=color, alpha=0.9, kde=True)
# Kernel density estimation (KDE) may not always be possible (e.g. with parameters whose correlation is close to 1 or -1).
# In this scenario, a scatter-plot is generated instead.
try:
g.map_lower(sns.kdeplot, fill=True, color=color, alpha=0.9)
except Exception as e:
        logging.warning("KDE failed due to the following exception:\n" + repr(e) + "\nSubstituting scatter plot.")
g.map_lower(plt.scatter, alpha=0.6, s=40, edgecolor="k", color=color)
g.map_upper(plt.scatter, alpha=0.6, s=40, edgecolor="k", color=color)
# Add grids
for i in range(dim):
for j in range(dim):
g.axes[i, j].grid(alpha=0.5)
g.tight_layout()
return g.fig
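def _example_plot_prior2d():
    """Illustrative usage sketch, not part of the original module: any callable
    mapping an integer to an array of shape (n_samples, n_params) works as a prior."""
    rng = np.random.default_rng(2023)
    prior = lambda n_samples: rng.normal(size=(n_samples, 3))
    fig = plot_prior2d(prior, param_names=["a", "b", "c"])
    fig.savefig("prior2d.png")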
def plot_latent_space_2d(z_samples, height=2.5, color="#8f2727", **kwargs):
"""Creates pairplots for the latent space learned by the inference network. Enables
visual inspection of the the latent space and whether its structrue corresponds to the
one enforced by the optimization criterion.
Parameters
----------
z_samples : np.ndarray or tf.Tensor of shape (n_sim, n_params)
The latent samples computed through a forward pass of the inference network.
height : float, optional, default: 2.5
The height of the pair plot.
    color : str, optional, default: '#8f2727'
The color of the plot
**kwargs : dict, optional
Additional keyword arguments passed to the sns.PairGrid constructor
Returns
-------
f : plt.Figure - the figure instance for optional saving
"""
    # Convert to numpy, in case a tf.Tensor was passed
    if type(z_samples) is not np.ndarray:
        z_samples = z_samples.numpy()
# Get latent dimensionality and prepare titles
z_dim = z_samples.shape[-1]
# Convert samples to a pandas data frame
titles = [f"Latent Dim. {i}" for i in range(1, z_dim + 1)]
data_to_plot = pd.DataFrame(z_samples, columns=titles)
# Generate plots
g = sns.PairGrid(data_to_plot, height=height, **kwargs)
g.map_diag(sns.histplot, fill=True, color=color, alpha=0.9, kde=True)
g.map_lower(sns.kdeplot, fill=True, color=color, alpha=0.9)
g.map_upper(plt.scatter, alpha=0.6, s=40, edgecolor="k", color=color)
# Add grids
for i in range(z_dim):
for j in range(z_dim):
g.axes[i, j].grid(alpha=0.5)
g.tight_layout()
return g.fig
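def _example_plot_latent_space_2d():
    """Illustrative usage sketch, not part of the original module: a well-trained
    inference network should produce roughly standard Gaussian latent samples."""
    z_samples = np.random.default_rng(2023).normal(size=(1000, 2))  # stand-in for a network forward pass
    fig = plot_latent_space_2d(z_samples)
    fig.savefig("latent_space_2d.png")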
def plot_calibration_curves(
true_models,
pred_models,
model_names=None,
num_bins=10,
label_fontsize=16,
legend_fontsize=14,
title_fontsize=18,
tick_fontsize=12,
fig_size=None,
color="#8f2727",
):
"""Plots the calibration curves, the ECEs and the marginal histograms of predicted posterior model probabilities
for a model comparison problem. The marginal histograms inform about the fraction of predictions in each bin.
Depends on the ``expected_calibration_error`` function for computing the ECE.
Parameters
----------
true_models : np.ndarray of shape (num_data_sets, num_models)
The one-hot-encoded true model indices per data set.
pred_models : np.ndarray of shape (num_data_sets, num_models)
The predicted posterior model probabilities (PMPs) per data set.
model_names : list or None, optional, default: None
The model names for nice plot titles. Inferred if None.
num_bins : int, optional, default: 10
The number of bins to use for the calibration curves (and marginal histograms).
    label_fontsize : int, optional, default: 16
        The font size of the x-label and y-label texts
    legend_fontsize : int, optional, default: 14
        The font size of the legend text (ECE value)
    title_fontsize : int, optional, default: 18
        The font size of the title text
tick_fontsize : int, optional, default: 12
The font size of the axis ticklabels
fig_size : tuple or None, optional, default: None
The figure size passed to the ``matplotlib`` constructor. Inferred if ``None``
color : str, optional, default: '#8f2727'
The color of the calibration curves
Returns
-------
fig : plt.Figure - the figure instance for optional saving
"""
num_models = true_models.shape[-1]
if model_names is None:
model_names = [rf"$M_{{{m}}}$" for m in range(1, num_models + 1)]
# Determine n_subplots dynamically
n_row = int(np.ceil(num_models / 6))
n_col = int(np.ceil(num_models / n_row))
cal_errs, cal_probs = expected_calibration_error(true_models, pred_models, num_bins)
# Initialize figure
if fig_size is None:
fig_size = (int(5 * n_col), int(5 * n_row))
fig, axarr = plt.subplots(n_row, n_col, figsize=fig_size)
    # Plot marginal calibration curves in a loop
    if n_row > 1:
        ax = axarr.flat
    else:
        ax = axarr
for j in range(num_models):
# Plot calibration curve
ax[j].plot(cal_probs[j][0], cal_probs[j][1], color=color)
# Plot AB line
ax[j].plot(ax[j].get_xlim(), ax[j].get_xlim(), "--", color="darkgrey")
# Plot PMP distribution over bins
uniform_bins = np.linspace(0.0, 1.0, num_bins + 1)
norm_weights = np.ones_like(pred_models) / len(pred_models)
ax[j].hist(pred_models[:, j], bins=uniform_bins, weights=norm_weights[:, j], color="grey", alpha=0.3)
# Tweak plot
ax[j].spines["right"].set_visible(False)
ax[j].spines["top"].set_visible(False)
ax[j].set_xlim([0, 1])
ax[j].set_ylim([0, 1])
ax[j].set_xlabel("Predicted probability", fontsize=label_fontsize)
ax[j].set_ylabel("True probability", fontsize=label_fontsize)
ax[j].set_xticks([0.2, 0.4, 0.6, 0.8, 1.0])
ax[j].set_yticks([0.2, 0.4, 0.6, 0.8, 1.0])
ax[j].grid(alpha=0.5)
ax[j].text(
0.1,
0.9,
r"$\widehat{{\mathrm{{ECE}}}}$ = {0:.3f}".format(cal_errs[j]),
horizontalalignment="left",
verticalalignment="center",
transform=ax[j].transAxes,
size=legend_fontsize,
)
ax[j].tick_params(axis="both", which="major", labelsize=tick_fontsize)
ax[j].tick_params(axis="both", which="minor", labelsize=tick_fontsize)
# Set title
ax[j].set_title(model_names[j], fontsize=title_fontsize)
fig.tight_layout()
return fig
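def _example_plot_calibration_curves():
    """Illustrative usage sketch, not part of the original module: simulates
    one-hot true model indices and noisy posterior model probabilities (PMPs)."""
    rng = np.random.default_rng(2023)
    num_data_sets, num_models = 500, 3
    true_models = np.eye(num_models)[rng.integers(num_models, size=num_data_sets)]
    # Noisy but informative PMPs via a softmax over perturbed one-hot logits
    logits = 2 * true_models + rng.normal(size=(num_data_sets, num_models))
    pred_models = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
    fig = plot_calibration_curves(true_models, pred_models)
    fig.savefig("calibration_curves.png")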
def plot_confusion_matrix(
true_models,
pred_models,
model_names=None,
fig_size=(5, 5),
title_fontsize=18,
tick_fontsize=12,
xtick_rotation=None,
ytick_rotation=None,
normalize=True,
cmap=None,
title=True,
):
"""Plots a confusion matrix for validating a neural network trained for Bayesian model comparison.
Parameters
----------
true_models : np.ndarray of shape (num_data_sets, num_models)
The one-hot-encoded true model indices per data set.
pred_models : np.ndarray of shape (num_data_sets, num_models)
The predicted posterior model probabilities (PMPs) per data set.
model_names : list or None, optional, default: None
The model names for nice plot titles. Inferred if None.
fig_size : tuple or None, optional, default: (5, 5)
The figure size passed to the ``matplotlib`` constructor. Inferred if ``None``
title_fontsize : int, optional, default: 18
The font size of the title text.
tick_fontsize : int, optional, default: 12
The font size of the axis label and model name texts.
xtick_rotation: int, optional, default: None
Rotation of x-axis tick labels (helps with long model names).
ytick_rotation: int, optional, default: None
Rotation of y-axis tick labels (helps with long model names).
normalize : bool, optional, default: True
A flag for normalization of the confusion matrix.
If True, each row of the confusion matrix is normalized to sum to 1.
cmap : matplotlib.colors.Colormap or str, optional, default: None
Colormap to be used for the cells. If a str, it should be the name of a registered colormap,
e.g., 'viridis'. Default colormap matches the BayesFlow defaults by ranging from white to red.
title : bool, optional, default True
A flag for adding 'Confusion Matrix' above the matrix.
Returns
-------
fig : plt.Figure - the figure instance for optional saving
"""
if model_names is None:
num_models = true_models.shape[-1]
model_names = [rf"$M_{{{m}}}$" for m in range(1, num_models + 1)]
if cmap is None:
cmap = LinearSegmentedColormap.from_list("", ["white", "#8f2727"])
# Flatten input
true_models = np.argmax(true_models, axis=1)
pred_models = np.argmax(pred_models, axis=1)
# Compute confusion matrix
cm = confusion_matrix(true_models, pred_models)
if normalize:
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
# Initialize figure
fig, ax = plt.subplots(1, 1, figsize=fig_size)
im = ax.imshow(cm, interpolation="nearest", cmap=cmap)
ax.figure.colorbar(im, ax=ax, shrink=0.7)
ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]))
ax.set_xticklabels(model_names, fontsize=tick_fontsize)
if xtick_rotation:
plt.xticks(rotation=xtick_rotation, ha="right")
ax.set_yticklabels(model_names, fontsize=tick_fontsize)
if ytick_rotation:
plt.yticks(rotation=ytick_rotation)
ax.set_xlabel("Predicted model", fontsize=tick_fontsize)
ax.set_ylabel("True model", fontsize=tick_fontsize)
# Loop over data dimensions and create text annotations
fmt = ".2f" if normalize else "d"
thresh = cm.max() / 2.0
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(
j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black"
)
if title:
ax.set_title("Confusion Matrix", fontsize=title_fontsize)
return fig
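def _example_plot_confusion_matrix():
    """Illustrative usage sketch, not part of the original module, reusing the same
    kind of hypothetical one-hot encodings and PMPs as the calibration example."""
    rng = np.random.default_rng(2023)
    true_models = np.eye(3)[rng.integers(3, size=500)]
    logits = 2 * true_models + rng.normal(size=(500, 3))
    pred_models = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
    fig = plot_confusion_matrix(true_models, pred_models, normalize=True)
    fig.savefig("confusion_matrix.png")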
def plot_mmd_hypothesis_test(
    mmd_null,
    mmd_observed=None,
    alpha_level=0.05,
    null_color=(0.16407, 0.020171, 0.577478),
    observed_color="red",
    alpha_color="orange",
    truncate_vlines_at_kde=False,
    xmin=None,
    xmax=None,
    bw_factor=1.5,
):
"""
Parameters
----------
mmd_null: np.ndarray
samples from the MMD sampling distribution under the null hypothesis "the model is well-specified"
mmd_observed: float
observed MMD value
alpha_level: float
rejection probability (type I error)
null_color: color
color for the H0 sampling distribution
observed_color: color
color for the observed MMD
alpha_color: color
color for the rejection area
    truncate_vlines_at_kde: bool
        if True, cut off the vlines at the KDE curve;
        if False, continue the vlines across the plot
xmin: float
lower x axis limit
xmax: float
upper x axis limit
bw_factor: float, default: 1.5
bandwidth (aka. smoothing parameter) of the kernel density estimate
Returns
-------
f : plt.Figure - the figure instance for optional saving
"""
def draw_vline_to_kde(x, kde_object, color, label=None, **kwargs):
kde_x, kde_y = kde_object.lines[0].get_data()
idx = np.argmin(np.abs(kde_x - x))
plt.vlines(x=x, ymin=0, ymax=kde_y[idx], color=color, linewidth=3, label=label, **kwargs)
def fill_area_under_kde(kde_object, x_start, x_end=None, **kwargs):
kde_x, kde_y = kde_object.lines[0].get_data()
if x_end is not None:
plt.fill_between(kde_x, kde_y, where=(kde_x >= x_start) & (kde_x <= x_end),
interpolate=True, **kwargs)
else:
plt.fill_between(kde_x, kde_y, where=(kde_x >= x_start),
interpolate=True, **kwargs)
f = plt.figure(figsize=(8, 4))
kde = sns.kdeplot(mmd_null, fill=False, linewidth=0, bw_adjust=bw_factor)
sns.kdeplot(mmd_null, fill=True, alpha=.12, color=null_color, bw_adjust=bw_factor)
if truncate_vlines_at_kde:
draw_vline_to_kde(x=mmd_observed, kde_object=kde, color=observed_color, label=r"Observed data")
else:
plt.vlines(x=mmd_observed, ymin=0, ymax=plt.gca().get_ylim()[1], color=observed_color, linewidth=3,
label=r"Observed data")
mmd_critical = np.quantile(mmd_null, 1 - alpha_level)
fill_area_under_kde(kde, mmd_critical, color=alpha_color, alpha=0.5, label=fr"{int(alpha_level*100)}% rejection area")
if truncate_vlines_at_kde:
draw_vline_to_kde(x=mmd_critical, kde_object=kde, color=alpha_color)
else:
plt.vlines(x=mmd_critical, color=alpha_color, linewidth=3, ymin=0, ymax=plt.gca().get_ylim()[1])
sns.kdeplot(mmd_null, fill=False, linewidth=3, color=null_color, label=r"$H_0$", bw_adjust=bw_factor)
plt.xlabel(r"MMD", fontsize=20)
plt.ylabel("")
plt.yticks([])
plt.xlim(xmin, xmax)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.legend(fontsize=20)
sns.despine()
return f
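def _example_plot_mmd_hypothesis_test():
    """Illustrative usage sketch, not part of the original module: the null MMD
    samples and the observed value below are purely synthetic stand-ins."""
    rng = np.random.default_rng(2023)
    mmd_null = rng.gamma(shape=2.0, scale=0.01, size=500)  # hypothetical H0 sampling distribution
    fig = plot_mmd_hypothesis_test(mmd_null, mmd_observed=0.08)
    fig.savefig("mmd_test.png")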
# Repo: BayesFlow
# File: BayesFlow-master/bayesflow/attention.py
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
from tensorflow.keras.layers import Dense, LayerNormalization, MultiHeadAttention
from tensorflow.keras.models import Sequential
class MultiHeadAttentionBlock(tf.keras.Model):
"""Implements the MAB block from [1] which represents learnable cross-attention.
[1] Lee, J., Lee, Y., Kim, J., Kosiorek, A., Choi, S., & Teh, Y. W. (2019).
Set transformer: A framework for attention-based permutation-invariant neural networks.
In International conference on machine learning (pp. 3744-3753). PMLR.
"""
def __init__(self, input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, **kwargs):
"""Creates a multihead attention block which will typically be used as part of a
set transformer architecture according to [1].
Parameters
----------
input_dim : int
The dimensionality of the input data (last axis).
attention_settings : dict
A dictionary which will be unpacked as the arguments for the ``MultiHeadAttention`` layer
See https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention.
num_dense_fc : int
The number of hidden layers for the internal feedforward network
dense_settings : dict
A dictionary which will be unpacked as the arguments for the ``Dense`` layer
        use_layer_norm : boolean
            Whether to apply layer normalization before and after attention + feedforward
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
self.att = MultiHeadAttention(**attention_settings)
self.ln_pre = LayerNormalization() if use_layer_norm else None
self.fc = Sequential([Dense(**dense_settings) for _ in range(num_dense_fc)])
self.fc.add(Dense(input_dim))
self.ln_post = LayerNormalization() if use_layer_norm else None
def call(self, x, y, **kwargs):
"""Performs the forward pass through the attention layer.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, set_size_x, input_dim)
y : tf.Tensor
Input of shape (batch_size, set_size_y, input_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, set_size_x, input_dim)
"""
h = x + self.att(x, y, y, **kwargs)
if self.ln_pre is not None:
h = self.ln_pre(h, **kwargs)
out = h + self.fc(h, **kwargs)
if self.ln_post is not None:
out = self.ln_post(out, **kwargs)
return out
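def _example_multi_head_attention_block():
    """Illustrative usage sketch, not part of the original module: the settings
    dictionaries below are hypothetical choices, not library defaults."""
    block = MultiHeadAttentionBlock(
        input_dim=32,
        attention_settings=dict(num_heads=4, key_dim=32),
        num_dense_fc=2,
        dense_settings=dict(units=64, activation="relu"),
        use_layer_norm=True,
    )
    x = tf.random.normal((8, 50, 32))  # (batch_size, set_size_x, input_dim)
    y = tf.random.normal((8, 20, 32))  # (batch_size, set_size_y, input_dim)
    return block(x, y)  # cross-attention output of shape (8, 50, 32)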
class SelfAttentionBlock(tf.keras.Model):
"""Implements the SAB block from [1] which represents learnable self-attention.
[1] Lee, J., Lee, Y., Kim, J., Kosiorek, A., Choi, S., & Teh, Y. W. (2019).
Set transformer: A framework for attention-based permutation-invariant neural networks.
In International conference on machine learning (pp. 3744-3753). PMLR.
"""
def __init__(self, input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, **kwargs):
"""Creates a self-attention attention block which will typically be used as part of a
set transformer architecture according to [1].
Parameters
----------
input_dim : int
The dimensionality of the input data (last axis).
attention_settings : dict
A dictionary which will be unpacked as the arguments for the ``MultiHeadAttention`` layer
See https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention.
num_dense_fc : int
The number of hidden layers for the internal feedforward network
dense_settings : dict
A dictionary which will be unpacked as the arguments for the ``Dense`` layer
        use_layer_norm : boolean
            Whether to apply layer normalization before and after attention + feedforward
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
self.mab = MultiHeadAttentionBlock(input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm)
def call(self, x, **kwargs):
"""Performs the forward pass through the self-attention layer.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, set_size, input_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, set_size, input_dim)
"""
return self.mab(x, x, **kwargs)
class InducedSelfAttentionBlock(tf.keras.Model):
"""Implements the ISAB block from [1] which represents learnable self-attention specifically
designed to deal with large sets via a learnable set of "inducing points".
[1] Lee, J., Lee, Y., Kim, J., Kosiorek, A., Choi, S., & Teh, Y. W. (2019).
Set transformer: A framework for attention-based permutation-invariant neural networks.
In International conference on machine learning (pp. 3744-3753). PMLR.
"""
def __init__(
self, input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, num_inducing_points, **kwargs
):
"""Creates a self-attention attention block with inducing points (ISAB) which will typically
be used as part of a set transformer architecture according to [1].
Parameters
----------
input_dim : int
The dimensionality of the input data (last axis).
attention_settings : dict
A dictionary which will be unpacked as the arguments for the ``MultiHeadAttention`` layer
See https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention.
num_dense_fc : int
The number of hidden layers for the internal feedforward network
dense_settings : dict
A dictionary which will be unpacked as the arguments for the ``Dense`` layer
        use_layer_norm : boolean
            Whether to apply layer normalization before and after attention + feedforward
num_inducing_points : int
The number of inducing points. Should be lower than the smallest set size
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
init = tf.keras.initializers.GlorotUniform()
self.I = tf.Variable(init(shape=(num_inducing_points, input_dim)), name="I", trainable=True)
self.mab0 = MultiHeadAttentionBlock(input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm)
self.mab1 = MultiHeadAttentionBlock(input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm)
def call(self, x, **kwargs):
"""Performs the forward pass through the self-attention layer.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, set_size, input_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, set_size, input_dim)
"""
batch_size = tf.shape(x)[0]
I_expanded = self.I[None, ...]
I_tiled = tf.tile(I_expanded, [batch_size, 1, 1])
h = self.mab0(I_tiled, x, **kwargs)
return self.mab1(x, h, **kwargs)
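def _example_induced_self_attention_block():
    """Illustrative usage sketch, not part of the original module: ISAB avoids the
    quadratic attention cost on a large set via 16 hypothetical inducing points."""
    isab = InducedSelfAttentionBlock(
        input_dim=32,
        attention_settings=dict(num_heads=4, key_dim=32),
        num_dense_fc=2,
        dense_settings=dict(units=64, activation="relu"),
        use_layer_norm=True,
        num_inducing_points=16,
    )
    x = tf.random.normal((8, 512, 32))  # (batch_size, set_size, input_dim)
    return isab(x)  # output of shape (8, 512, 32)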
class PoolingWithAttention(tf.keras.Model):
"""Implements the pooling with multihead attention (PMA) block from [1] which represents
a permutation-invariant encoder for set-based inputs.
[1] Lee, J., Lee, Y., Kim, J., Kosiorek, A., Choi, S., & Teh, Y. W. (2019).
Set transformer: A framework for attention-based permutation-invariant neural networks.
In International conference on machine learning (pp. 3744-3753). PMLR.
"""
def __init__(
self, summary_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, num_seeds=1, **kwargs
):
"""Creates a multihead attention block (MAB) which will perform cross-attention between an input set
and a set of seed vectors (typically one for a single summary) with summary_dim output dimensions.
        Could also be used as part of a ``DeepSet`` for representing learnable instead of fixed pooling.
Parameters
----------
summary_dim : int
The dimensionality of the learned permutation-invariant representation.
attention_settings : dict
A dictionary which will be unpacked as the arguments for the ``MultiHeadAttention`` layer
See https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention.
num_dense_fc : int
The number of hidden layers for the internal feedforward network
dense_settings : dict
A dictionary which will be unpacked as the arguments for the ``Dense`` layer
        use_layer_norm : boolean
            Whether to apply layer normalization before and after attention + feedforward
num_seeds : int, optional, default: 1
The number of "seed vectors" to use. Each seed vector represents a permutation-invariant
summary of the entire set. If you use ``num_seeds > 1``, the resulting seeds will be flattened
into a 2-dimensional output, which will have a dimensionality of ``num_seeds * summary_dim``
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
self.mab = MultiHeadAttentionBlock(
summary_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, **kwargs
)
init = tf.keras.initializers.GlorotUniform()
self.seed_vec = tf.Variable(init(shape=(num_seeds, summary_dim)), name="seed_vec", trainable=True)
self.fc = Sequential([Dense(**dense_settings) for _ in range(num_dense_fc)])
self.fc.add(Dense(summary_dim))
def call(self, x, **kwargs):
"""Performs the forward pass through the PMA block.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, set_size, input_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, num_seeds * summary_dim)
"""
out = self.fc(x)
batch_size = tf.shape(x)[0]
seed_expanded = self.seed_vec[None, ...]
seed_tiled = tf.tile(seed_expanded, [batch_size, 1, 1])
out = self.mab(seed_tiled, out, **kwargs)
return tf.reshape(out, (tf.shape(out)[0], -1))
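def _example_pooling_with_attention():
    """Illustrative usage sketch, not part of the original module: PMA compresses a
    set of arbitrary size into a fixed-size, permutation-invariant summary vector."""
    pma = PoolingWithAttention(
        summary_dim=10,
        attention_settings=dict(num_heads=4, key_dim=16),
        num_dense_fc=2,
        dense_settings=dict(units=32, activation="relu"),
        use_layer_norm=True,
        num_seeds=1,
    )
    x = tf.random.normal((8, 100, 4))  # (batch_size, set_size, input_dim)
    return pma(x)  # output of shape (8, num_seeds * summary_dim) = (8, 10)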
# Repo: BayesFlow
# File: BayesFlow-master/bayesflow/summary_networks.py
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from warnings import warn
import tensorflow as tf
from tensorflow.keras.layers import GRU, LSTM, Dense
from tensorflow.keras.models import Sequential
from bayesflow import default_settings as defaults
from bayesflow.attention import (
InducedSelfAttentionBlock,
MultiHeadAttentionBlock,
PoolingWithAttention,
SelfAttentionBlock,
)
from bayesflow.helper_networks import EquivariantModule, InvariantModule, MultiConv1D
class TimeSeriesTransformer(tf.keras.Model):
"""Implements a many-to-one transformer architecture for time series encoding.
Some ideas can be found in [1]:
[1] Wen, Q., Zhou, T., Zhang, C., Chen, W., Ma, Z., Yan, J., & Sun, L. (2022).
Transformers in time series: A survey. arXiv preprint arXiv:2202.07125.
https://arxiv.org/abs/2202.07125
"""
def __init__(
self,
input_dim,
attention_settings=None,
dense_settings=None,
use_layer_norm=True,
num_dense_fc=2,
summary_dim=10,
num_attention_blocks=2,
template_type="lstm",
template_dim=64,
**kwargs,
):
"""Creates a transformer architecture for encoding time series data into fixed size vectors given by
``summary_dim``. It features a recurrent network given by ``template_type`` which is responsible for
providing a single summary of the time series which then attends to each point in the time series pro-
cessed via a series of ``num_attention_blocks`` self-attention layers.
Important: Assumes that positional encodings have been appended to the input time series.
Recommnded: When using transformers as summary networks, you may want to use a smaller learning rate
during training, e.g., setting ``default_lr=1e-5`` in a ``Trainer`` instance.
Parameters
----------
input_dim : int
The dimensionality of the input data (last axis).
attention_settings : dict or None, optional, default None
A dictionary which will be unpacked as the arguments for the ``MultiHeadAttention`` layer.
If ``None``, default settings will be used (see ``bayesflow.default_settings``)
For instance, to use an attention block with 4 heads and key dimension 32, you can do:
``attention_settings=dict(num_heads=4, key_dim=32)``
You may also want to include dropout regularization in small-to-medium data regimes:
``attention_settings=dict(num_heads=4, key_dim=32, dropout=0.1)``
For more details and arguments, see:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention
dense_settings : dict or None, optional, default: None
A dictionary which will be unpacked as the arguments for the ``Dense`` layer.
For instance, to use hidden layers with 32 units and a relu activation, you can do:
            ``dict(units=32, activation='relu')``
For more details and arguments, see:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense
        use_layer_norm : boolean, optional, default: True
            Whether to apply layer normalization before and after attention + feedforward
num_dense_fc : int, optional, default: 2
The number of hidden layers for the internal feedforward network
summary_dim : int
The dimensionality of the learned permutation-invariant representation.
num_attention_blocks : int, optional, default: 2
The number of self-attention blocks to use before pooling.
template_type : str or callable, optional, default: 'lstm'
The many-to-one (learnable) transformation of the time series.
if ``lstm``, an LSTM network will be used.
if ``gru``, a GRU unit will be used.
if callable, a reference to ``template_type`` will be stored as an attribute.
template_dim : int, optional, default: 64
Only used if ``template_type`` in ['lstm', 'gru']. The number of hidden
units (equiv. output dimensions) of the recurrent network.
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
# Process internal attention settings
if attention_settings is None:
attention_settings = defaults.DEFAULT_SETTING_ATTENTION
if dense_settings is None:
dense_settings = defaults.DEFAULT_SETTING_DENSE_ATTENTION
# Construct a series of self-attention blocks, these will process
# the time series in a many-to-many fashion
self.attention_blocks = Sequential()
for _ in range(num_attention_blocks):
block = SelfAttentionBlock(input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm)
self.attention_blocks.add(block)
# Construct final attention layer, which will perform cross-attention
# between the outputs of the self-attention layers and the dynamic template
self.output_attention = MultiHeadAttentionBlock(
template_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm
)
# A recurrent network will learn the dynamic many-to-one template
if template_type.upper() == "LSTM":
self.template_net = LSTM(template_dim)
elif template_type.upper() == "GRU":
self.template_net = GRU(template_dim)
else:
assert callable(template_type), "Argument `template_type` should be callable or in ['lstm', 'gru']"
self.template_net = template_type
# Final output reduces representation into a vector of length summary_dim
self.output_layer = Dense(summary_dim)
def call(self, x, **kwargs):
"""Performs the forward pass through the transformer.
Parameters
----------
x : tf.Tensor
Time series input of shape (batch_size, num_time_points, input_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, summary_dim)
"""
rep = self.attention_blocks(x, **kwargs)
template = self.template_net(x, **kwargs)
rep = self.output_attention(tf.expand_dims(template, axis=1), rep, **kwargs)
rep = tf.squeeze(rep, axis=1)
out = self.output_layer(rep)
return out
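# Hedged usage sketch (illustrative, not part of the library API): a minimal
# smoke test for ``TimeSeriesTransformer``. All shapes are assumptions for the
# example, and we assume positional encodings have already been appended to the
# inputs, as the class requires.
def _demo_time_series_transformer():
    net = TimeSeriesTransformer(input_dim=3)
    x = tf.random.normal((8, 50, 3))  # (batch_size, num_time_points, input_dim)
    out = net(x)  # -> (8, 10), i.e., (batch_size, summary_dim)
    return out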
class SetTransformer(tf.keras.Model):
"""Implements the set transformer architecture from [1] which ultimately represents
a learnable permutation-invariant function.
[1] Lee, J., Lee, Y., Kim, J., Kosiorek, A., Choi, S., & Teh, Y. W. (2019).
Set transformer: A framework for attention-based permutation-invariant neural networks.
In International conference on machine learning (pp. 3744-3753). PMLR.
"""
def __init__(
self,
input_dim,
attention_settings=None,
dense_settings=None,
use_layer_norm=True,
num_dense_fc=2,
summary_dim=10,
num_attention_blocks=2,
num_inducing_points=32,
num_seeds=1,
**kwargs,
):
"""Creates a set transformer architecture according to [1] which will extract permutation-invariant
features from an input set using a set of seed vectors (typically one for a single summary) with ``summary_dim``
output dimensions.
Recommended: When using transformers as summary networks, you may want to use a smaller learning rate
during training, e.g., setting ``default_lr=1e-5`` in a ``Trainer`` instance.
Parameters
----------
input_dim : int
The dimensionality of the input data (last axis).
attention_settings : dict or None, optional, default: None
A dictionary which will be unpacked as the arguments for the ``MultiHeadAttention`` layer
For instance, to use an attention block with 4 heads and key dimension 32, you can do:
``attention_settings=dict(num_heads=4, key_dim=32)``
You may also want to include dropout regularization in small-to-medium data regimes:
``attention_settings=dict(num_heads=4, key_dim=32, dropout=0.1)``
For more details and arguments, see:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention
dense_settings : dict or None, optional, default: None
A dictionary which will be unpacked as the arguments for the ``Dense`` layer.
For instance, to use hidden layers with 32 units and a relu activation, you can do:
``dict(units=32, activation='relu')``
For more details and arguments, see:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense
use_layer_norm : boolean, optional, default: True
Whether to apply layer normalization before and after the attention + feedforward blocks
num_dense_fc : int, optional, default: 2
The number of hidden layers for the internal feedforward network
summary_dim : int, optional, default: 10
The dimensionality of the learned permutation-invariant representation.
num_attention_blocks : int, optional, default: 2
The number of self-attention blocks to use before pooling.
num_inducing_points : int or None, optional, default: 32
The number of inducing points. Should be lower than the smallest set size.
If ``None`` is selected, a vanilla self-attention block (SAB) will be used; otherwise,
ISAB blocks will be used. For ``num_attention_blocks > 1``, we currently recommend
always using some number of inducing points.
num_seeds : int, optional, default: 1
The number of "seed vectors" to use. Each seed vector represents a permutation-invariant
summary of the entire set. If you use ``num_seeds > 1``, the resulting seeds will be flattened
into a 2-dimensional output, which will have a dimensionality of ``num_seeds * summary_dim``.
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
# Process internal attention settings
if attention_settings is None:
attention_settings = defaults.DEFAULT_SETTING_ATTENTION
if dense_settings is None:
dense_settings = defaults.DEFAULT_SETTING_DENSE_ATTENTION
# Construct a series of self-attention blocks
self.attention_blocks = Sequential()
for _ in range(num_attention_blocks):
if num_inducing_points is not None:
block = InducedSelfAttentionBlock(
input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, num_inducing_points
)
else:
block = SelfAttentionBlock(input_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm)
self.attention_blocks.add(block)
# Pooler will be applied to the representations learned through self-attention
self.pooler = PoolingWithAttention(
summary_dim, attention_settings, num_dense_fc, dense_settings, use_layer_norm, num_seeds
)
def call(self, x, **kwargs):
"""Performs the forward pass through the set-transformer.
Parameters
----------
x : tf.Tensor
The input set of shape (batch_size, set_size, input_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, summary_dim * num_seeds)
"""
out = self.attention_blocks(x, **kwargs)
out = self.pooler(out, **kwargs)
return out
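# Hedged usage sketch (illustrative, not part of the library API): a minimal
# smoke test for ``SetTransformer``. The set size (64) is chosen to exceed the
# default ``num_inducing_points=32``, as recommended in the docstring above.
def _demo_set_transformer():
    net = SetTransformer(input_dim=2)
    x = tf.random.normal((8, 64, 2))  # (batch_size, set_size, input_dim)
    out = net(x)  # -> (8, 10), i.e., (batch_size, summary_dim * num_seeds)
    return out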
class DeepSet(tf.keras.Model):
"""Implements a deep permutation-invariant network according to [1] and [2].
[1] Zaheer, M., Kottur, S., Ravanbakhsh, S., Poczos, B., Salakhutdinov, R. R., & Smola, A. J. (2017).
Deep sets. Advances in neural information processing systems, 30.
[2] Bloem-Reddy, B., & Teh, Y. W. (2020).
Probabilistic Symmetries and Invariant Neural Networks.
J. Mach. Learn. Res., 21, 90-1.
"""
def __init__(
self,
summary_dim=10,
num_dense_s1=2,
num_dense_s2=2,
num_dense_s3=2,
num_equiv=2,
dense_s1_args=None,
dense_s2_args=None,
dense_s3_args=None,
pooling_fun="mean",
**kwargs,
):
"""Creates a stack of 'num_equiv' equivariant layers followed by a final invariant layer.
Parameters
----------
summary_dim : int, optional, default: 10
The number of learned summary statistics.
num_dense_s1 : int, optional, default: 2
The number of dense layers in the inner function of a deep set.
num_dense_s2 : int, optional, default: 2
The number of dense layers in the outer function of a deep set.
num_dense_s3 : int, optional, default: 2
The number of dense layers in an equivariant layer.
num_equiv : int, optional, default: 2
The number of equivariant layers in the network.
dense_s1_args : dict or None, optional, default: None
The arguments for the dense layers of s1 (inner, pre-pooling function). If `None`,
defaults will be used (see `default_settings`). Otherwise, all arguments for a
tf.keras.layers.Dense layer are supported.
dense_s2_args : dict or None, optional, default: None
The arguments for the dense layers of s2 (outer, post-pooling function). If `None`,
defaults will be used (see `default_settings`). Otherwise, all arguments for a
tf.keras.layers.Dense layer are supported.
dense_s3_args : dict or None, optional, default: None
The arguments for the dense layers of s3 (equivariant function). If `None`,
defaults will be used (see `default_settings`). Otherwise, all arguments for a
tf.keras.layers.Dense layer are supported.
pooling_fun : str or callable, optional, default: 'mean'
If a string argument is provided, it should be one of ['mean', 'max']. Alternatively, an actual
neural network can be passed for learnable pooling.
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the __init__() method of tf.keras.Model.
"""
super().__init__(**kwargs)
# Prepare settings dictionary
settings = dict(
num_dense_s1=num_dense_s1,
num_dense_s2=num_dense_s2,
num_dense_s3=num_dense_s3,
dense_s1_args=defaults.DEFAULT_SETTING_DENSE_INVARIANT if dense_s1_args is None else dense_s1_args,
dense_s2_args=defaults.DEFAULT_SETTING_DENSE_INVARIANT if dense_s2_args is None else dense_s2_args,
dense_s3_args=defaults.DEFAULT_SETTING_DENSE_INVARIANT if dense_s3_args is None else dense_s3_args,
pooling_fun=pooling_fun,
)
# Create equivariant layers and final invariant layer
self.equiv_layers = Sequential([EquivariantModule(settings) for _ in range(num_equiv)])
self.inv = InvariantModule(settings)
# Output layer to output "summary_dim" learned summary statistics
self.out_layer = Dense(summary_dim, activation="linear")
self.summary_dim = summary_dim
def call(self, x):
"""Performs the forward pass of a learnable deep invariant transformation consisting of
a sequence of equivariant transforms followed by an invariant transform.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, n_obs, data_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, out_dim)
"""
# Pass through series of augmented equivariant transforms
out_equiv = self.equiv_layers(x)
# Pass through final invariant layer
out = self.out_layer(self.inv(out_equiv))
return out
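# Hedged usage sketch (illustrative, not part of the library API): a minimal
# smoke test for ``DeepSet`` with default settings; all shapes are illustrative.
def _demo_deep_set():
    net = DeepSet()
    x = tf.random.normal((8, 16, 4))  # (batch_size, n_obs, data_dim)
    out = net(x)  # -> (8, 10), i.e., (batch_size, summary_dim)
    return out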
class InvariantNetwork(DeepSet):
"""Deprecated class for ``InvariantNetwork``."""
def __init_subclass__(cls, **kwargs):
warn(
f"{cls.__name__} will be deprecated at some point. Use ``DeepSet`` instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init_subclass__(**kwargs)
def __init__(self, *args, **kwargs):
warn(
f"{self.__class__.__name__} will be deprecated. at some point. Use ``DeepSet`` instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
class SequentialNetwork(tf.keras.Model):
"""Implements a sequence of `MultiConv1D` layers followed by an LSTM network.
For details and rationale, see [1]:
[1] Radev, S. T., Graw, F., Chen, S., Mutters, N. T., Eichel, V. M., Bärnighausen, T., & Köthe, U. (2021).
OutbreakFlow: Model-based Bayesian inference of disease outbreak dynamics with invertible neural networks
and its application to the COVID-19 pandemics in Germany.
PLoS computational biology, 17(10), e1009472.
https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1009472
"""
def __init__(self, summary_dim=10, num_conv_layers=2, lstm_units=128, conv_settings=None, **kwargs):
"""Creates a stack of inception-like layers followed by an LSTM network, with the idea
of learning vector representations from multivariate time series data.
Parameters
----------
summary_dim : int, optional, default: 10
The number of learned summary statistics.
num_conv_layers : int, optional, default: 2
The number of convolutional layers to use.
lstm_units : int, optional, default: 128
The number of hidden LSTM units.
conv_settings : dict or None, optional, default: None
The arguments passed to the `MultiConv1D` internal networks. If `None`,
defaults will be used from `default_settings`. If a dictionary is provided,
it should contain the following keys:
- layer_args (dict) : arguments for `tf.keras.layers.Conv1D` without kernel_size
- min_kernel_size (int) : the minimum kernel size (>= 1)
- max_kernel_size (int) : the maximum kernel size
**kwargs : dict
Optional keyword arguments passed to the __init__() method of tf.keras.Model
"""
super().__init__(**kwargs)
# Take care of None conv_settings
if conv_settings is None:
conv_settings = defaults.DEFAULT_SETTING_MULTI_CONV
self.net = Sequential([MultiConv1D(conv_settings) for _ in range(num_conv_layers)])
self.lstm = LSTM(lstm_units)
self.out_layer = Dense(summary_dim, activation="linear")
self.summary_dim = summary_dim
def call(self, x, **kwargs):
"""Performs a forward pass through the network by first passing `x` through the sequence of
multi-convolutional layers and then applying the LSTM network.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, n_time_steps, n_time_series)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, summary_dim)
"""
out = self.net(x, **kwargs)
out = self.lstm(out, **kwargs)
out = self.out_layer(out, **kwargs)
return out
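# Hedged usage sketch (illustrative, not part of the library API): a minimal
# smoke test for ``SequentialNetwork`` on a toy multivariate time series.
def _demo_sequential_network():
    net = SequentialNetwork()
    x = tf.random.normal((8, 100, 2))  # (batch_size, n_time_steps, n_time_series)
    out = net(x)  # -> (8, 10), i.e., (batch_size, summary_dim)
    return out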
class SplitNetwork(tf.keras.Model):
"""Implements a vertical stack of networks and concatenates their individual outputs. Allows for splitting
of data to provide an individual network for each split of the data.
"""
def __init__(self, num_splits, split_data_configurator, network_type=InvariantNetwork, network_kwargs={}, **kwargs):
"""Creates a composite network of `num_splits` sub-networks of type `network_type`, each with configuration
specified by `meta`.
Parameters
----------
num_splits : int
The number of splits for the data, which will equal the number of sub-networks.
split_data_configurator : callable
Function that takes the arguments `i` and `x` where `i` is the index of the
network and `x` are the inputs to the `SplitNetwork`. Should return the input
for the corresponding network.
For example, to achieve a network which is permutation-invariant both
vertically (i.e., across rows) and horizontally (i.e., across columns), one could do:
`def split(i, x):
selector = tf.where(x[:,:,0]==i, 1.0, 0.0)
selected = x[:,:,1] * selector
split_x = tf.stack((selector, selected), axis=-1)
return split_x
`
where `x[:,:,0]` contains an integer indicating which split the data
in `x[:,:,1]` belongs to. All values in `x[:,:,1]` that are not selected
are set to zero. The selector is passed along with the modified data,
indicating which rows belong to the split `i`.
network_type : callable, optional, default: `InvariantNetwork`
Type of neural network to use.
network_kwargs : dict, optional, default: {}
A dictionary containing the configuration for the networks.
**kwargs
Optional keyword arguments to be passed to the `tf.keras.Model` superclass.
"""
super().__init__(**kwargs)
self.num_splits = num_splits
self.split_data_configurator = split_data_configurator
self.networks = [network_type(**network_kwargs) for _ in range(num_splits)]
def call(self, x):
"""Performs a forward pass through the subnetworks and concatenates their output.
Parameters
----------
x : tf.Tensor
Input of shape (batch_size, n_obs, data_dim)
Returns
-------
out : tf.Tensor
Output of shape (batch_size, out_dim)
"""
out = [self.networks[i](self.split_data_configurator(i, x)) for i in range(self.num_splits)]
out = tf.concat(out, axis=-1)
return out
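# Hedged usage sketch (illustrative, not part of the library API): uses the
# split configurator from the docstring above, where channel 0 holds integer
# split indices and channel 1 the actual values; ``DeepSet`` is a stand-in
# choice for the sub-network type.
def _demo_split_network():
    def split(i, x):
        selector = tf.where(x[:, :, 0] == i, 1.0, 0.0)
        selected = x[:, :, 1] * selector
        return tf.stack((selector, selected), axis=-1)

    net = SplitNetwork(num_splits=2, split_data_configurator=split, network_type=DeepSet)
    idx = tf.cast(tf.random.uniform((8, 16), maxval=2, dtype=tf.int32), tf.float32)
    vals = tf.random.normal((8, 16))
    x = tf.stack((idx, vals), axis=-1)  # (batch_size, n_obs, 2)
    out = net(x)  # -> (8, 20), i.e., (batch_size, num_splits * summary_dim)
    return out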
class HierarchicalNetwork(tf.keras.Model):
"""Implements a hierarchical summary network according to [1].
[1] Elsemüller, L., Schnuerch, M., Bürkner, P. C., & Radev, S. T. (2023).
A Deep Learning Method for Comparing Bayesian Hierarchical Models.
arXiv preprint arXiv:2301.11873.
"""
def __init__(self, networks_list, **kwargs):
"""Creates a hierarchical network consisting of stacked summary networks (one for each hierarchical level)
that are aligned with the probabilistic structure of the processed data.
Note: The networks will start processing from the lowest hierarchical level (e.g., observational level)
up to the highest hierarchical level. It is recommended to provide higher-level networks with more
expressive power to allow for an adequate compression of lower-level data.
Example: For two-level hierarchical models with the assumption of temporal dependencies on the lowest
hierarchical level (e.g., observational level) and exchangeable units at the higher level
(e.g., group level), a list of [SequentialNetwork(), DeepSet()] could be passed.
Parameters
----------
networks_list : list of tf.keras.Model
The list of summary networks (one per hierarchical level), starting from the lowest hierarchical level.
"""
super().__init__(**kwargs)
self.networks = networks_list
def call(self, x, return_all=False, **kwargs):
"""Performs the forward pass through the hierarchical network,
transforming the nested input into learned summary statistics.
Parameters
----------
x : tf.Tensor of shape (batch_size, ..., data_dim)
Example: for hierarchical data sets with two levels,
(batch_size, D, L, x_dim) reduces to (batch_size, out_dim).
return_all : boolean, optional, default: False
Whether to return all intermediate outputs (True) or just
the final one (False).
Returns
-------
out : tf.Tensor
Output of shape ``(batch_size, out_dim)`` if ``return_all=False``, else a tuple
of ``len(outputs) == len(networks)`` corresponding to all outputs of all networks.
"""
if return_all:
outputs = []
for net in self.networks:
x = net(x, **kwargs)
outputs.append(x)
return outputs
else:
for net in self.networks:
x = net(x, **kwargs)
return x
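# Hedged usage sketch (illustrative, not part of the library API): a two-level
# hierarchical input of shape (batch_size, groups, observations, x_dim). This
# assumes the inner networks pool over the second-to-last axis (as BayesFlow's
# ``DeepSet`` does), so each network call removes one hierarchical level.
def _demo_hierarchical_network():
    net = HierarchicalNetwork([DeepSet(summary_dim=8), DeepSet(summary_dim=10)])
    x = tf.random.normal((4, 6, 20, 3))  # (batch_size, D, L, x_dim)
    out = net(x)  # -> (4, 10): (4, 6, 20, 3) -> (4, 6, 8) -> (4, 10)
    return out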
| 26,357 | 42.93 | 120 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/helper_classes.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
from copy import deepcopy
import numpy as np
import pandas as pd
import tensorflow as tf
try:
import cPickle as pickle
except:
import pickle
import logging
logging.basicConfig()
from sklearn.linear_model import HuberRegressor
from bayesflow.default_settings import DEFAULT_KEYS
class SimulationDataset:
"""Helper class to create a tensorflow.data.Dataset which parses simulation dictionaries
and returns simulation dictionaries as expected by BayesFlow amortizers.
"""
def __init__(self, forward_dict, batch_size, buffer_size=1024):
"""Creates a wrapper holding a ``tf.data.Dataset`` instance for
offline training in an amortized estimation context.
Parameters
----------
forward_dict : dict
The outputs from a ``GenerativeModel`` or a custom function,
stored in a dictionary with at least the following keys:
``sim_data`` - an array representing the batched output of the model
``prior_draws`` - an array with draws generated from the model's prior
batch_size : int
The total number of simulations from all models in a given batch.
The batch size per model will be calculated as ``batch_size // num_models``
buffer_size : int, optional, default: 1024
The buffer size for shuffling elements in a ``tf.data.Dataset``
"""
slices, keys_used, keys_none, n_sim = self._determine_slices(forward_dict)
self.data = tf.data.Dataset.from_tensor_slices(tuple(slices)).shuffle(buffer_size).batch(batch_size)
self.keys_used = keys_used
self.keys_none = keys_none
self.n_sim = n_sim
self.num_batches = len(self.data)
def _determine_slices(self, forward_dict):
"""Determine slices for a tensorflow Dataset."""
keys_used = []
keys_none = []
slices = []
for k, v in forward_dict.items():
if forward_dict[k] is not None:
slices.append(v)
keys_used.append(k)
else:
keys_none.append(k)
n_sim = forward_dict[DEFAULT_KEYS["sim_data"]].shape[0]
return slices, keys_used, keys_none, n_sim
def __call__(self, batch_in):
"""Convert output of tensorflow.data.Dataset to dict."""
forward_dict = {}
for key_used, batch_stuff in zip(self.keys_used, batch_in):
forward_dict[key_used] = batch_stuff.numpy()
for key_none in self.keys_none:
forward_dict[key_none] = None
return forward_dict
def __len__(self):
return len(self.data)
def __iter__(self):
return map(self, self.data)
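# Hedged usage sketch (illustrative, not part of the library API): wraps a toy
# simulation dictionary and iterates over shuffled batches. The keys follow
# ``DEFAULT_KEYS``; all shapes are illustrative.
def _demo_simulation_dataset():
    forward_dict = {
        "prior_draws": np.random.normal(size=(128, 5)).astype(np.float32),
        "sim_data": np.random.normal(size=(128, 40, 2)).astype(np.float32),
    }
    dataset = SimulationDataset(forward_dict, batch_size=32)
    for batch in dataset:
        # Each batch is again a dict of numpy arrays with the same keys
        assert batch["prior_draws"].shape == (32, 5)
    return dataset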
class MultiSimulationDataset:
"""Helper class for model comparison training with multiple generative models.
Will create multiple ``SimulationDataset`` instances, each parsing their own
simulation dictionaries and returning these as expected by BayesFlow amortizers.
"""
def __init__(self, forward_dict, batch_size, buffer_size=1024):
"""Creates a wrapper holding multiple ``tf.data.Dataset`` instances for
offline training in an amortized model comparison context.
Parameters
----------
forward_dict : dict
The outputs from a ``MultiGenerativeModel`` or a custom function,
stored in a dictionary with at least the following keys:
``model_outputs`` - a list with length equal to the number of models,
each element representing a batched output of a single model
``model_indices`` - a list with integer model indices, which will
later be one-hot-encoded for the model comparison learning problem.
batch_size : int
The total number of simulations from all models in a given batch.
The batch size per model will be calculated as ``batch_size // num_models``
buffer_size : int, optional, default: 1024
The buffer size for shuffling elements in a ``tf.data.Dataset``
"""
self.model_indices = forward_dict[DEFAULT_KEYS["model_indices"]]
self.num_models = len(self.model_indices)
self.per_model_batch_size = batch_size // self.num_models
self.datasets = [
SimulationDataset(out, self.per_model_batch_size, buffer_size)
for out in forward_dict[DEFAULT_KEYS["model_outputs"]]
]
self.current_it = 0
self.num_batches = min([d.num_batches for d in self.datasets])
self.iters = [iter(d) for d in self.datasets]
self.batch_size = batch_size
def __next__(self):
if self.current_it < self.num_batches:
outputs = [next(d) for d in self.iters]
output_dict = {DEFAULT_KEYS["model_outputs"]: outputs, DEFAULT_KEYS["model_indices"]: self.model_indices}
self.current_it += 1
return output_dict
self.current_it = 0
self.iters = [iter(d) for d in self.datasets]
raise StopIteration
def __iter__(self):
return self
class EarlyStopper:
"""This class will track the total validation loss and trigger an early stopping
recommendation based on its hyperparameters."""
def __init__(self, patience=5, tolerance=0.05):
"""
patience : int, optional, default: 5
How many successive times the validation loss must fail to improve by at least
``tolerance`` before an early stopping recommendation is triggered.
tolerance : float, optional, default: 0.05
The minimum reduction of validation loss to be considered significant.
"""
self.history = []
self.patience = patience
self.tolerance = tolerance
self._patience_counter = 0
def update_and_recommend(self, current_val_loss):
"""Adds loss to history and check difference between sequential losses."""
self.history.append(current_val_loss)
rec = self._check_patience()
return rec
def _check_patience(self):
"""Check whether the patience has been surpassed or not.
Assumes current_val_loss has previously been added to the internal
history, so it has at least one element.
"""
# Still not enough history, no recommendation
if len(self.history) <= 1:
return False
# Significant decrease (improvement) according to tolerance, reset patience
if (self.history[-2] - self.history[-1]) >= self.tolerance:
self._patience_counter = 0
return False
# No significant decrease, check counter
else:
# Still no stop recommendation, but increase counter
if self._patience_counter < self.patience:
self._patience_counter += 1
return False
# Reset counter and recommend stop
else:
self._patience_counter = 0
return True
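# Hedged usage sketch (illustrative, not part of the library API): feeds a
# validation-loss trajectory that quickly plateaus, so the stopper eventually
# recommends stopping after ``patience`` successive non-significant improvements.
def _demo_early_stopper():
    stopper = EarlyStopper(patience=3, tolerance=0.01)
    for epoch, val_loss in enumerate([1.0, 0.95, 0.949, 0.949, 0.948, 0.948]):
        if stopper.update_and_recommend(val_loss):
            return epoch  # stopping recommended here (epoch 5 in this toy run)
    return None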
class RegressionLRAdjuster:
"""This class will compute the slope of the loss trajectory and inform learning rate decay."""
file_name = "lr_adjuster"
def __init__(
self,
optimizer,
period=1000,
wait_between_fits=10,
patience=10,
tolerance=-0.05,
reduction_factor=0.25,
cooldown_factor=2,
num_resets=3,
**kwargs,
):
"""Creates an instance with given hyperparameters which will track the slope of the
loss trajectory according to specified hyperparameters and then issue an optional
stopping suggestion.
Parameters
----------
optimizer : tf.keras.optimizers.Optimizer instance
An optimizer exposing an assignable ``lr`` attribute (e.g., a ``tf.keras`` optimizer)
period : int, optional, default: 1000
How many of the most recent loss values to consider
wait_between_fits : int, optional, default: 10
How many backpropagation updates to wait between two successive fits
patience : int, optional, default: 10
How many successive times the tolerance value is reached before lr update.
tolerance : float, optional, default: -0.05
The minimum (negative) slope of the loss trajectory to be considered substantial progress.
reduction_factor : float in [0, 1], optional, default: 0.25
The factor by which the learning rate is reduced upon hitting the `tolerance`
threshold for `patience` number of times
cooldown_factor : float, optional, default: 2
The factor by which the `period` is multiplied to arrive at a cooldown period.
num_resets : int, optional, default: 3
How many times to reduce the learning rate before issuing an optional stopping suggestion
**kwargs : dict, optional, default {}
Additional keyword arguments passed to the `HuberRegressor` class.
"""
self.optimizer = optimizer
self.period = period
self.wait_between_periods = wait_between_fits
self.regressor = HuberRegressor(**kwargs)
self.t_vector = np.linspace(0, 1, self.period)[:, np.newaxis]
self.patience = patience
self.tolerance = tolerance
self.num_resets = num_resets
self.reduction_factor = reduction_factor
self.stopping_issued = False
self.cooldown_factor = cooldown_factor
self._history = {"iteration": [], "learning_rate": []}
self._reset_counter = 0
self._patience_counter = 0
self._cooldown_counter = 0
self._wait_counter = 0
self._slope = None
self._is_waiting = False
self._in_cooldown = False
def get_slope(self, losses):
"""Fits a Huber regression on the provided loss trajectory or returns `None` if
not enough data points present.
"""
# Return None if not enough loss values present
if losses.shape[0] < self.period:
return None
# Increment counter
if self._in_cooldown:
self._cooldown_counter += 1
# Check if still in a waiting phase and return old slope
# if still waiting, otherwise refit Huber regression
wait = self._check_waiting()
if wait:
return self._slope
else:
self.regressor.fit(self.t_vector, losses[-self.period :])
self._slope = self.regressor.coef_[0]
self._check_patience()
return self._slope
def reset(self):
"""Resets all stateful variables in preparation for a new start."""
self._reset_counter = 0
self._patience_counter = 0
self._cooldown_counter = 0
self._wait_counter = 0
self._in_cooldown = False
self._is_waiting = False
self.stopping_issued = False
def save_to_file(self, file_path):
"""Saves the state parameters of a RegressionLRAdjuster object to a pickled dictionary in file_path."""
# Create path to memory
memory_path = os.path.join(file_path, f"{RegressionLRAdjuster.file_name}.pkl")
# Prepare attributes
states_dict = {}
states_dict["_history"] = self._history
states_dict["_reset_counter"] = self._reset_counter
states_dict["_patience_counter"] = self._patience_counter
states_dict["_cooldown_counter"] = self._cooldown_counter
states_dict["_wait_counter"] = self._wait_counter
states_dict["_slope"] = self._slope
states_dict["_is_waiting"] = self._is_waiting
states_dict["_in_cooldown"] = self._in_cooldown
# Dump as pickle object
with open(memory_path, "wb") as f:
pickle.dump(states_dict, f)
def load_from_file(self, file_path):
"""Loads the saved LRAdjuster object from file_path."""
# Logger init
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Create path to memory
memory_path = os.path.join(file_path, f"{RegressionLRAdjuster.file_name}.pkl")
# Case memory file exists
if os.path.exists(memory_path):
# Load pickle and fill in attributes
with open(memory_path, "rb") as f:
states_dict = pickle.load(f)
self._history = states_dict["_history"]
self._reset_counter = states_dict["_reset_counter"]
self._patience_counter = states_dict["_patience_counter"]
self._cooldown_counter = states_dict["_cooldown_counter"]
self._wait_counter = states_dict["_wait_counter"]
self._slope = states_dict["_slope"]
self._is_waiting = states_dict["_is_waiting"]
self._in_cooldown = states_dict["_in_cooldown"]
logger.info(f"Loaded RegressionLRAdjuster from {memory_path}")
# Case memory file does not exist
else:
logger.info("Initialized a new RegressionLRAdjuster.")
def _check_patience(self):
"""Determines whether to reduce learning rate or be patient."""
# Do nothing, if still in cooldown period
if self._in_cooldown and self._cooldown_counter < int(self.cooldown_factor * self.period):
return
# Otherwise set cooldown flag to False and reset counter
else:
self._in_cooldown = False
self._cooldown_counter = 0
# Check if the negative slope is too small (i.e., insufficient improvement)
if self._slope > self.tolerance:
self._patience_counter += 1
else:
self._patience_counter = max(0, self._patience_counter - 1)
# Check if patience surpassed and issue a reduction in learning rate
if self._patience_counter >= self.patience:
self._reduce_learning_rate()
self._patience_counter = 0
def _reduce_learning_rate(self):
"""Reduces the learning rate by a given factor."""
# Logger init
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if self._reset_counter >= self.num_resets:
self.stopping_issued = True
else:
# Take care of updating learning rate
old_lr = self.optimizer.lr.numpy()
new_lr = round(self.reduction_factor * old_lr, 8)
self.optimizer.lr.assign(new_lr)
self._reset_counter += 1
# Store iteration and learning rate
self._history["iteration"].append(self.optimizer.iterations.numpy())
self._history["learning_rate"].append(old_lr)
# Verbose info to user
logger.info(f"Reducing learning rate from {old_lr:.8f} to: {new_lr:.8f} and entering cooldown...")
# Set cooldown flag to avoid reset for some time given by self.period
self._in_cooldown = True
def _check_waiting(self):
"""Determines whether to compute a new slope or wait."""
# Case currently waiting
if self._is_waiting:
# Case currently waiting but period is over
if self._wait_counter >= self.wait_between_periods - 1:
self._wait_counter = 0
self._is_waiting = False
# Case currently waiting and period not over
else:
self._wait_counter += 1
return True
# Case not waiting
else:
self._is_waiting = True
self._wait_counter += 1
return False
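# Hedged usage sketch (illustrative, not part of the library API): feeds a
# nearly flat loss trajectory so the fitted slope stays above ``tolerance`` and
# the learning rate is eventually reduced. Assumes a TF 2.x optimizer that
# exposes an assignable ``lr`` variable, mirroring the class's own usage.
def _demo_regression_lr_adjuster():
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
    adjuster = RegressionLRAdjuster(optimizer, period=100, wait_between_fits=5, patience=3)
    losses = np.linspace(1.0, 0.99, 400)  # slope close to zero -> insufficient progress
    for t in range(100, 400):
        adjuster.get_slope(losses[:t])
    return float(optimizer.lr.numpy())  # smaller than 1e-3 after the reductions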
class LossHistory:
"""Helper class to keep track of losses during training."""
file_name = "history"
def __init__(self):
self.latest = 0
self.history = {}
self.val_history = {}
self.loss_names = []
self.val_loss_names = []
self._current_run = 0
self._total_loss = []
self._total_val_loss = []
@property
def total_loss(self):
return np.array(self._total_loss)
@property
def total_val_loss(self):
return np.array(self._total_val_loss)
def last_total_loss(self):
return self._total_loss[-1]
def last_total_val_loss(self):
return self._total_val_loss[-1]
def start_new_run(self):
self._current_run += 1
self.history[f"Run {self._current_run}"] = {}
self.val_history[f"Run {self._current_run}"] = {}
def add_val_entry(self, epoch, val_loss):
"""Add validation entry to loss structure. Assume ``loss_names`` already exists
as an attribute, so no attempt will be made to create names.
"""
# Add epoch key, if specified
if self.val_history[f"Run {self._current_run}"].get(f"Epoch {epoch}") is None:
self.val_history[f"Run {self._current_run}"][f"Epoch {epoch}"] = []
# Handle dict loss output
if type(val_loss) is dict:
# Store keys, if none existing
if self.val_loss_names == []:
self.val_loss_names = ["Val." + k for k in val_loss.keys()]
# Create and store entry
entry = [v.numpy() if type(v) is not np.ndarray else v for v in val_loss.values()]
self.val_history[f"Run {self._current_run}"][f"Epoch {epoch}"].append(entry)
# Add entry to total loss
self._total_val_loss.append(sum(entry))
# Handle tuple or list loss output
elif type(val_loss) is tuple or type(val_loss) is list:
entry = [v.numpy() if type(v) is not np.ndarray else v for v in val_loss]
self.val_history[f"Run {self._current_run}"][f"Epoch {epoch}"].append(entry)
# Store keys, if none existing
if self.val_loss_names == []:
self.val_loss_names = [f"Val.Loss.{l}" for l in range(1, len(entry) + 1)]
# Add entry to total loss
self._total_val_loss.append(sum(entry))
# Assume scalar loss output
else:
self.val_history[f"Run {self._current_run}"][f"Epoch {epoch}"].append(val_loss.numpy())
# Store keys, if none existing
if self.val_loss_names == []:
self.val_loss_names.append("Default.Val.Loss")
# Add entry to total loss
self._total_val_loss.append(val_loss.numpy())
def add_entry(self, epoch, current_loss):
"""Adds loss entry for current epoch into internal memory data structure."""
# Add epoch key, if specified
if self.history[f"Run {self._current_run}"].get(f"Epoch {epoch}") is None:
self.history[f"Run {self._current_run}"][f"Epoch {epoch}"] = []
# Handle dict loss output
if type(current_loss) is dict:
# Store keys, if none existing
if self.loss_names == []:
self.loss_names = [k for k in current_loss.keys()]
# Create and store entry
entry = [v.numpy() if type(v) is not np.ndarray else v for v in current_loss.values()]
self.history[f"Run {self._current_run}"][f"Epoch {epoch}"].append(entry)
# Add entry to total loss
self._total_loss.append(sum(entry))
# Handle tuple or list loss output
elif type(current_loss) is tuple or type(current_loss) is list:
entry = [v.numpy() if type(v) is not np.ndarray else v for v in current_loss]
self.history[f"Run {self._current_run}"][f"Epoch {epoch}"].append(entry)
# Store keys, if none existing
if self.loss_names == []:
self.loss_names = [f"Loss.{l}" for l in range(1, len(entry) + 1)]
# Add entry to total loss
self._total_loss.append(sum(entry))
# Assume scalar loss output
else:
self.history[f"Run {self._current_run}"][f"Epoch {epoch}"].append(current_loss.numpy())
# Store keys, if none existing
if self.loss_names == []:
self.loss_names.append("Default.Loss")
# Add entry to total loss
self._total_loss.append(current_loss.numpy())
def get_running_losses(self, epoch):
"""Compute and return running means of the losses for current epoch."""
means = np.atleast_1d(np.mean(self.history[f"Run {self._current_run}"][f"Epoch {epoch}"], axis=0))
if means.shape[0] == 1:
return {"Avg.Loss": means[0]}
else:
return {"Avg." + k: v for k, v in zip(self.loss_names, means)}
def get_plottable(self):
"""Returns the losses as a nicely formatted pandas DataFrame, in case
only train losses were collected, otherwise a dict of data frames.
"""
# Assume equal lengths per epoch and run
try:
losses_df = self._to_data_frame(self.history, self.loss_names)
if any([v for v in self.val_history.values()]):
# Remove decay losses
names = [name for name in self.loss_names if "Decay" not in name]
val_losses_df = self._to_data_frame(self.val_history, names)
return {"train_losses": losses_df, "val_losses": val_losses_df}
return losses_df
# Handle unequal lengths or problems when user kills training with an interrupt
except (ValueError, TypeError):
if any([v for v in self.val_history.values()]):
return {"train_losses": self.history, "val_losses": self.val_history}
return self.history
def flush(self):
"""Returns current history and removes all existing loss history, but keeps loss names."""
history = self.history
val_history = self.val_history
self.history = {}
self.val_history = {}
self._total_loss = []
self._total_val_loss = []
self._current_run = 0
return history, val_history
def save_to_file(self, file_path, max_to_keep):
"""Saves a `LossHistory` object to a pickled dictionary in file_path.
If max_to_keep saved loss history files are found in file_path, the oldest is deleted before a new one is saved.
"""
# Increment history index
self.latest += 1
# Path to history
history_path = os.path.join(file_path, f"{LossHistory.file_name}_{self.latest}.pkl")
# Prepare full history dict
pickle_dict = {
"history": self.history,
"val_history": self.val_history,
"loss_names": self.loss_names,
"val_loss_names": self.val_loss_names,
"_current_run": self._current_run,
"_total_loss": self._total_loss,
"_total_val_loss": self._total_val_loss,
}
# Pickle current
with open(history_path, "wb") as f:
pickle.dump(pickle_dict, f)
# Get list of history checkpoints
history_checkpoints_list = [l for l in os.listdir(file_path) if "history" in l]
# Determine the oldest saved loss history and remove it
if len(history_checkpoints_list) > max_to_keep:
oldest_history_path = os.path.join(file_path, f"history_{self.latest-max_to_keep}.pkl")
os.remove(oldest_history_path)
def load_from_file(self, file_path):
"""Loads the most recent saved `LossHistory` object from `file_path`."""
# Logger init
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Get list of histories
if os.path.exists(file_path):
history_checkpoints_list = [l for l in os.listdir(file_path) if LossHistory.file_name in l]
else:
history_checkpoints_list = []
# Case history list is not empty
if len(history_checkpoints_list) > 0:
# Determine which file contains the latest LossHistory and load it
file_numbers = [int(re.findall(r"\d+", h)[0]) for h in history_checkpoints_list]
latest_file = history_checkpoints_list[np.argmax(file_numbers)]
latest_number = np.max(file_numbers)
latest_path = os.path.join(file_path, latest_file)
# Load dictionary
with open(latest_path, "rb") as f:
loaded_history_dict = pickle.load(f)
# Fill public entries
self.latest = latest_number
self.history = loaded_history_dict.get("history", {})
self.val_history = loaded_history_dict.get("val_history", {})
self.loss_names = loaded_history_dict.get("loss_names", [])
self.val_loss_names = loaded_history_dict.get("val_loss_names", [])
# Fill private entries
self._current_run = loaded_history_dict.get("_current_run", 0)
self._total_loss = loaded_history_dict.get("_total_loss", [])
self._total_val_loss = loaded_history_dict.get("_total_val_loss", [])
# Verbose
logger.info(f"Loaded loss history from {latest_path}.")
# Case history list is empty
else:
logger.info("Initialized empty loss history.")
def _to_data_frame(self, history, names):
"""Helper function to convert a history dict into a DataFrame."""
losses_list = [pd.melt(pd.DataFrame.from_dict(history[r], orient="index").T) for r in history]
losses_list = pd.concat(losses_list, axis=0).value.to_list()
losses_list = [l for l in losses_list if l is not None]
losses_df = pd.DataFrame(losses_list, columns=names)
return losses_df
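# Hedged usage sketch (illustrative, not part of the library API): records a
# few scalar losses, prints running means, and retrieves a plottable DataFrame.
def _demo_loss_history():
    history = LossHistory()
    history.start_new_run()
    for epoch in range(1, 3):
        for _ in range(5):
            history.add_entry(epoch, tf.constant(np.random.rand()))
        print(history.get_running_losses(epoch))  # e.g., {'Avg.Loss': 0.43}
    return history.get_plottable()  # a DataFrame with a 'Default.Loss' column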
class SimulationMemory:
"""Helper class to keep track of a pre-determined number of simulations during training."""
file_name = "memory"
def __init__(self, stores_raw=True, capacity_in_batches=50):
self.stores_raw = stores_raw
self._capacity = capacity_in_batches
self._buffer = [None] * self._capacity
self._idx = 0
self.size_in_batches = 0
def store(self, forward_dict):
"""Stores simulation outputs in `forward_dict`, if internal buffer is not full.
Parameters
----------
forward_dict : dict
The configured outputs of the forward model.
"""
# Store only if the internal buffer is not yet full
if not self.is_full():
self._buffer[self._idx] = forward_dict
self._idx += 1
self.size_in_batches += 1
def get_memory(self):
return deepcopy(self._buffer)
def is_full(self):
"""Returns True if the buffer is full, otherwise False."""
if self._idx >= self._capacity:
return True
return False
def save_to_file(self, file_path):
"""Saves a `SimulationMemory` object to a pickled dictionary in file_path."""
# Create path to memory
memory_path = os.path.join(file_path, f"{SimulationMemory.file_name}.pkl")
# Prepare attributes
full_memory_dict = {}
full_memory_dict["stores_raw"] = self.stores_raw
full_memory_dict["_capacity"] = self._capacity
full_memory_dict["_buffer"] = self._buffer
full_memory_dict["_idx"] = self._idx
full_memory_dict["_size_in_batches"] = self.size_in_batches
# Dump as pickle object
with open(memory_path, "wb") as f:
pickle.dump(full_memory_dict, f)
def load_from_file(self, file_path):
"""Loads the saved `SimulationMemory` object from file_path."""
# Logger init
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Create path to memory
memory_path = os.path.join(file_path, f"{SimulationMemory.file_name}.pkl")
# Case memory file exists
if os.path.exists(memory_path):
# Load pickle and fill in attributes
with open(memory_path, "rb") as f:
full_memory_dict = pickle.load(f)
self.stores_raw = full_memory_dict["stores_raw"]
self._capacity = full_memory_dict["_capacity"]
self._buffer = full_memory_dict["_buffer"]
self._idx = full_memory_dict["_idx"]
self.size_in_batches = full_memory_dict["_size_in_batches"]
logger.info(f"Loaded simulation memory from {memory_path}")
# Case memory file does not exist
else:
logger.info("Initialized empty simulation memory.")
class MemoryReplayBuffer:
"""Implements a memory replay buffer for simulation-based inference."""
def __init__(self, capacity_in_batches=500):
"""Creates a circular buffer following the logic of experience replay.
Parameters
----------
capacity_in_batches : int, optional, default: 500
The capacity of the buffer in batches of simulations. Could potentially grow
very large, so make sure you pick a reasonable number!
"""
self._capacity = capacity_in_batches
self._buffer = [None] * self._capacity
self._idx = 0
self._size_in_batches = 0
self._is_full = False
def store(self, forward_dict):
"""Stores simulation outputs, if internal buffer is not full.
Parameters
----------
forward_dict : dict
The configured outputs of the forward model.
"""
# If full, overwrite at index
if self._is_full:
self._overwrite(forward_dict)
# Otherwise still capacity to append
else:
# Add to internal list
self._buffer[self._idx] = forward_dict
# Increment index and # of batches currently stored
self._idx += 1
self._size_in_batches += 1
# Check whether buffer is full and set flag if that's the case
if self._idx == self._capacity:
self._is_full = True
def sample(self):
"""Samples `batch_size` number of parameter vectors and simulations from buffer.
Returns
-------
forward_dict : dict
The (raw or configured) outputs of the forward model.
"""
rand_idx = np.random.default_rng().integers(low=0, high=self._size_in_batches)
return self._buffer[rand_idx]
def _overwrite(self, forward_dict):
"""Overwrites a simulated batch at current position. Only called when the internal buffer is full."""
# Reset index, if at the end of buffer
if self._idx == self._capacity:
self._idx = 0
# Overwrite params and data at index
self._buffer[self._idx] = forward_dict
# Increment index
self._idx += 1
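# Hedged usage sketch (illustrative, not part of the library API): fills a
# small buffer beyond its capacity so old batches are overwritten in circular
# fashion, then samples one stored batch uniformly at random.
def _demo_memory_replay_buffer():
    buffer = MemoryReplayBuffer(capacity_in_batches=3)
    for step in range(5):
        buffer.store({"sim_data": np.ones((4, 2)) * step})
    return buffer.sample()  # one of the three most recently stored batches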
| 31,839 | 37.223289 | 120 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/benchmarks/slcp.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Corresponds to Task T.3 from the paper https://arxiv.org/pdf/2101.04653.pdf
import numpy as np
bayesflow_benchmark_info = {
"simulator_is_batched": False,
"parameter_names": [r"$\theta_{}$".format(i) for i in range(1, 6)],
"configurator_info": "posterior",
}
def prior(lower_bound=-3.0, upper_bound=3.0, rng=None):
"""Generates a random draw from a 5-dimensional uniform prior bounded between
`lower_bound` and `upper_bound` which represents the 5 parameters of the SLCP
simulator.
Parameters
----------
lower_bound : float, optional, default : -3
The lower bound of the uniform prior.
upper_bound : float, optional, default : 3
The upper bound of the uniform prior.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
theta : np.ndarray of shape (5, )
A single draw from the 5-dimensional uniform prior.
"""
if rng is None:
rng = np.random.default_rng()
return rng.uniform(low=lower_bound, high=upper_bound, size=5)
def simulator(theta, n_obs=4, flatten=True, rng=None):
"""Generates data from the SLCP model designed as a benchmark for a simple likelihood
and a complex posterior due to a non-linear pushforward theta -> x.
See https://arxiv.org/pdf/2101.04653.pdf, Benchmark Task T.3
Parameters
----------
theta : np.ndarray of shape (5, )
The 5 parameters of the SLCP model: the first two entries give the 2D location
and the remaining three parameterize the covariance of the Gaussian likelihood.
n_obs : int, optional, default: 4
The number of observations to generate from the slcp likelihood.
flatten : bool, optional, default: True
A flag to indicate whether a 1D (`flatten=True`) or a 2D (`flatten=False`)
representation of the simulated data is returned.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
x : np.ndarray of shape (n_obs*2, ) or (n_obs, 2), as indicated by the `flatten`
boolean flag. The sample of simulated data from the SLCP model.
"""
# Use default RNG, if None specified
if rng is None:
rng = np.random.default_rng()
# Specify 2D location
loc = np.array([theta[0], theta[1]])
# Specify 2D covariance matrix
s1 = theta[2] ** 2
s2 = theta[3] ** 2
rho = np.tanh(theta[4])
cov = rho * s1 * s2
S_theta = np.array([[s1**2, cov], [cov, s2**2]])
# Obtain given number of draws from the MVN likelihood
x = rng.multivariate_normal(loc, S_theta, size=n_obs)
if flatten:
return x.flatten()
return x
def configurator(forward_dict, mode="posterior", scale_data=30.0, as_summary_condition=False):
"""Configures simulator outputs for use in BayesFlow training."""
# Case only posterior configuration
if mode == "posterior":
input_dict = _config_posterior(forward_dict, scale_data, as_summary_condition)
# Case only likelihood configuration
elif mode == "likelihood":
input_dict = _config_likelihood(forward_dict, scale_data)
# Case posterior and likelihood configuration
elif mode == "joint":
input_dict = {}
input_dict["posterior_inputs"] = _config_posterior(forward_dict, scale_data, as_summary_condition)
input_dict["likelihood_inputs"] = _config_likelihood(forward_dict, scale_data)
# Throw otherwise
else:
raise NotImplementedError('For now, only a choice between ["posterior", "likelihood", "joint"] is available!')
return input_dict
def _config_posterior(forward_dict, scale_data, as_summary_condition):
"""Helper function for posterior configuration."""
input_dict = {}
input_dict["parameters"] = forward_dict["prior_draws"].astype(np.float32)
if as_summary_condition:
input_dict["summary_conditions"] = forward_dict["sim_data"].astype(np.float32) / scale_data
else:
input_dict["direct_conditions"] = forward_dict["sim_data"].astype(np.float32) / scale_data
return input_dict
def _config_likelihood(forward_dict, scale_data):
"""Helper function for likelihood configuration."""
input_dict = {}
input_dict["observables"] = forward_dict["sim_data"].astype(np.float32) / scale_data
input_dict["conditions"] = forward_dict["prior_draws"].astype(np.float32)
return input_dict
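# Hedged usage sketch (illustrative, not part of the original module): runs the
# full prior -> simulator -> configurator pipeline for a small batch of draws.
def _demo_slcp_pipeline():
    rng = np.random.default_rng(2023)
    prior_draws = np.stack([prior(rng=rng) for _ in range(64)])
    sim_data = np.stack([simulator(theta, rng=rng) for theta in prior_draws])
    forward_dict = {"prior_draws": prior_draws, "sim_data": sim_data}
    input_dict = configurator(forward_dict)
    # 'parameters': (64, 5); 'direct_conditions': (64, 8), since n_obs=4 and flatten=True
    return input_dict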
| 5,484 | 37.090278 | 118 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/benchmarks/gaussian_mixture.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Corresponds to Task T.7 from the paper https://arxiv.org/pdf/2101.04653.pdf
# NOTE: The paper description uses variances instead of scales for the likelihood,
# but the implementation uses scales. Our implementation uses scales as well (see the simulator docstring)
import numpy as np
bayesflow_benchmark_info = {
"simulator_is_batched": False,
"parameter_names": [r"$\mu_1$", r"$\mu_2$"],
"configurator_info": "posterior",
}
def prior(lower_bound=-10.0, upper_bound=10.0, D=2, rng=None):
"""Generates a random draw from a 2-dimensional uniform prior bounded between
`lower_bound` and `upper_bound` representing the common mean of a 2D Gaussian
mixture model (GMM).
Parameters
----------
lower_bound : float, optional, default : -10
The lower bound of the uniform prior
upper_bound : float, optional, default : 10
The upper bound of the uniform prior
D : int, optional, default: 2
The dimensionality of the mixture model
rng : np.random.Generator or None, default: None
An optional random number generator to use
Returns
-------
theta : np.ndarray of shape (D, )
A single draw from the D-dimensional uniform prior
"""
if rng is None:
rng = np.random.default_rng()
return rng.uniform(low=lower_bound, high=upper_bound, size=D)
def simulator(theta, prob=0.5, scale_c1=1.0, scale_c2=0.1, rng=None):
"""Simulates data from the Gaussian mixture model (GMM) with
shared location vector. For more details, see
https://arxiv.org/pdf/2101.04653.pdf, Benchmark Task T.7
Important: The parameterization uses scales, so take sqrt(var)
if you want to work with variances instead of scales.
Parameters
----------
theta : np.ndarray of shape (D,)
The D-dimensional vector of parameter locations.
prob : float, optional, default: 0.5
The mixture probability (coefficient).
scale_c1 : float, optional, default: 1.
The scale of the first component
scale_c2 : float, optional, default: 0.1
The scale of the second component
rng : np.random.Generator or None, default: None
An optional random number generator to use
Returns
-------
x : np.ndarray of shape (2,)
The 2D vector generated from the GMM simulator.
"""
# Use default RNG, if None specified
if rng is None:
rng = np.random.default_rng()
# Draw component index
idx = rng.binomial(n=1, p=prob)
# Draw 2D-Gaussian sample according to component index
if idx == 0:
return rng.normal(loc=theta, scale=scale_c1)
return rng.normal(loc=theta, scale=scale_c2)
def configurator(forward_dict, mode="posterior", scale_data=12):
"""Configures simulator outputs for use in BayesFlow training."""
# Case only posterior configuration
if mode == "posterior":
input_dict = _config_posterior(forward_dict, scale_data)
# Case only likelihood configuration
elif mode == "likelihood":
input_dict = _config_likelihood(forward_dict, scale_data)
# Case posterior and likelihood configuration
elif mode == "joint":
input_dict = {}
input_dict["posterior_inputs"] = _config_posterior(forward_dict, scale_data)
input_dict["likelihood_inputs"] = _config_likelihood(forward_dict, scale_data)
# Throw otherwise
else:
raise NotImplementedError('For now, only a choice between ["posterior", "likelihood", "joint"] is available!')
return input_dict
def _config_posterior(forward_dict, scale_data):
"""Helper function for posterior configuration."""
input_dict = {}
input_dict["parameters"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["direct_conditions"] = forward_dict["sim_data"].astype(np.float32) / scale_data
return input_dict
def _config_likelihood(forward_dict, scale_data):
"""Helper function for likelihood configuration."""
input_dict = {}
input_dict["conditions"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["observables"] = forward_dict["sim_data"].astype(np.float32) / scale_data
return input_dict
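# Hedged usage sketch (illustrative, not part of the original module): draws
# repeatedly from the GMM likelihood for a single prior draw to illustrate the
# two spread levels (scale 1.0 vs. scale 0.1) around the shared location.
def _demo_gaussian_mixture():
    rng = np.random.default_rng(42)
    theta = prior(rng=rng)  # shared 2D location
    x = np.stack([simulator(theta, rng=rng) for _ in range(1000)])  # (1000, 2)
    return theta, x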
| 5,282 | 36.468085 | 118 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/benchmarks/bernoulli_glm.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Corresponds to Task T.5 from the paper https://arxiv.org/pdf/2101.04653.pdf
import numpy as np
from scipy.special import expit
bayesflow_benchmark_info = {
"simulator_is_batched": False,
"parameter_names": [r"$\beta$"] + [r"$f_{}$".format(i) for i in range(1, 10)],
"configurator_info": "posterior",
}
# Global covariance matrix computed once for efficiency
F = np.zeros((9, 9))
for i in range(9):
F[i, i] = 1 + np.sqrt(i / 9)
if i >= 1:
F[i, i - 1] = -2
if i >= 2:
F[i, i - 2] = 1
Cov = np.linalg.inv(F.T @ F)
def prior(rng=None):
"""Generates a random draw from the custom prior over the 10
Bernoulli GLM parameters (1 intercept and 9 weights). Uses a
global covariance matrix `Cov` for the multivariate Gaussian prior
over the model weights, which is pre-computed for efficiency.
Parameters
----------
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
theta : np.ndarray of shape (10,)
A single draw from the prior.
"""
if rng is None:
rng = np.random.default_rng()
beta = rng.normal(0, 2)
f = rng.multivariate_normal(np.zeros(9), Cov)
return np.append(beta, f)
def simulator(theta, T=100, scale_by_T=True, rng=None):
"""Simulates data from the custom Bernoulli GLM likelihood, see
https://arxiv.org/pdf/2101.04653.pdf, Task T.5
Important: `scale_by_T` should be set to False if the simulator is used
with variable `T` during training, otherwise the information of `T` will
be lost.
Parameters
----------
theta : np.ndarray of shape (10,)
The vector of model parameters (`theta[0]` is intercept, `theta[i], i > 0` are weights).
T : int, optional, default: 100
The simulated duration of the task (eq. the number of Bernoulli draws).
scale_by_T : bool, optional, default: True
A flag indicating whether to scale the summary statistics by T.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
x : np.ndarray of shape (10,)
The vector of sufficient summary statistics of the data.
"""
# Use default RNG, if None provided
if rng is None:
rng = np.random.default_rng()
# Unpack parameters
beta, f = theta[0], theta[1:]
# Generate design matrix
V = rng.normal(size=(9, T))
# Draw from Bernoulli GLM
z = rng.binomial(n=1, p=expit(V.T @ f + beta))
# Compute and return (scaled) sufficient summary statistics
x1 = np.sum(z)
x_rest = V @ z
x = np.append(x1, x_rest)
if scale_by_T:
x /= T
return x
def configurator(forward_dict, mode="posterior"):
"""Configures simulator outputs for use in BayesFlow training."""
# Case only posterior configuration
if mode == "posterior":
input_dict = _config_posterior(forward_dict)
# Case only likelihood configuration
elif mode == "likelihood":
input_dict = _config_likelihood(forward_dict)
# Case posterior and likelihood configuration (i.e., joint inference)
elif mode == "joint":
input_dict = {}
input_dict["posterior_inputs"] = _config_posterior(forward_dict)
input_dict["likelihood_inputs"] = _config_likelihood(forward_dict)
# Throw otherwise
else:
raise NotImplementedError('For now, only a choice between ["posterior", "likelihood", "joint"] is available!')
return input_dict
def _config_posterior(forward_dict):
"""Helper function for posterior configuration."""
input_dict = {}
input_dict["parameters"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["direct_conditions"] = forward_dict["sim_data"].astype(np.float32)
return input_dict
def _config_likelihood(forward_dict):
"""Helper function for likelihood configuration."""
input_dict = {}
input_dict["conditions"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["observables"] = forward_dict["sim_data"].astype(np.float32)
return input_dict
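# A minimal usage sketch, assuming only this module's own `prior` and `simulator`;
# the seed and sizes below are illustrative choices, not prescribed by the benchmark.
if __name__ == "__main__":
    _rng = np.random.default_rng(42)
    _theta = prior(rng=_rng)  # shape (10,): intercept beta plus 9 weights f
    _x = simulator(_theta, T=100, rng=_rng)  # shape (10,): summary statistics, scaled by T
    print(_theta.shape, _x.shape)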
| 5,253 | 33.339869 | 118 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/benchmarks/two_moons.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Corresponds to Task T.8 from the paper https://arxiv.org/pdf/2101.04653.pdf
import numpy as np
bayesflow_benchmark_info = {
"simulator_is_batched": False,
"parameter_names": [r"$\theta_1$", r"$\theta_2$"],
"configurator_info": "posterior",
}
def prior(lower_bound=-1.0, upper_bound=1.0, rng=None):
"""Generates a random draw from a 2-dimensional uniform prior bounded between
`lower_bound` and `upper_bound` which represents the two parameters of the two moons simulator.
Parameters
----------
lower_bound : float, optional, default : -1
The lower bound of the uniform prior.
upper_bound : float, optional, default : 1
The upper bound of the uniform prior.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
theta : np.ndarray of shape (2,)
A single draw from the 2-dimensional uniform prior.
"""
if rng is None:
rng = np.random.default_rng()
return rng.uniform(low=lower_bound, high=upper_bound, size=2)
def simulator(theta, rng=None):
"""Implements data generation from the two-moons model with a bimodal posterior.
See https://arxiv.org/pdf/2101.04653.pdf, Benchmark Task T.8
Parameters
----------
theta : np.ndarray of shape (2,)
The vector of two model parameters.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
x : np.ndarray of shape (2,)
The 2D vector generated from the two moons simulator.
"""
# Use default RNG, if None specified
if rng is None:
rng = np.random.default_rng()
# Generate noise
alpha = rng.uniform(low=-0.5 * np.pi, high=0.5 * np.pi)
r = rng.normal(loc=0.1, scale=0.01)
# Forward process
rhs1 = np.array([r * np.cos(alpha) + 0.25, r * np.sin(alpha)])
rhs2 = np.array([-np.abs(theta[0] + theta[1]) / np.sqrt(2.0), (-theta[0] + theta[1]) / np.sqrt(2.0)])
return rhs1 + rhs2
def configurator(forward_dict, mode="posterior"):
"""Configures simulator outputs for use in BayesFlow training."""
# Case only posterior configuration
if mode == "posterior":
input_dict = _config_posterior(forward_dict)
    # Case only likelihood configuration
elif mode == "likelihood":
input_dict = _config_likelihood(forward_dict)
# Case posterior and likelihood configuration (i.e., joint inference)
elif mode == "joint":
input_dict = {}
input_dict["posterior_inputs"] = _config_posterior(forward_dict)
input_dict["likelihood_inputs"] = _config_likelihood(forward_dict)
# Throw otherwise
else:
raise NotImplementedError('For now, only a choice between ["posterior", "likelihood", "joint"] is available!')
return input_dict
def _config_posterior(forward_dict):
"""Helper function for posterior configuration."""
input_dict = {}
input_dict["parameters"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["direct_conditions"] = forward_dict["sim_data"].astype(np.float32)
return input_dict
def _config_likelihood(forward_dict):
"""Helper function for likelihood configuration."""
input_dict = {}
input_dict["conditions"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["observables"] = forward_dict["sim_data"].astype(np.float32)
return input_dict
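# A minimal forward-simulation sketch, assuming only this module's own functions;
# the seed is an arbitrary illustrative choice. Note that theta = (t1, t2) and
# (-t2, -t1) yield the same likelihood, which is what makes the posterior bimodal.
if __name__ == "__main__":
    _rng = np.random.default_rng(42)
    _theta = prior(rng=_rng)  # shape (2,)
    _x = np.stack([simulator(_theta, rng=_rng) for _ in range(5)])
    print(_x.shape)  # (5, 2): five noisy draws tracing one crescent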
| 4,571 | 35 | 118 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/benchmarks/inverse_kinematics.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Corresponds to Task 1 from the paper https://arxiv.org/pdf/2101.10763.pdf
import numpy as np
bayesflow_benchmark_info = {"simulator_is_batched": False, "parameter_names": None, "configurator_info": "posterior"}
def prior(scales=None, rng=None):
"""Generates a random draw from a 4-dimensional Gaussian prior distribution with a
spherical convariance matrix. The parameters represent a robot's arm configuration,
with the first parameter indicating the arm's height and the remaining three are
angles.
Parameters
----------
scales : np.ndarray of shape (4,) or None, optional, default : None
The four scales of the Gaussian prior.
If ``None`` provided, the scales from https://arxiv.org/pdf/2101.10763.pdf
will be used: [0.25, 0.5, 0.5, 0.5]
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
theta : np.ndarray of shape (4, )
A single draw from the 4-dimensional Gaussian prior.
"""
if rng is None:
rng = np.random.default_rng()
if scales is None:
scales = np.array([0.25, 0.5, 0.5, 0.5])
return rng.normal(loc=0, scale=scales)
def simulator(theta, l1=0.5, l2=0.5, l3=1.0, **kwargs):
"""Returns the 2D coordinates of a robot arm given parameter vector.
The first parameter represents the arm's height and the remaining three
correspond to angles.
Parameters
----------
    theta : np.ndarray of shape (4,)
        The four model parameters which determine the arm's 2D coordinates
l1 : float, optional, default: 0.5
The length of the first segment
l2 : float, optional, default: 0.5
The length of the second segment
l3 : float, optional, default: 1.0
The length of the third segment
**kwargs : dict, optional, default: {}
        Used for compatibility with the other benchmarks, as the model is deterministic
Returns
-------
x : np.ndarray of shape (2, )
The 2D coordinates of the arm
"""
# Determine 2D position
x1 = l1 * np.sin(theta[1])
x1 += l2 * np.sin(theta[1] + theta[2])
x1 += l3 * np.sin(theta[1] + theta[2] + theta[3]) + theta[0]
x2 = l1 * np.cos(theta[1])
x2 += l2 * np.cos(theta[1] + theta[2])
x2 += l3 * np.cos(theta[1] + theta[2] + theta[3])
return np.array([x1, x2])
def configurator(forward_dict, mode="posterior"):
"""Configures simulator outputs for use in BayesFlow training."""
# Case only posterior configuration
if mode == "posterior":
input_dict = _config_posterior(forward_dict)
# Case only likelihood configuration
elif mode == "likelihood":
input_dict = _config_likelihood(forward_dict)
# Case posterior and likelihood configuration
elif mode == "joint":
input_dict = {}
input_dict["posterior_inputs"] = _config_posterior(forward_dict)
input_dict["likelihood_inputs"] = _config_likelihood(forward_dict)
# Throw otherwise
else:
raise NotImplementedError('For now, only a choice between ["posterior", "likelihood", "joint"] is available!')
return input_dict
def _config_posterior(forward_dict):
"""Helper function for posterior configuration."""
input_dict = {}
input_dict["parameters"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["direct_conditions"] = forward_dict["sim_data"].astype(np.float32)
return input_dict
def _config_likelihood(forward_dict):
"""Helper function for likelihood configuration."""
input_dict = {}
input_dict["conditions"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["observables"] = forward_dict["sim_data"].astype(np.float32)
return input_dict
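# A minimal usage sketch, assuming only this module's own functions; since the
# forward model is deterministic, repeated calls with the same theta agree exactly.
if __name__ == "__main__":
    _rng = np.random.default_rng(42)
    _theta = prior(rng=_rng)  # shape (4,): height plus three joint angles
    _x = simulator(_theta)  # shape (2,): end position of the arm
    print(np.allclose(_x, simulator(_theta)))  # True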
| 4,897 | 36.676923 | 118 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/benchmarks/gaussian_linear_uniform.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Corresponds to Task T.2 from the paper https://arxiv.org/pdf/2101.04653.pdf
# NOTE: The paper description uses a variance of 0.1 for the likelihood function,
# but the reference implementation uses a scale of 0.1. Our implementation uses a
# default scale of 0.1 for consistency with the reference implementation.
import numpy as np
bayesflow_benchmark_info = {"simulator_is_batched": True, "parameter_names": None, "configurator_info": "posterior"}
def prior(D=10, lower_bound=-1.0, upper_bound=1.0, rng=None):
"""Generates a random draw from a D-dimensional uniform prior bounded between
`lower_bound` and `upper_bound` which represents the location vector of
a (conjugate) Gaussian likelihood.
Parameters
----------
D : int, optional, default : 10
The dimensionality of the Gaussian prior.
lower_bound : float, optional, default : -1.
The lower bound of the uniform prior.
upper_bound : float, optional, default : 1.
The upper bound of the uniform prior.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
theta : np.ndarray of shape (D, )
A single draw from the D-dimensional uniform prior.
"""
if rng is None:
rng = np.random.default_rng()
return rng.uniform(low=lower_bound, high=upper_bound, size=D)
def simulator(theta, n_obs=None, scale=0.1, rng=None):
"""Generates batched draws from a D-dimenional Gaussian distributions given a batch of
location (mean) parameters of D dimensions. Assumes a spherical convariance matrix given
by scale * I_D.
Parameters
----------
    theta : np.ndarray of shape (batch_size, D)
The location parameters of the Gaussian likelihood.
n_obs : int or None, optional, default: None
The number of observations to draw from the likelihood given the location
parameter `theta`. If None, a single draw is produced.
scale : float, optional, default : 0.1
The scale of the Gaussian likelihood.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
x : np.ndarray of shape (theta.shape[0], theta.shape[1]) if n_obs is None,
else np.ndarray of shape (theta.shape[0], n_obs, theta.shape[1])
A single draw or a sample from a batch of Gaussians.
"""
# Use default RNG, if None provided
if rng is None:
rng = np.random.default_rng()
    # Generate prior predictive samples; a single draw if n_obs is None
if n_obs is None:
return rng.normal(loc=theta, scale=scale)
x = rng.normal(loc=theta, scale=scale, size=(n_obs, theta.shape[0], theta.shape[1]))
return np.transpose(x, (1, 0, 2))
def configurator(forward_dict, mode="posterior"):
"""Configures simulator outputs for use in BayesFlow training."""
# Case only posterior configuration
if mode == "posterior":
input_dict = _config_posterior(forward_dict)
# Case only likelihood configuration
elif mode == "likelihood":
input_dict = _config_likelihood(forward_dict)
# Case posterior and likelihood configuration (i.e., joint inference)
elif mode == "joint":
input_dict = {}
input_dict["posterior_inputs"] = _config_posterior(forward_dict)
input_dict["likelihood_inputs"] = _config_likelihood(forward_dict)
# Throw otherwise
else:
raise NotImplementedError('For now, only a choice between ["posterior", "likelihood", "joint"] is available!')
return input_dict
def _config_posterior(forward_dict):
"""Helper function for posterior configuration."""
input_dict = {}
input_dict["parameters"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["direct_conditions"] = forward_dict["sim_data"].astype(np.float32)
return input_dict
def _config_likelihood(forward_dict):
"""Helper function for likelihood configuration."""
input_dict = {}
input_dict["conditions"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["observables"] = forward_dict["sim_data"].astype(np.float32)
return input_dict
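# A minimal shape sketch, assuming only this module's own functions; this
# simulator is batched, so it expects a 2D array of location vectors.
if __name__ == "__main__":
    _rng = np.random.default_rng(42)
    _thetas = np.stack([prior(rng=_rng) for _ in range(8)])  # (8, 10)
    print(simulator(_thetas, rng=_rng).shape)  # (8, 10): one draw per location
    print(simulator(_thetas, n_obs=5, rng=_rng).shape)  # (8, 5, 10): five draws each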
| 5,285 | 39.351145 | 118 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/benchmarks/slcp_distractors.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Corresponds to Task T.4 from the paper https://arxiv.org/pdf/2101.04653.pdf
import numpy as np
from scipy.stats import multivariate_t
bayesflow_benchmark_info = {
"simulator_is_batched": False,
"parameter_names": [r"$\theta_{}$".format(i) for i in range(1, 6)],
"configurator_info": "posterior",
}
def get_random_student_t(dim=2, mu_scale=15, shape_scale=0.01, rng=None):
"""A helper function to create a "frozen" multivariate student-t distribution of dimensions `dim`.
Parameters
----------
dim : int, optional, default: 2
The dimensionality of the student-t distribution.
mu_scale : float, optional, default: 15
The scale of the zero-centered Gaussian prior from which the mean vector
of the student-t distribution is drawn.
shape_scale : float, optional, default: 0.01
The scale of the assumed `np.eye(dim)` shape matrix. The default is chosen to keep
the scale of the distractors and observations relatively similar.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
student : callable (scipy.stats._multivariate.multivariate_t_frozen)
The student-t generator.
"""
# Use default RNG, if None provided
if rng is None:
rng = np.random.default_rng()
# Draw mean
mu = mu_scale * rng.normal(size=dim)
# Return student-t object
return multivariate_t(loc=mu, shape=shape_scale, df=2, allow_singular=True, seed=rng)
def draw_mixture_student_t(num_students, n_draws=46, dim=2, mu_scale=15.0, rng=None):
"""Helper function to generate `n_draws` random draws from a mixture of `num_students`
multivariate Student-t distributions.
    Uses the function `get_random_student_t` to create each of the student-t callable objects.
Parameters
----------
num_students : int
The number of multivariate student-t mixture components
n_draws : int, optional, default: 46
The number of draws to obtain from the mixture distribution.
dim : int, optional, default: 2
The dimensionality of each student-t distribution in the mixture.
mu_scale : float, optional, default: 15
The scale of the zero-centered Gaussian prior from which the mean vector
of each student-t distribution in the mixture is drawn.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
sample : np.ndarray of shape (n_draws, dim)
The random draws from the mixture of students.
"""
# Use default RNG, if None provided
if rng is None:
rng = np.random.default_rng()
# Obtain a list of scipy frozen distributions (each will have a different mean)
students = [get_random_student_t(dim, mu_scale, rng=rng) for _ in range(num_students)]
# Obtain the sample of n_draws from the mixture and return
sample = [students[rng.integers(low=0, high=num_students)].rvs() for _ in range(n_draws)]
return np.array(sample)
def prior(lower_bound=-3.0, upper_bound=3.0, rng=None):
"""Generates a random draw from a 5-dimensional uniform prior bounded between
`lower_bound` and `upper_bound`.
Parameters
----------
lower_bound : float, optional, default : -3
The lower bound of the uniform prior.
upper_bound : float, optional, default : 3
The upper bound of the uniform prior.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
theta : np.ndarray of shape (5, )
A single draw from the 5-dimensional uniform prior.
"""
if rng is None:
rng = np.random.default_rng()
return rng.uniform(low=lower_bound, high=upper_bound, size=5)
def simulator(theta, n_obs=4, n_dist=46, dim=2, mu_scale=15.0, flatten=True, rng=None):
"""Generates data from the SLCP model designed as a benchmark for a simple likelihood
and a complex posterior due to a non-linear pushforward theta -> x. In addition, it
outputs uninformative distractor data.
See https://arxiv.org/pdf/2101.04653.pdf, Benchmark Task T.4
Parameters
----------
    theta : np.ndarray of shape (5,)
        The 5-dimensional vector of model parameters.
n_obs : int, optional, default: 4
The number of observations to generate from the slcp likelihood.
n_dist : int, optional, default: 46
        The number of distractors to draw from the distractor likelihood.
dim : int, optional, default: 2
The dimensionality of each student-t distribution in the mixture.
mu_scale : float, optional, default: 15
The scale of the zero-centered Gaussian prior from which the mean vector
of each student-t distribution in the mixture is drawn.
flatten : bool, optional, default: True
        A flag to indicate whether a 1D (`flatten=True`) or a 2D (`flatten=False`)
representation of the simulated data is returned.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
x : np.ndarray of shape (n_obs*2 + n_dist*2,) if `flatten=True`, otherwise
np.ndarray of shape (n_obs + n_dist, 2) if `flatten=False`
"""
# Use default RNG, if None specified
if rng is None:
rng = np.random.default_rng()
# Specify 2D location
loc = np.array([theta[0], theta[1]])
# Specify 2D covariance matrix
s1 = theta[2] ** 2
s2 = theta[3] ** 2
rho = np.tanh(theta[4])
cov = rho * s1 * s2
S_theta = np.array([[s1**2, cov], [cov, s2**2]])
# Obtain informative part of the data
x_info = rng.multivariate_normal(loc, S_theta, size=n_obs)
# Obtain uninformative part of the data
x_uninfo = draw_mixture_student_t(num_students=20, n_draws=n_dist, dim=dim, mu_scale=mu_scale, rng=rng)
# Concatenate informative with uninformative and return
x = np.concatenate([x_info, x_uninfo], axis=0)
if flatten:
return x.flatten()
return x
def configurator(forward_dict, mode="posterior", scale_data=50.0, as_summary_condition=False):
"""Configures simulator outputs for use in BayesFlow training."""
# Case only posterior configuration
if mode == "posterior":
input_dict = _config_posterior(forward_dict, scale_data, as_summary_condition)
# Case only likelihood configuration
elif mode == "likelihood":
input_dict = _config_likelihood(forward_dict, scale_data)
# Case posterior and likelihood configuration
elif mode == "joint":
input_dict = {}
input_dict["posterior_inputs"] = _config_posterior(forward_dict, scale_data, as_summary_condition)
input_dict["likelihood_inputs"] = _config_likelihood(forward_dict, scale_data)
# Throw otherwise
else:
raise NotImplementedError('For now, only a choice between ["posterior", "likelihood", "joint"] is available!')
return input_dict
def _config_posterior(forward_dict, scale_data, as_summary_condition):
"""Helper function for posterior configuration."""
input_dict = {}
input_dict["parameters"] = forward_dict["prior_draws"].astype(np.float32)
if as_summary_condition:
input_dict["summary_conditions"] = forward_dict["sim_data"].astype(np.float32) / scale_data
else:
input_dict["direct_conditions"] = forward_dict["sim_data"].astype(np.float32) / scale_data
return input_dict
def _config_likelihood(forward_dict, scale_data):
"""Helper function for likelihood configuration."""
input_dict = {}
input_dict["observables"] = forward_dict["sim_data"].astype(np.float32) / scale_data
input_dict["conditions"] = forward_dict["prior_draws"].astype(np.float32)
return input_dict
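# A minimal usage sketch, assuming only this module's own functions; a single
# data set concatenates 4 informative Gaussian draws with 46 Student-t distractors.
if __name__ == "__main__":
    _rng = np.random.default_rng(42)
    _theta = prior(rng=_rng)  # shape (5,)
    print(simulator(_theta, rng=_rng).shape)  # (100,) = (4 + 46) * 2, flattened
    print(simulator(_theta, flatten=False, rng=_rng).shape)  # (50, 2)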
| 9,028 | 38.256522 | 118 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/benchmarks/gaussian_linear.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Corresponds to Task T.1 from the paper https://arxiv.org/pdf/2101.04653.pdf
# NOTE: The paper description uses a variance of 0.1 for the prior and likelihood,
# but the reference implementation uses a scale of 0.1. Our implementation uses a
# default scale of 0.1 for consistency with the reference implementation.
import numpy as np
bayesflow_benchmark_info = {"simulator_is_batched": True, "parameter_names": None, "configurator_info": "posterior"}
def prior(D=10, scale=0.1, rng=None):
"""Generates a random draw from a D-dimensional Gaussian prior distribution with a
    spherical scale matrix given by scale * I_D. Represents the location vector of
a (conjugate) Gaussian likelihood.
Parameters
----------
D : int, optional, default : 10
The dimensionality of the Gaussian prior distribution.
scale : float, optional, default : 0.1
The scale of the Gaussian prior.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
theta : np.ndarray of shape (D, )
A single draw from the D-dimensional Gaussian prior.
"""
if rng is None:
rng = np.random.default_rng()
return scale * rng.normal(size=D)
def simulator(theta, n_obs=None, scale=0.1, rng=None):
"""Generates batched draws from a D-dimenional Gaussian distributions given a batch of
location (mean) parameters of D dimensions. Assumes a spherical convariance matrix given
by scale * I_D.
Parameters
----------
    theta : np.ndarray of shape (batch_size, D)
The location parameters of the Gaussian likelihood.
n_obs : int or None, optional, default: None
The number of observations to draw from the likelihood given the location
        parameter `theta`. If `n_obs is None`, a single draw is produced.
scale : float, optional, default : 0.1
The scale of the Gaussian likelihood.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
x : np.ndarray of shape (theta.shape[0], theta.shape[1]) if n_obs is None,
else np.ndarray of shape (theta.shape[0], n_obs, theta.shape[1])
A single draw or a sample from a batch of Gaussians.
"""
# Use default RNG, if None provided
if rng is None:
rng = np.random.default_rng()
    # Generate prior predictive samples; a single draw if n_obs is None
if n_obs is None:
return rng.normal(loc=theta, scale=scale)
x = rng.normal(loc=theta, scale=scale, size=(n_obs, theta.shape[0], theta.shape[1]))
return np.transpose(x, (1, 0, 2))
def configurator(forward_dict, mode="posterior"):
"""Configures simulator outputs for use in BayesFlow training."""
# Case only posterior configuration
if mode == "posterior":
input_dict = _config_posterior(forward_dict)
    # Case only likelihood configuration
elif mode == "likelihood":
input_dict = _config_likelihood(forward_dict)
# Case posterior and likelihood configuration (i.e., joint inference)
elif mode == "joint":
input_dict = {}
input_dict["posterior_inputs"] = _config_posterior(forward_dict)
input_dict["likelihood_inputs"] = _config_likelihood(forward_dict)
# Throw otherwise
else:
raise NotImplementedError('For now, only a choice between ["posterior", "likelihood", "joint"] is available!')
return input_dict
def _config_posterior(forward_dict):
"""Helper function for posterior configuration."""
input_dict = {}
input_dict["parameters"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["direct_conditions"] = forward_dict["sim_data"].astype(np.float32)
return input_dict
def _config_likelihood(forward_dict):
"""Helper function for likelihood configuration."""
input_dict = {}
input_dict["conditions"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["observables"] = forward_dict["sim_data"].astype(np.float32)
return input_dict
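# A worked conjugacy sketch, assuming only this module's own functions: with the
# prior N(0, scale^2 * I_D) and likelihood N(theta, scale^2 * I_D), the analytic
# posterior after a single observation x is N(x / 2, (scale^2 / 2) * I_D), since
# the equal prior and likelihood precisions add up.
if __name__ == "__main__":
    _rng = np.random.default_rng(42)
    _thetas = prior(D=10, rng=_rng)[np.newaxis, :]  # (1, 10), batched format
    _x = simulator(_thetas, rng=_rng)  # (1, 10), one observation per location
    print(_x[0] / 2)  # analytic posterior mean for this data set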
| 5,163 | 39.031008 | 118 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/benchmarks/lotka_volterra.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Corresponds to Task T.10 from the paper https://arxiv.org/pdf/2101.04653.pdf
import numpy as np
from scipy.integrate import odeint
bayesflow_benchmark_info = {
"simulator_is_batched": False,
"parameter_names": [r"$\alpha$", r"$\beta$", r"$\gamma$", r"$\delta$"],
"configurator_info": "posterior",
}
def prior(rng=None):
"""Generates a random draw from a 4-dimensional (independent) lognormal prior
which represents the four contact parameters of the Lotka-Volterra model.
Parameters
----------
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
theta : np.ndarray of shape (4,)
A single draw from the 4-dimensional prior.
"""
if rng is None:
rng = np.random.default_rng()
theta = rng.lognormal(mean=[-0.125, -3, -0.125, -3], sigma=0.5)
return theta
def _deriv(x, t, alpha, beta, gamma, delta):
"""Helper function for scipy.integrate.odeint."""
X, Y = x
dX = alpha * X - beta * X * Y
dY = -gamma * Y + delta * X * Y
return dX, dY
def simulator(theta, X0=30, Y0=1, T=20, subsample=10, flatten=True, obs_noise=0.1, rng=None):
"""Runs a Lotka-Volterra simulation for T time steps and returns `subsample` evenly spaced
points from the simulated trajectory, given contact parameters `theta`.
See https://arxiv.org/pdf/2101.04653.pdf, Benchmark Task T.10.
Parameters
----------
    theta : np.ndarray of shape (4,)
        The 4-dimensional vector of model parameters (alpha, beta, gamma, delta).
X0 : float, optional, default: 30
Initial number of prey species.
Y0 : float, optional, default: 1
Initial number of predator species.
    T : int, optional, default: 20
The duration (time horizon) of the simulation.
subsample : int or None, optional, default: 10
The number of evenly spaced time points to return. If None,
no subsampling will be performed and all T timepoints will be returned.
flatten : bool, optional, default: True
        A flag to indicate whether a 1D (`flatten=True`) or a 2D (`flatten=False`)
representation of the simulated data is returned.
obs_noise : float, optional, default: 0.1
The standard deviation of the log-normal likelihood.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
x : np.ndarray of shape (subsample, 2) or (subsample*2,) if `subsample is not None`,
otherwise shape (T, 2) or (T*2,) if `subsample is None`.
        The time series of simulated predator and prey populations.
"""
# Use default RNG, if None specified
if rng is None:
rng = np.random.default_rng()
# Create vector (list) of initial conditions
x0 = X0, Y0
# Unpack parameter vector into scalars
alpha, beta, gamma, delta = theta
    # Prepare a time vector between 0 and T of length T
t_vec = np.linspace(0, T, T)
    # Integrate the Lotka-Volterra ODE system using scipy
pp = odeint(_deriv, x0, t_vec, args=(alpha, beta, gamma, delta))
# Subsample evenly the specified number of points, if specified
if subsample is not None:
pp = pp[:: (T // subsample)]
    # Ensure the minimum count is 0, which the log1p transform below maps to 0
pp[pp < 0] = 0.0
# Add noise, decide whether to flatten and return
x = rng.lognormal(np.log1p(pp), sigma=obs_noise)
if flatten:
return x.flatten()
return x
def configurator(forward_dict, mode="posterior", scale_data=1000, as_summary_condition=False):
"""Configures simulator outputs for use in BayesFlow training."""
# Case only posterior configuration
if mode == "posterior":
input_dict = _config_posterior(forward_dict, scale_data, as_summary_condition)
# Case only likelihood configuration
elif mode == "likelihood":
input_dict = _config_likelihood(forward_dict, scale_data)
# Case posterior and likelihood configuration
elif mode == "joint":
input_dict = {}
input_dict["posterior_inputs"] = _config_posterior(forward_dict, scale_data, as_summary_condition)
input_dict["likelihood_inputs"] = _config_likelihood(forward_dict, scale_data)
# Throw otherwise
else:
raise NotImplementedError('For now, only a choice between ["posterior", "likelihood", "joint"] is available!')
return input_dict
def _config_posterior(forward_dict, scale_data, as_summary_condition):
"""Helper function for posterior configuration."""
input_dict = {}
input_dict["parameters"] = forward_dict["prior_draws"].astype(np.float32)
if as_summary_condition:
input_dict["summary_conditions"] = forward_dict["sim_data"].astype(np.float32) / scale_data
else:
input_dict["direct_conditions"] = forward_dict["sim_data"].astype(np.float32) / scale_data
return input_dict
def _config_likelihood(forward_dict, scale_data):
"""Helper function for likelihood configuration."""
input_dict = {}
input_dict["observables"] = forward_dict["sim_data"].astype(np.float32) / scale_data
input_dict["conditions"] = forward_dict["prior_draws"].astype(np.float32)
return input_dict
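# A minimal usage sketch, assuming only this module's own functions; with the
# defaults, 10 of the 20 simulated time points are kept for both species.
if __name__ == "__main__":
    _rng = np.random.default_rng(42)
    _theta = prior(rng=_rng)  # shape (4,): alpha, beta, gamma, delta
    print(simulator(_theta, rng=_rng).shape)  # (20,) = 10 time points * 2 species
    print(simulator(_theta, flatten=False, rng=_rng).shape)  # (10, 2)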
| 6,416 | 36.747059 | 118 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/benchmarks/__init__.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module implements all 10 benchmark models (tasks) from the paper:
#
# Lueckmann, J. M., Boelts, J., Greenberg, D., Goncalves, P., & Macke, J. (2021).
# Benchmarking simulation-based inference.
# In International Conference on Artificial Intelligence and Statistics (pp. 343-351). PMLR.
#
# https://arxiv.org/pdf/2101.04653.pdf
#
# However, it lifts the dependency on `PyTorch` and implements the models as ready-made
# tuples of prior and simulator functions capable of interacting with BayesFlow.
# Note: All default hyperparameters are set according to the paper.
import importlib
from functools import partial
import numpy as np
from bayesflow.exceptions import ConfigurationError
from bayesflow.simulation import GenerativeModel, Prior
available_benchmarks = [
"gaussian_linear",
"gaussian_linear_uniform",
"slcp",
"slcp_distractors",
"bernoulli_glm",
"bernoulli_glm_raw",
"gaussian_mixture",
"two_moons",
"sir",
"lotka_volterra",
"inverse_kinematics",
]
def get_benchmark_module(benchmark_name):
"""Loads the corresponding benchmark file under bayesflow.benchmarks.<benchmark_name> as a
module and returns it.
"""
try:
benchmark_module = importlib.import_module(f"bayesflow.benchmarks.{benchmark_name}")
return benchmark_module
except ModuleNotFoundError:
raise ConfigurationError(f"You need to provide a valid name from: {available_benchmarks}")
class Benchmark:
"""Interface class for a benchmark."""
def __init__(self, name, mode="joint", seed=None, **kwargs):
"""Creates a benchmark generative model by using the blueprint contained
in a benchmark file.
Parameters
----------
name : str
The name of the benchmark file (without suffix, i.e., .py) to use as a blueprint.
        mode : str, optional, default: 'joint'
The mode in which to configure the data, should be in ('joint', 'posterior', 'likelihood')
seed : int or None, optional, default: None
The seed to use if reproducibility is required. Will be passed to a numpy RNG.
**kwargs : dict
Optional keyword arguments.
If 'sim_kwargs' is present, key-value pairs will be interpreted as arguments for the simulator
and propagated accordingly.
If 'prior_kwargs' is present, key-value pairs will be interpreted as arguments for the prior
and propagated accordingly.
"""
self.benchmark_name = name
self._rng = np.random.default_rng(seed)
self.benchmark_module = get_benchmark_module(self.benchmark_name)
self.benchmark_info = getattr(self.benchmark_module, "bayesflow_benchmark_info")
# Prepare partial simulator function with optional keyword arguments
if kwargs.get("sim_kwargs") is not None:
_simulator = partial(
getattr(self.benchmark_module, "simulator"), rng=self._rng, **kwargs.pop("sim_kwargs", {})
)
else:
_simulator = partial(getattr(self.benchmark_module, "simulator"), rng=self._rng)
# Prepare partial prior function with optional keyword arguments
if kwargs.get("prior_kwargs") is not None:
_prior = partial(getattr(self.benchmark_module, "prior"), rng=self._rng, **kwargs.pop("prior_kwargs", {}))
else:
_prior = partial(getattr(self.benchmark_module, "prior"), rng=self._rng)
# Prepare generative model
self.generative_model = GenerativeModel(
prior=Prior(
prior_fun=_prior,
param_names=self.benchmark_info["parameter_names"],
),
simulator=_simulator,
simulator_is_batched=self.benchmark_info["simulator_is_batched"],
name=self.benchmark_name,
)
self.configurator = getattr(self.benchmark_module, "configurator")
self.configurator = partial(self.configurator, mode=mode)
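# A minimal usage sketch; it assumes that ``GenerativeModel`` instances are callable
# with a ``batch_size`` argument, as elsewhere in BayesFlow, and uses the two_moons
# benchmark purely for illustration.
if __name__ == "__main__":
    benchmark = Benchmark("two_moons", mode="posterior", seed=42)
    forward_dict = benchmark.generative_model(batch_size=16)
    input_dict = benchmark.configurator(forward_dict)
    print(input_dict["parameters"].shape)  # (16, 2)
    print(input_dict["direct_conditions"].shape)  # (16, 2)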
| 5,141 | 40.467742 | 118 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/benchmarks/bernoulli_glm_raw.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Corresponds to Task T.6 from the paper https://arxiv.org/pdf/2101.04653.pdf
import numpy as np
from scipy.special import expit
bayesflow_benchmark_info = {
"simulator_is_batched": False,
"parameter_names": [r"$\beta$"] + [r"$f_{}$".format(i) for i in range(1, 10)],
"configurator_info": "posterior",
}
# Global covariance matrix computed once for efficiency
F = np.zeros((9, 9))
for i in range(9):
F[i, i] = 1 + np.sqrt(i / 9)
if i >= 1:
F[i, i - 1] = -2
if i >= 2:
F[i, i - 2] = 1
Cov = np.linalg.inv(F.T @ F)
def prior(rng=None):
"""Generates a random draw from the custom prior over the 10
Bernoulli GLM parameters (1 intercept and 9 weights). Uses a
global covariance matrix `Cov` for the multivariate Gaussian prior
over the model weights, which is pre-computed for efficiency.
Parameters
----------
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
theta : np.ndarray of shape (10,)
A single draw from the prior.
"""
if rng is None:
rng = np.random.default_rng()
beta = rng.normal(0, 2)
f = rng.multivariate_normal(np.zeros(9), Cov)
return np.append(beta, f)
def simulator(theta, T=100, rng=None):
"""Simulates data from the custom Bernoulli GLM likelihood, see:
https://arxiv.org/pdf/2101.04653.pdf, Task T.6
Returns the raw Bernoulli data.
Parameters
----------
theta : np.ndarray of shape (10,)
The vector of model parameters (`theta[0]` is intercept, `theta[i], i > 0` are weights)
T : int, optional, default: 100
        The simulated duration of the task (i.e., the number of Bernoulli draws).
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
x : np.ndarray of shape (T, 10)
The full simulated set of Bernoulli draws and design matrix.
Should be configured with an additional trailing dimension if the data is (properly) to be treated as a set.
"""
# Use default RNG, if None provided
if rng is None:
rng = np.random.default_rng()
# Unpack parameters
beta, f = theta[0], theta[1:]
# Generate design matrix
V = rng.normal(size=(9, T))
# Draw from Bernoulli GLM and return
z = rng.binomial(n=1, p=expit(V.T @ f + beta))
return np.c_[np.expand_dims(z, axis=-1), V.T]
def configurator(forward_dict, mode="posterior", as_summary_condition=False):
"""Configures simulator outputs for use in BayesFlow training."""
# Case only posterior configuration
if mode == "posterior":
input_dict = _config_posterior(forward_dict, as_summary_condition)
# Case only likelihood configuration
elif mode == "likelihood":
input_dict = _config_likelihood(forward_dict)
# Case posterior and likelihood configuration
elif mode == "joint":
input_dict = {}
input_dict["posterior_inputs"] = _config_posterior(forward_dict, as_summary_condition)
input_dict["likelihood_inputs"] = _config_likelihood(forward_dict)
# Throw otherwise
else:
raise NotImplementedError('For now, only a choice between ["posterior", "likelihood", "joint"] is available!')
return input_dict
def _config_posterior(forward_dict, as_summary_condition):
"""Helper function for posterior configuration."""
input_dict = {}
input_dict["parameters"] = forward_dict["prior_draws"].astype(np.float32)
# Return 3D output
if as_summary_condition:
input_dict["summary_conditions"] = forward_dict["sim_data"].astype(np.float32)
# Flatten along 2nd and 3rd axis
else:
x = forward_dict["sim_data"]
x = x.reshape(x.shape[0], -1)
input_dict["direct_conditions"] = x.astype(np.float32)
return input_dict
def _config_likelihood(forward_dict):
"""Helper function for likelihood configuration."""
input_dict = {}
# Create observables (adding a dummy var)
obs = forward_dict["sim_data"][:, :, 0]
obs_dummy = np.random.randn(obs.shape[0], obs.shape[1])
input_dict["observables"] = np.stack([obs, obs_dummy], axis=2).astype(np.float32)
# Create condition (repeating param draws)
design = forward_dict["sim_data"][:, :, 1:]
T = design.shape[1]
params_rep = np.stack([forward_dict["prior_draws"]] * T, axis=1)
input_dict["conditions"] = np.concatenate([design, params_rep], axis=-1).astype(np.float32)
return input_dict
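# A minimal shape sketch, assuming only this module's own functions; in contrast
# to the summary-statistic variant, the raw benchmark returns the Bernoulli draws
# next to the full design matrix.
if __name__ == "__main__":
    _rng = np.random.default_rng(42)
    _theta = prior(rng=_rng)  # shape (10,)
    print(simulator(_theta, T=100, rng=_rng).shape)  # (100, 10): [z | V.T] per time step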
| 5,657 | 34.584906 | 118 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/benchmarks/sir.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Corresponds to Task T.9 from the paper https://arxiv.org/pdf/2101.04653.pdf
import numpy as np
from scipy.integrate import odeint
bayesflow_benchmark_info = {
"simulator_is_batched": False,
"parameter_names": [r"$\beta$", r"$\gamma$"],
"configurator_info": "posterior",
}
def prior(rng=None):
"""Generates a random draw from a 2-dimensional (independent) lognormal prior
which represents the contact and recovery rate parameters of a basic SIR model.
Parameters
----------
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
theta : np.ndarray of shape (2,)
A single draw from the 2-dimensional prior.
"""
if rng is None:
rng = np.random.default_rng()
theta = rng.lognormal(mean=[np.log(0.4), np.log(1 / 8)], sigma=[0.5, 0.2])
return theta
def _deriv(x, t, N, beta, gamma):
"""Helper function for scipy.integrate.odeint."""
S, I, R = x
dS = -beta * S * I / N
dI = beta * S * I / N - gamma * I
dR = gamma * I
return dS, dI, dR
def simulator(theta, N=1e6, T=160, I0=1.0, R0=0.0, subsample=10, total_count=1000, scale_by_total=True, rng=None):
"""Runs a basic SIR model simulation for T time steps and returns `subsample` evenly spaced
points from the simulated trajectory, given disease parameters (contact and recovery rate) `theta`.
See https://arxiv.org/pdf/2101.04653.pdf, Benchmark Task T.9.
    Note that the simulator will scale the outputs between 0 and 1 (if `scale_by_total=True`).
Parameters
----------
theta : np.ndarray of shape (2,)
The 2-dimensional vector of disease parameters.
N : float, optional, default: 1e6 = 1 000 000
The size of the simulated population.
    T : int, optional, default: 160
The duration (time horizon) of the simulation.
I0 : float, optional, default: 1.
The number of initially infected individuals.
R0 : float, optional, default: 0.
The number of initially recovered individuals.
subsample : int or None, optional, default: 10
The number of evenly spaced time points to return. If None,
no subsampling will be performed and all T timepoints will be returned.
total_count : int, optional, default: 1000
The N parameter of the binomial noise distribution. Used just
for scaling the data and magnifying the effect of noise, such that
max infected == total_count.
scale_by_total : bool, optional, default: True
Scales the outputs by ``total_count`` if set to True.
rng : np.random.Generator or None, default: None
An optional random number generator to use.
Returns
-------
x : np.ndarray of shape (subsample,) or (T,) if subsample=None
The time series of simulated infected individuals. A trailing dimension of 1 should
be added by a BayesFlow configurator if the data is (properly) to be treated as time series.
"""
# Use default RNG, if None specified
if rng is None:
rng = np.random.default_rng()
# Create vector (list) of initial conditions
x0 = N - I0 - R0, I0, R0
# Unpack parameter vector into scalars
beta, gamma = theta
    # Prepare a time vector between 0 and T of length T
t_vec = np.linspace(0, T, T)
    # Integrate using scipy and retain only the infected compartment (second dimension)
irt = odeint(_deriv, x0, t_vec, args=(N, beta, gamma))[:, 1]
# Subsample evenly the specified number of points, if specified
if subsample is not None:
irt = irt[:: (T // subsample)]
# Truncate irt, so that small underflow below zero becomes zero
irt = np.maximum(irt, 0.0)
# Add noise and scale, if indicated
x = rng.binomial(n=total_count, p=irt / N)
if scale_by_total:
x = x / total_count
return x
def configurator(forward_dict, mode="posterior", as_summary_condition=False):
"""Configures simulator outputs for use in BayesFlow training."""
# Case only posterior configuration
if mode == "posterior":
input_dict = _config_posterior(forward_dict, as_summary_condition)
# Case only likelihood configuration
elif mode == "likelihood":
input_dict = _config_likelihood(forward_dict)
# Case posterior and likelihood configuration
elif mode == "joint":
input_dict = {}
input_dict["posterior_inputs"] = _config_posterior(forward_dict, as_summary_condition)
input_dict["likelihood_inputs"] = _config_likelihood(forward_dict)
# Throw otherwise
else:
raise NotImplementedError('For now, only a choice between ["posterior", "likelihood", "joint"] is available!')
return input_dict
def _config_posterior(forward_dict, as_summary_condition):
"""Helper function for posterior configuration."""
input_dict = {}
input_dict["parameters"] = forward_dict["prior_draws"].astype(np.float32)
if as_summary_condition:
input_dict["summary_conditions"] = forward_dict["sim_data"].astype(np.float32)[:, :, np.newaxis]
else:
input_dict["direct_conditions"] = forward_dict["sim_data"].astype(np.float32)
return input_dict
def _config_likelihood(forward_dict):
"""Helper function for likelihood configuration."""
input_dict = {}
input_dict["conditions"] = forward_dict["prior_draws"].astype(np.float32)
input_dict["observables"] = forward_dict["sim_data"].astype(np.float32)
return input_dict
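# A minimal usage sketch, assuming only this module's own functions; with the
# defaults, the output is a 10-point, noisy, [0, 1]-scaled infection time series.
if __name__ == "__main__":
    _rng = np.random.default_rng(42)
    _theta = prior(rng=_rng)  # shape (2,): contact and recovery rate
    _x = simulator(_theta, rng=_rng)  # shape (10,)
    print(_x.shape, float(_x.min()), float(_x.max()))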
| 6,659 | 36.840909 | 118 |
py
|
BayesFlow
|
BayesFlow-master/bayesflow/experimental/__init__.py
| 0 | 0 | 0 |
py
|
|
BayesFlow
|
BayesFlow-master/bayesflow/experimental/rectifiers.py
|
# Copyright (c) 2022 The BayesFlow Developers
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
import tensorflow as tf
import tensorflow_probability as tfp
import bayesflow.default_settings as defaults
from bayesflow.computational_utilities import compute_jacobian_trace
from bayesflow.exceptions import SummaryStatsError
from bayesflow.helper_networks import MCDropout
from bayesflow.losses import mmd_summary_space
class DriftNetwork(tf.keras.Model):
"""Implements a learnable velocity field for a neural ODE. Will typically be used
    in conjunction with a ``RectifiedDistribution`` instance, as proposed by [1] in the context
of unconditional image generation.
[1] Liu, X., Gong, C., & Liu, Q. (2022).
Flow straight and fast: Learning to generate and transfer data with rectified flow.
arXiv preprint arXiv:2209.03003.
"""
def __init__(
self, target_dim, num_dense=3, dense_args=None, dropout=True, mc_dropout=False, dropout_prob=0.05, **kwargs
):
"""Creates a learnable velocity field instance to be used in the context of rectifying
flows or neural ODEs.
[1] Liu, X., Gong, C., & Liu, Q. (2022).
Flow straight and fast: Learning to generate and transfer data with rectified flow.
arXiv preprint arXiv:2209.03003.
Parameters
----------
target_dim : int
The problem dimensionality (e.g., in parameter estimation, the number of parameters)
num_dense : int, optional, default: 3
The number of hidden layers for the inner fully-connected network
dense_args : dict or None, optional, default: None
The arguments to be passed to ``tf.keras.layers.Dense`` constructor. If None, default settings
will be fetched from ``bayesflow.default_settings``.
dropout : bool, optional, default: True
Whether to use dropout in-between the hidden layers.
mc_dropout : bool, optional, default: False
            Whether to use Monte Carlo dropout (i.e., a Bayesian approximation) during inference
dropout_prob : float in (0, 1), optional, default: 0.05
            The dropout probability. Only has an effect if ``dropout=True`` or ``mc_dropout=True``
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the ``tf.keras.Model.__init__`` method.
"""
super().__init__(**kwargs)
self.latent_dim = target_dim
if dense_args is None:
dense_args = defaults.DEFAULT_SETTING_DENSE_RECT
self.net = tf.keras.Sequential()
for _ in range(num_dense):
self.net.add(tf.keras.layers.Dense(**dense_args))
if mc_dropout:
self.net.add(MCDropout(dropout_prob))
elif dropout:
self.net.add(tf.keras.layers.Dropout(dropout_prob))
else:
pass
self.net.add(tf.keras.layers.Dense(self.latent_dim))
self.net.build(input_shape=())
def call(self, target_vars, latent_vars, time, condition, **kwargs):
"""Performs a linear interpolation between target and latent variables
over time (i.e., a single ODE step during training).
Parameters
----------
target_vars : tf.Tensor of shape (batch_size, ..., num_targets)
The variables of interest (e.g., parameters) over which we perform inference.
latent_vars : tf.Tensor of shape (batch_size, ..., num_targets)
The sampled random variates from the base distribution.
time : tf.Tensor of shape (batch_size, ..., 1)
A vector of time indices in (0, 1)
condition : tf.Tensor of shape (batch_size, ..., condition_dim)
The optional conditioning variables (e.g., as returned by a summary network)
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the ``tf.keras.Model`` call() method
"""
diff = target_vars - latent_vars
wdiff = time * target_vars + (1 - time) * latent_vars
drift = self.drift(wdiff, time, condition, **kwargs)
return diff, drift
def drift(self, target_t, time, condition, **kwargs):
"""Returns the drift at target_t time given optional condition(s).
Parameters
----------
target_t : tf.Tensor of shape (batch_size, ..., num_targets)
The variables of interest (e.g., parameters) over which we perform inference.
time : tf.Tensor of shape (batch_size, ..., 1)
A vector of time indices in (0, 1)
condition : tf.Tensor of shape (batch_size, ..., condition_dim)
The optional conditioning variables (e.g., as returned by a summary network)
**kwargs : dict, optional, default: {}
Optional keyword arguments passed to the drift network.
"""
if condition is not None:
inp = tf.concat([target_t, condition, time], axis=-1)
else:
inp = tf.concat([target_t, time], axis=-1)
return self.net(inp, **kwargs)
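# A minimal shape-checking sketch for the drift network; the sizes below are
# arbitrary illustrative assumptions, not values prescribed by this module.
if __name__ == "__main__":
    _net = DriftNetwork(target_dim=2)
    _target_t = tf.random.normal((4, 2))  # four points in target space
    _time = tf.random.uniform((4, 1))  # time indices in (0, 1)
    _cond = tf.random.normal((4, 6))  # toy conditioning variables
    print(_net.drift(_target_t, _time, _cond).shape)  # (4, 2)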
class RectifiedDistribution(tf.keras.Model):
"""Implements a rectifying flows according to [1]. To be used as an alternative
to a normalizing flow in a BayesFlow pipeline.
[1] Liu, X., Gong, C., & Liu, Q. (2022).
Flow straight and fast: Learning to generate and transfer data with rectified flow.
arXiv preprint arXiv:2209.03003.
"""
def __init__(self, drift_net, summary_net=None, latent_dist=None, loss_fun=None, summary_loss_fun=None, **kwargs):
"""Initializes a composite neural network to represent an amortized approximate posterior through
for a rectifying flow.
Parameters
----------
drift_net : tf.keras.Model
A neural network for the velocity field (drift) of the learnable ODE
summary_net : tf.keras.Model or None, optional, default: None
An optional summary network to compress non-vector data structures.
latent_dist : callable or None, optional, default: None
The latent distribution towards which to optimize the networks. Defaults to
a multivariate unit Gaussian.
loss_fun : callable or None, optional, default: None
The loss function for "rectifying" the velocity field. If ``None``, defaults
to ``tf.keras.losses.log_cosh``. A sensible alternative is the mean squared error (MSE).
summary_loss_fun : callable, str, or None, optional, default: None
The loss function which accepts the outputs of the summary network. If ``None``, no loss is provided
and the summary space will not be shaped according to a known distribution (see [2]).
If ``summary_loss_fun='MMD'``, the default loss from [2] will be used.
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the ``__init__`` method of a ``tf.keras.Model`` instance.
Important
----------
- If no ``summary_net`` is provided, then the output dictionary of your generative model should not contain
any ``summary_conditions``, i.e., ``summary_conditions`` should be set to ``None``, otherwise these will be ignored.
"""
super().__init__(**kwargs)
self.drift_net = drift_net
self.summary_net = summary_net
self.latent_dim = drift_net.latent_dim
self.latent_dist = self._determine_latent_dist(latent_dist)
self.loss_fun = self._determine_loss(loss_fun)
self.summary_loss = self._determine_summary_loss(summary_loss_fun)
def call(self, input_dict, return_summary=False, num_eval_points=32, **kwargs):
"""Performs a forward pass through the summary and drift network given an input dictionary.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
``targets`` - the latent model parameters over which a conditional density is learned
``summary_conditions`` - the conditioning variables (including data) that are first passed through a summary network
``direct_conditions`` - the conditioning variables that are directly passed to the inference network
return_summary : bool, optional, default: False
A flag which determines whether the learnable data summaries (representations) are returned or not.
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
For instance, ``kwargs={'training': True}`` is passed automatically during training.
Returns
-------
net_out or (net_out, summary_out)
"""
# Concatenate conditions, if given
summary_out, full_cond = self._compute_summary_condition(
input_dict.get(defaults.DEFAULT_KEYS["summary_conditions"]),
input_dict.get(defaults.DEFAULT_KEYS["direct_conditions"]),
**kwargs,
)
# Extract target variables
target_vars = input_dict[defaults.DEFAULT_KEYS["targets"]]
# Extract batch size (autograph friendly)
batch_size = tf.shape(target_vars)[0]
# Sample latent variables
latent_vars = self.latent_dist.sample(batch_size)
# Replicate targets, latents, and conditions across num_eval_points for a less noisy loss estimate
target_vars = tf.stack([target_vars] * num_eval_points, axis=1)
latent_vars = tf.stack([latent_vars] * num_eval_points, axis=1)
full_cond = tf.stack([full_cond] * num_eval_points, axis=1)
# Sample time
time = tf.random.uniform((batch_size, num_eval_points, 1))
# Compute drift
net_out = self.drift_net(target_vars, latent_vars, time, full_cond, **kwargs)
# Return summary outputs or not, depending on parameter
if return_summary:
return net_out, summary_out
return net_out
def compute_loss(self, input_dict, **kwargs):
"""Computes the loss of the posterior amortizer given an input dictionary, which will
typically be the output of a Bayesian ``GenerativeModel`` instance.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
``targets`` - the latent variables over which a conditional density is learned
``summary_conditions`` - the conditioning variables that are first passed through a summary network
``direct_conditions`` - the conditioning variables that are directly passed to the inference network
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
For instance, ``kwargs={'training': True}`` is passed automatically during training.
Returns
-------
total_loss : tf.Tensor of shape (1,) - the total computed loss given input variables
"""
net_out, sum_out = self(input_dict, return_summary=True, **kwargs)
diff, drift = net_out
loss = self.loss_fun(diff, drift)
# Case summary loss should be computed
if self.summary_loss is not None:
sum_loss = self.summary_loss(sum_out)
# Case no summary loss, simply add 0 for convenience
else:
sum_loss = 0.0
# Compute and return total loss
total_loss = tf.reduce_mean(loss) + sum_loss
return total_loss
def sample(self, input_dict, n_samples, to_numpy=True, step_size=1e-3, **kwargs):
"""Generates random draws from the approximate posterior given a dictionary with conditonal variables.
Parameters
----------
input_dict : dict
Input dictionary containing the following mandatory keys, if ``DEFAULT_KEYS`` unchanged:
``summary_conditions`` : the conditioning variables (including data) that are first passed through a summary network
``direct_conditions`` : the conditioning variables that are directly passed to the inference network
n_samples : int
The number of posterior draws (samples) to obtain from the approximate posterior
to_numpy : bool, optional, default: True
Flag indicating whether to return the samples as a ``np.ndarray`` or a ``tf.Tensor``
step_size : float, optional, default: 1e-3
The step size for the stochastic Euler solver.
**kwargs : dict, optional, default: {}
Additional keyword arguments passed to the networks
Returns
-------
post_samples : tf.Tensor or np.ndarray of shape (n_data_sets, n_samples, n_params)
The sampled parameters from the approximate posterior of each data set
"""
# Compute condition (direct, summary, or both)
_, conditions = self._compute_summary_condition(
input_dict.get(defaults.DEFAULT_KEYS["summary_conditions"]),
input_dict.get(defaults.DEFAULT_KEYS["direct_conditions"]),
training=False,
**kwargs,
)
n_data_sets = tf.shape(conditions)[0]
# Sample initial latent variables -> shape (n_data_sets, n_samples, latent_dim)
latent_vars = self.latent_dist.sample((n_data_sets, n_samples))
# Replicate conditions and solve ODEs simultaneously
conditions = tf.stack([conditions] * n_samples, axis=1)
post_samples = self._solve_euler(latent_vars, conditions, step_size, **kwargs)
# Remove the leading dimension in the single data set case
if n_data_sets == 1:
post_samples = tf.squeeze(post_samples, axis=0)
# Return numpy version of tensor or tensor itself
if to_numpy:
return post_samples.numpy()
return post_samples
def log_density(self, input_dict, to_numpy=True, step_size=1e-3, **kwargs):
"""Computes the log density..."""
# Compute condition (direct, summary, or both)
_, conditions = self._compute_summary_condition(
input_dict.get(defaults.DEFAULT_KEYS["summary_conditions"]),
input_dict.get(defaults.DEFAULT_KEYS["direct_conditions"]),
training=False,
**kwargs,
)
# Extract targets
target_vars = input_dict[defaults.DEFAULT_KEYS["targets"]]
# Reverse ODE and log pdf computation with the trace method
latents, trace = self._solve_euler_inv(target_vars, conditions, step_size, **kwargs)
lpdf = self.latent_dist.log_prob(latents) + trace
# Return numpy version of tensor or tensor itself
if to_numpy:
return lpdf.numpy()
return lpdf
def _solve_euler(self, latent_vars, condition, dt=1e-3, **kwargs):
"""Simple stochastic parallel Euler solver."""
num_steps = int(1 / dt)
time_vec = tf.zeros((tf.shape(latent_vars)[0], tf.shape(latent_vars)[1], 1))
target = tf.identity(latent_vars)
for _ in range(num_steps + 1):
target += self.drift_net.drift(target, time_vec, condition, **kwargs) * dt
time_vec += dt
return target
def _solve_euler_inv(self, targets, condition, dt=1e-3, **kwargs):
"""Solves the reverse ODE (negative direction of drift) and returns the trace."""
def velocity(latents, drift, time_vec, condition, **kwargs):
v = drift(latents, time_vec, condition, **kwargs)
return v
batch_size = tf.shape(targets)[0]
num_samples = tf.shape(targets)[1]
num_steps = int(1 / dt)
time_vec = tf.ones((batch_size, num_samples, 1))
trace = tf.zeros((batch_size, num_samples))
latents = tf.identity(targets)
for _ in range(num_steps + 1):
f = partial(velocity, drift=self.drift_net.drift, time_vec=time_vec, condition=condition)
drift_t, trace_t = compute_jacobian_trace(f, latents, **kwargs)
latents -= drift_t * dt
trace -= trace_t * dt
time_vec -= dt
return latents, trace
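# Rationale (sketch): by the instantaneous change-of-variables formula for
# continuous flows (Chen et al., 2018, "Neural Ordinary Differential
# Equations"), log p(x) = log p(z) + int_0^1 tr(dv/dx(x_t, t)) dt along the
# path, which is why the Euler loop above accumulates the Jacobian trace
# alongside the state (with signs matching the reverse-time integration).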
def _compute_summary_condition(self, summary_conditions, direct_conditions, **kwargs):
"""Determines how to concatenate the provided conditions."""
# Compute learnable summaries, if given
if self.summary_net is not None:
sum_condition = self.summary_net(summary_conditions, **kwargs)
else:
sum_condition = None
# Concatenate learnable summaries with fixed summaries
if sum_condition is not None and direct_conditions is not None:
full_cond = tf.concat([sum_condition, direct_conditions], axis=-1)
elif sum_condition is not None:
full_cond = sum_condition
elif direct_conditions is not None:
full_cond = direct_conditions
else:
raise SummaryStatsError("Could not concatenate or determine conditioning inputs. Provide at least one of summary or direct conditions.")
return sum_condition, full_cond
def _determine_latent_dist(self, latent_dist):
"""Determines which latent distribution to use and defaults to unit normal if ``None`` provided."""
if latent_dist is None:
return tfp.distributions.MultivariateNormalDiag(loc=[0.0] * self.latent_dim)
else:
return latent_dist
def _determine_summary_loss(self, loss_fun):
"""Determines which summary loss to use if default `None` argument provided, otherwise return identity."""
# If callable, return provided loss
if loss_fun is None or callable(loss_fun):
return loss_fun
# If string, check for MMD or mmd
elif type(loss_fun) is str:
if loss_fun.lower() == "mmd":
return mmd_summary_space
else:
raise NotImplementedError("For now, only 'mmd' is supported as a string argument for summary_loss_fun!")
# Throw if loss type unexpected
else:
raise NotImplementedError(
"Could not infer summary_loss_fun, argument should be of type (None, callable, or str)!"
)
def _determine_loss(self, loss_fun):
"""Determines which summary loss to use if default ``None`` argument provided, otherwise return identity."""
if loss_fun is None:
return tf.keras.losses.log_cosh
return loss_fun
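# Self-contained sketch (an illustration, not library code) of the plain Euler
# integration performed by _solve_euler: for the 1-D coupling x1 = m + s * x0
# with x0 ~ N(0, 1), the straight path x_t = (1 - t) * x0 + t * x1 admits the
# closed-form drift v(x, t) = m + (s - 1) * (x - t * m) / (1 - t + t * s), so
# integrating from the base distribution should land on N(m, s^2).
def _euler_sampler_sketch(m=2.0, s=0.5, n=10000, dt=1e-3):
    x = tf.random.normal((n,))  # start at the latent distribution, t = 0
    t = 0.0
    for _ in range(int(1 / dt)):
        v = m + (s - 1.0) * (x - t * m) / (1.0 - t + t * s)
        x = x + v * dt
        t += dt
    return tf.reduce_mean(x), tf.math.reduce_std(x)  # approximately (m, s)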
| 19,610 | 45.035211 | 128 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/policy_twap.py
|
"""
TWAP strategy
"""
import torch
import torch.nn as nn
import torch.optim as opt
from torch import Tensor
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from constants import CODE_LIST, JUNE_DATE_LIST, VALIDATION_DATE_LIST, VALIDATION_CODE_LIST
from env import make_env
from pathos.multiprocessing import ProcessingPool as Pool
from sklearn.preprocessing import StandardScaler
from scipy.special import softmax, expit
from collections import deque
from tqdm import trange
import pandas as pd
import numpy as np
import itertools
import pdb
import os
class DefaultConfig(object):
path_raw_data = '/mnt/execution_data_v2/raw'
# path_pkl_data = '/data/execution_data/pkl'
path_pkl_data = '/mnt/execution_data_v2/pkl'
result_path = 'results/exp34'
code_list = CODE_LIST
date_list = JUNE_DATE_LIST
code_list_validation = VALIDATION_CODE_LIST
date_list_validation = VALIDATION_DATE_LIST
# Selected features
simulation_features = [
'bidPrice1', 'bidPrice2', 'bidPrice3', 'bidPrice4', 'bidPrice5',
'bidVolume1', 'bidVolume2', 'bidVolume3', 'bidVolume4', 'bidVolume5',
'askPrice1', 'askPrice2', 'askPrice3', 'askPrice4', 'askPrice5',
'askVolume1', 'askVolume2', 'askVolume3', 'askVolume4', 'askVolume5',
'high_low_price_diff', 'close_price', 'volume', 'vwap', 'time_diff',
'ask_bid_spread', 'ab_volume_misbalance', 'transaction_net_volume', 'volatility',
'trend', 'immediate_market_order_cost_bid',
]
# Stack the features of the previous x bars
simulation_loockback_horizon = 5
# Whether to return flattened or stacked features of the past x bars
simulation_do_feature_flatten = True
# ############################### Trade Setting 1 Parameters ###############################
# # Planning horizon is 30mins
# simulation_planning_horizon = 30
# # Total volume to trade w.r.t. the basis volume
# simulation_volume_ratio = 0.005
# # Type of action space
# simulation_action_type = 'discrete_p'
# # Order volume = total volume / simulation_num_shares
# simulation_num_shares = 10
# # Use discrete actions
# simulation_discrete_actions = np.linspace(-30, 30, 61)
# ############################### END ######################################################
# ############################### Trade Setting 2 Parameters ###############################
# Planning horizon is 30mins
simulation_planning_horizon = 30
# Total volume to trade w.r.t. the basis volume
simulation_volume_ratio = 0.005
# Type of action space
simulation_action_type = 'discrete_q'
# Use discrete actions
simulation_discrete_actions = np.arange(31)
# ############################### END ######################################################
simulation_direction = 'sell'
# Quadratic penalty to mitigate permanent market impact
# Penalty = coeff * basis_price / basis_volume
# Encourage a uniform liquidation strategy
simulation_linear_reg_coeff = 0.1
# If the quantity is not fully filled at the last time step, we place a market order (MO) to liquidate the remainder and add a further penalty
simulation_not_filled_penalty_bp = 2.0
# Scale the price delta if we use continuous actions
simulation_continuous_action_scale = 10
# Scale the reward to approx. unit range
simulation_reward_scale = 1000
class TWAP_Agent(object):
def __init__(self):
super(TWAP_Agent, self).__init__()
def act(self, market_state, private_state):
elapsed_time = private_state[0]
executed_quantity = 1 - private_state[1]
if elapsed_time >= executed_quantity:
return 0
else:
return 60
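# Illustrative sketch (assumption: private_state = [elapsed_time_fraction,
# remaining_quantity_fraction], both in [0, 1]). The rule above fires action 0
# (presumably the most aggressive sell price under the 61-level discrete_p
# space) whenever execution falls behind the elapsed-time schedule, and the
# most passive action (60) otherwise.
def _twap_pacing_sketch():
    agent = TWAP_Agent()
    behind = agent.act(None, [0.5, 0.6])  # executed 0.4 < elapsed 0.5 -> 0
    ahead = agent.act(None, [0.5, 0.4])   # executed 0.6 >= elapsed 0.5 -> 60
    return behind, ahead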
class TWAP_Agent2(object):
def __init__(self):
super(TWAP_Agent2, self).__init__()
def act(self, market_state, private_state):
return 1
class Evaluation(object):
def __init__(self, config):
super(Evaluation, self).__init__()
self.config = config
self.env = make_env(config)
def evaluate(self, agent):
def run(dumb):
bps = []
rews = []
for code in self.config.code_list_validation:
for date in self.config.date_list_validation:
record = self.evaluate_single(agent, code=code, date=date)
bps.append(record['BP'].values[-1])
rews.append(record['reward'].sum())
return np.mean(bps), np.mean(rews)
pool = Pool(80)
record = pool.map(run, list(range(1000)))
bp_list = [item[0] for item in record]
rew_list = [item[1] for item in record]
return dict(
BP_avg=np.mean(bp_list),
reward_avg=np.mean(rew_list),
BP_std=np.std(bp_list),
reward_std=np.std(rew_list)
)
def evaluate_detail_batch(self, agent, iteration=1,
code='000504.XSHE',
date_list=['2021-06-01', '2021-06-03', '2021-06-04', '2021-07-02', '2021-07-05', '2021-07-06']):
path = os.path.join(self.config.result_path, 'evaluation', 'it{:08d}'.format(iteration))
os.makedirs(path, exist_ok=True)
record = []
for date in date_list:
for i in range(5):
res = self.evaluate_single(agent, code=code, date=date)
record.append(res)
Figure().plot_policy(df=res, filename=os.path.join(path, 'fig_{}_{}_{}.png'.format(code, date, i)))
pd.concat(record).to_csv(os.path.join(path, 'detail_{}.csv'.format(code)))
def evaluate_single(self, agent, code='600519.XSHG', date='2021-06-01'):
record = []
sm, sp = self.env.reset(code, date)
done = False
step = 0
action = None
info = dict(status=None)
while not done:
action = agent.act(sm, sp)
nsm, nsp, reward, done, info = self.env.step(action)
record.append(dict(
code=code,
date=date,
step=step,
quantity=self.env.quantity,
action=action,
ask_price=self.env.data.obtain_level('askPrice', 1),
bid_price=self.env.data.obtain_level('bidPrice', 1),
order_price=np.round((1 + self.config.simulation_discrete_actions[action] / 10000) \
* self.env.data.obtain_level('askPrice', 1) * 100) / 100 if action is not None else None,
reward=reward,
cash=self.env.cash,
BP=self.env.get_metric('BP'),
IS=self.env.get_metric('IS'),
status=info['status'],
index=self.env.data.current_index
))
step += 1
sm, sp = nsm, nsp
return pd.DataFrame(record)
class Figure(object):
def __init__(self):
pass
@staticmethod
def plot_policy(df, filename):
fig, ax1 = plt.subplots(figsize=(15, 6))
ax2 = ax1.twinx()
ax1.plot(df['index'], df['ask_price'], label='ask_price')
ax1.plot(df['index'], df['bid_price'], label='bid_price')
ax1.plot(df['index'], df['order_price'], label='order_price')
ax1.legend(loc='lower left')
ax2.plot(df['index'], df['quantity'], 'k*', label='inventory')
ax1.set_title('{} {} BP={:.4f}'.format(df['code'].values[-1], df['date'].values[-1], df['BP'].values[-1]))
ax2.legend(loc='upper right')
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
@staticmethod
def plot_training_process_basic(df, filename):
while df.shape[0] > 1500:
df = df[::2]
fig, ax1 = plt.subplots(figsize=(15, 6))
ax2 = ax1.twinx()
ax1.plot(df.index.values, df['reward'], 'C0', label='reward')
ax1.legend(loc='lower left')
ax2.plot(df.index.values, df['BP'], 'C1', label='BP')
ax2.legend(loc='upper right')
top_size = df.shape[0] // 10
mean_bp_first = np.mean(df['BP'].values[:top_size])
mean_bp_last = np.mean(df['BP'].values[-top_size:])
mean_rew_first = np.mean(df['reward'].values[:top_size])
mean_rew_last = np.mean(df['reward'].values[-top_size:])
ax2.set_title('BP {:.4f}->{:.4f} reward {:.4f}->{:.4f}'.format(mean_bp_first, mean_bp_last, mean_rew_first, mean_rew_last))
if 'loss' in df.columns:
ax3 = ax1.twinx()
p3, = ax3.plot(df.index.values, df['loss'], 'C2')
ax3.yaxis.label.set_color('C2')
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
if __name__ == '__main__':
for i, lin_reg in enumerate([1.0, 0.1, 0.01]):
config = DefaultConfig()
config.simulation_linear_reg_coeff = lin_reg
evaluation = Evaluation(config)
agent = TWAP_Agent2()
result = evaluation.evaluate(agent)
print('Lin_reg={:.1E} BP={:.4f}({:.4f}) reward={:.4f}({:.4f})'\
.format(lin_reg, result['BP_avg'], result['BP_std'], result['reward_avg'], result['reward_std']))
evaluation.evaluate_detail_batch(agent, iteration=i+20)
| 9,303 | 35.486275 | 131 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/constants.py
|
# Stock codes
CODE_LIST = ['FINRL_4078', 'FINRL_4087', 'FINRL_4076', 'FINRL_4090', 'FINRL_4095', 'FINRL_4084', 'FINRL_4073', 'FINRL_4085', 'FINRL_4091', 'FINRL_4068', 'FINRL_4080', 'FINRL_4083', 'FINRL_4079', 'FINRL_4094', 'FINRL_4063', 'FINRL_4066', 'FINRL_4092', 'FINRL_4071', 'FINRL_4088',]
# CODE_LIST = ['000858.XSHE', '300750.XSHE', '002466.XSHE', '002594.XSHE', '002460.XSHE', '601899.XSHG', '601012.XSHG', '000568.XSHE', '300059.XSHE', '601919.XSHG', '600111.XSHG', '600096.XSHG', '600905.XSHG', '600460.XSHG', '002176.XSHE', '601318.XSHG', '601600.XSHG', '300274.XSHE', '600702.XSHG', '000762.XSHE', '002129.XSHE', '600809.XSHG', '002714.XSHE', '600438.XSHG', '600763.XSHG', '603986.XSHG', '600141.XSHG', '000519.XSHE', '002709.XSHE', '002326.XSHE', '000591.XSHE', '600036.XSHG', '600703.XSHG', '600436.XSHG', '300390.XSHE', '002240.XSHE', '300014.XSHE', '600089.XSHG', '600171.XSHG', '000422.XSHE', '300343.XSHE', '600887.XSHG', '603799.XSHG', '000792.XSHE', '600031.XSHG', '002610.XSHE', '000799.XSHE', '601615.XSHG', '000630.XSHE', '000661.XSHE', '603026.XSHG', '600884.XSHG', '600276.XSHG', '300999.XSHE', '002202.XSHE', '601088.XSHG', '300207.XSHE', '002407.XSHE', '601016.XSHG', '000807.XSHE', '603501.XSHG', '601888.XSHG', '000725.XSHE', '603259.XSHG', '002812.XSHE', '000009.XSHE', '601669.XSHG', '000002.XSHE', '600010.XSHG', '002385.XSHE', '600893.XSHG', '603077.XSHG', '688772.XSHG', '000998.XSHE', '002371.XSHE', '600150.XSHG', '000723.XSHE', '300124.XSHE', '000155.XSHE', '002304.XSHE', '002241.XSHE', '603260.XSHG', '600256.XSHG', '601225.XSHG', '002340.XSHE', '002475.XSHE', '300760.XSHE', '000831.XSHE', '002074.XSHE', '000983.XSHE', '000625.XSHE', '600418.XSHG', '002497.XSHE', '600338.XSHG', '600048.XSHG', '002459.XSHE', '600499.XSHG', '300083.XSHE',]
JUNE_DATE_LIST = ['2020-12-16']
# JUNE_DATE_LIST = ['2021-06-01', '2021-06-02', '2021-06-03', '2021-06-04', '2021-06-07', '2021-06-08', '2021-06-09', '2021-06-10', '2021-06-11', '2021-06-15', '2021-06-16', '2021-06-17', '2021-06-18', '2021-06-21', '2021-06-22', '2021-06-23', '2021-06-24', '2021-06-25', '2021-06-28', '2021-06-29', '2021-06-30',]
VALIDATION_CODE_LIST = ['FINRL_4092', 'FINRL_4071']
# VALIDATION_CODE_LIST = ['300750.XSHE', '002466.XSHE', '002594.XSHE', '601899.XSHG', '601012.XSHG',]
VALIDATION_DATE_LIST = []
# VALIDATION_DATE_LIST = ['2021-07-02', '2021-07-05', '2021-07-06', '2021-07-07', '2021-07-08', '2021-07-09',]
TESTING_DATE_LIST = ['2020-12-16']
# TESTING_DATE_LIST = ['2021-07-12', '2021-07-14', '2021-07-15', '2021-07-16', '2021-07-19', '2021-07-20', '2021-07-21', '2021-07-22', '2021-07-23', '2021-07-26', '2021-07-27', '2021-07-28', '2021-07-29',]
| 2,655 | 176.066667 | 1,485 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/policy_tuned_ppo.py
|
"""
Tuned PPO algorithm for optimized trade execution
"""
from env_v2 import make_env
from storage import RolloutStorage
from constants import CODE_LIST, JUNE_DATE_LIST, VALIDATION_DATE_LIST, VALIDATION_CODE_LIST
from sklearn.preprocessing import StandardScaler
from pathos.multiprocessing import ProcessingPool as Pool
from scipy.special import softmax, expit
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch import Tensor
import torch.optim as opt
from tensorboardX import SummaryWriter
from collections import deque
from collections import namedtuple
from os import makedirs as mkdir
from os.path import join as joindir
from tqdm import trange
import numpy as np
import pandas as pd
import itertools
import argparse
import math
import time
import os
time_stamp = str(time.gmtime()[1]) + "-" + \
str(time.gmtime()[2]) + "-" + str(time.gmtime()[3]) + "-" + \
str(time.gmtime()[4]) + "-" + str(time.gmtime()[5])
Transition = namedtuple('Transition', ('sm', 'sp', 'value', 'action', 'logproba', 'mask', 'next_sm', 'next_sp', 'reward'))
EPS = 1e-10
# RESULT_DIR = 'results/ppo_exp1' # + time_stamp
# mkdir(RESULT_DIR, exist_ok=True)
# Hyperparameters
parser = argparse.ArgumentParser(description='Tuned PPO for optimized trade execution')
parser.add_argument('--arch', type=str, default='v1', choices=['v1', 'v2', 'v2-5', 'v3'])
parser.add_argument('--lr', type=float, default=3e-4)
parser.add_argument('--seed', type=int, default=8888)
args_ = parser.parse_args()
class DefaultConfig(object):
path_raw_data = '/data/execution_data/raw'
# path_pkl_data = '/data/execution_data/pkl'
path_pkl_data = '/mnt/execution_data_v2/pkl'
# path_pkl_data = os.path.expanduser('~/execution_data/pkl')
result_path = 'results/ppo_exp3'
code_list = CODE_LIST
date_list = JUNE_DATE_LIST
code_list_validation = VALIDATION_CODE_LIST
date_list_validation = VALIDATION_DATE_LIST
agent_scale = 1000
agent_batch_size = 2048
agent_learn_start = 1000
agent_gamma = 0.998
# agent_epsilon = 0.7
agent_total_steps = 20 * agent_scale
# Smooth L1 loss (SL1) or mean squared error (MSE)
# agent_loss_type = 'SL1'
# agent_lr_decay_freq = 2000
agent_eval_freq = 100
agent_plot_freq = 50
agent_device = 'cuda'
# Selected features
simulation_features = [
'bidPrice1', 'bidPrice2', 'bidPrice3', 'bidPrice4', 'bidPrice5',
'bidVolume1', 'bidVolume2', 'bidVolume3', 'bidVolume4', 'bidVolume5',
'askPrice1', 'askPrice2', 'askPrice3', 'askPrice4', 'askPrice5',
'askVolume1', 'askVolume2', 'askVolume3', 'askVolume4', 'askVolume5',
'high_low_price_diff', 'close_price', 'volume', 'vwap', 'time_diff',
'ask_bid_spread', 'ab_volume_misbalance', 'transaction_net_volume', 'volatility',
'trend', 'immediate_market_order_cost_bid',
]
# ############################### Trade Setting Parameters ###############################
# Planning horizon is 30mins
simulation_planning_horizon = 30
# Order volume = total volume / simulation_num_shares
simulation_num_shares = 10
# Total volume to trade w.r.t. the basis volume
simulation_volume_ratio = 0.005
# ############################### END ###############################
# ############################### Test Parameters ###############################
# Encourage a uniform liquidation strategy
simulation_linear_reg_coeff = [0.1, 0.01]
agent_network_structrue = None
# ############################### END ###############################
# Stack the features of the previous x bars
simulation_loockback_horizon = 5
# Whether return flattened or stacked features of the past x bars
simulation_do_feature_flatten = True
simulation_direction = 'sell'
# If the quantity is not fully filled at the last time step, we place a market order (MO) to liquidate the remainder and add a further penalty
simulation_not_filled_penalty_bp = 2.0
# Use discrete actions
simulation_discreate_actions = \
np.concatenate([[-50, -40, -30, -25, -20, -15], np.linspace(-10, 10, 21), [15, 20, 25, 30, 40, 50]])
# Scale the price delta if we use continuous actions
simulation_continuous_action_scale = 10
# Use 'discrete' or 'continuous' action space?
simulation_action_type = 'discrete'
# PPO parameters =====
# tricks
agent_learning_rate = [1e-4, 1e-5]
eps = 1e-5
clip_param = 0.2
num_epoch = 4
num_mini_batch = 32
value_loss_coef = 0.5
entropy_coef = 0.01
max_grad_norm = 0.5
use_clipped_value_loss = True
num_steps = 2048
gae_lambda = 0.95
use_linear_lr_decay = True
schedule_adam = 'linear'
schedule_clip = 'linear'
layer_norm = True
state_norm = True
advantage_norm = True
lossvalue_norm = True
clip = 0.2
lamda = 0.97
# ====================
seed = 3333
class Figure(object):
def __init__(self):
pass
@staticmethod
def plot_policy(df, filename):
fig, ax1 = plt.subplots(figsize=(15, 6))
ax2 = ax1.twinx()
ax1.plot(df['index'], df['ask_price'], label='ask_price')
ax1.plot(df['index'], df['bid_price'], label='bid_price')
ax1.plot(df['index'], df['order_price'], label='order_price')
ax1.legend(loc='lower left')
ax2.plot(df['index'], df['quantity'], 'k*', label='inventory')
ax1.set_title('{} {} BP={:.4f}'.format(df['code'].values[-1], df['date'].values[-1], df['BP'].values[-1]))
ax2.legend(loc='upper right')
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
@staticmethod
def plot_training_process_basic(df, filename):
while df.shape[0] > 1500:
df = df[::2]
fig, ax1 = plt.subplots(figsize=(15, 6))
ax2 = ax1.twinx()
ax1.plot(df.index.values, df['reward'], 'C0', label='reward')
ax1.legend(loc='lower left')
ax2.plot(df.index.values, df['BP'], 'C1', label='BP')
ax2.legend(loc='upper right')
top_size = df.shape[0] // 10
mean_bp_first = np.mean(df['BP'].values[:top_size])
mean_bp_last = np.mean(df['BP'].values[-top_size:])
mean_rew_first = np.mean(df['reward'].values[:top_size])
mean_rew_last = np.mean(df['reward'].values[-top_size:])
ax2.set_title('BP {:.4f}->{:.4f} reward {:.4f}->{:.4f}'.format(mean_bp_first, mean_bp_last, mean_rew_first, mean_rew_last))
if 'loss' in df.columns:
ax3 = ax1.twinx()
p3, = ax3.plot(df.index.values, df['loss'], 'C2')
ax3.yaxis.label.set_color('C2')
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
return dict(mean_bp_first=mean_bp_first, mean_bp_last=mean_bp_last, mean_rew_first=mean_rew_first, mean_rew_last=mean_rew_last)
class RunningStat(object):
def __init__(self, shape):
self._n = 0
self._M = np.zeros(shape)
self._S = np.zeros(shape)
def push(self, x):
x = np.asarray(x)
assert x.shape == self._M.shape
self._n += 1
if self._n == 1:
self._M[...] = x
else:
oldM = self._M.copy()
self._M[...] = oldM + (x - oldM) / self._n
self._S[...] = self._S + (x - oldM) * (x - self._M)
@property
def n(self):
return self._n
@property
def mean(self):
return self._M
@property
def var(self):
return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)
@property
def std(self):
return np.sqrt(self.var)
@property
def shape(self):
return self._M.shape
class ZFilter:
"""
y = (x-mean)/std
using running estimates of mean,std
"""
def __init__(self, shape, demean=True, destd=True, clip=10.0):
self.demean = demean
self.destd = destd
self.clip = clip
self.rs = RunningStat(shape)
def __call__(self, x, update=True):
if update: self.rs.push(x)
if self.demean:
x = x - self.rs.mean
if self.destd:
x = x / (self.rs.std + 1e-8)
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x
def output_shape(self, input_space):
return input_space.shape
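# Minimal check (sketch) that RunningStat implements Welford's online
# algorithm: the streaming mean and the (n - 1)-denominator standard deviation
# should match NumPy's batch statistics.
def _running_stat_sketch():
    rs = RunningStat(shape=(1,))
    xs = [1.0, 4.0, 2.0, 8.0]
    for x in xs:
        rs.push(np.array([x]))
    assert np.allclose(rs.mean, np.mean(xs))
    assert np.allclose(rs.std, np.std(xs, ddof=1))  # ddof=1 matches _S / (n - 1)
    return rs.mean, rs.std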
class Memory(object):
def __init__(self):
self.memory = []
def push(self, *args):
self.memory.append(Transition(*args))
def sample(self):
return Transition(*zip(*self.memory))
def __len__(self):
return len(self.memory)
class FixedCategorical(torch.distributions.Categorical):
def sample(self):
return super().sample().unsqueeze(-1)
def log_probs(self, actions):
return (
super()
.log_prob(actions.squeeze(-1))
.view(actions.size(0), -1)
.sum(-1)
.unsqueeze(-1)
)
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
class Categorical(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(Categorical, self).__init__()
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
init_ = lambda m: init(
m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
gain=0.01)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x):
x = self.linear(x)
return FixedCategorical(logits=x)
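# Illustrative sketch: FixedCategorical keeps an explicit trailing action
# dimension so that sampled actions, log-probabilities, and modes are all
# (batch, 1) tensors, presumably the shape the rollout storage expects.
def _fixed_categorical_sketch():
    dist = FixedCategorical(logits=torch.zeros(4, 6))  # batch of 4, 6 actions
    a = dist.sample()       # (4, 1) thanks to unsqueeze(-1)
    lp = dist.log_probs(a)  # (4, 1)
    m = dist.mode()         # (4, 1), greedy action
    return a.shape, lp.shape, m.shape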
class ActorCritic_v2_Discrete(nn.Module):
def __init__(self, num_inputs1, num_inputs2, num_outputs, hidden=64, layer_norm=True):
super(ActorCritic_v2_Discrete, self).__init__()
self.num_inputs1 = num_inputs1
self.num_inputs2 = num_inputs2
self.num_outputs = num_outputs
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
self.actor_fc1 = nn.Sequential(init_(nn.Linear(num_inputs1, hidden*2)), nn.Tanh(),
init_(nn.Linear(hidden*2, hidden)), nn.Tanh())
self.actor_fc2 = nn.Sequential(init_(nn.Linear(num_inputs2, hidden)), nn.Tanh())
self.actor_fc3 = nn.Sequential(init_(nn.Linear(hidden*2, hidden)), nn.Tanh())
self.dist = Categorical(hidden, num_outputs)
self.critic_fc1 = nn.Sequential(init_(nn.Linear(num_inputs1, hidden*2)), nn.Tanh(),
init_(nn.Linear(hidden*2, hidden)), nn.Tanh())
self.critic_fc2 = nn.Sequential(init_(nn.Linear(num_inputs2, hidden)), nn.Tanh())
self.critic_fc3 = nn.Sequential(init_(nn.Linear(hidden*2, hidden)), nn.Tanh())
self.critic_linear = init_(nn.Linear(hidden, 1))
self.train()
def forward(self, market_states, private_states):
"""
run policy network (actor) as well as value network (critic)
:param states: a Tensor2 represents states
:return: 3 Tensor2
"""
hidden_actor = self._forward_actor(market_states, private_states)
hidden_critic = self._forward_critic(market_states, private_states)
critic_value = self.critic_linear(hidden_critic)
return critic_value, hidden_actor
def _forward_actor(self, market_states, private_states):
market = self.actor_fc1(market_states)
private = self.actor_fc2(private_states)
states = torch.cat((market, private), 1) # (1, hidden) + (1, hidden) => (1, hidden * 2)
hidden_actor = self.actor_fc3(states)
return hidden_actor
def _forward_critic(self, market_states, private_states):
market = self.critic_fc1(market_states)
private = self.critic_fc2(private_states)
states = torch.cat((market, private), 1)
hidden_critic = self.critic_fc3(states)
return hidden_critic
def act(self, market_states, private_states):
value, actor_features = self.forward(market_states, private_states)
dist = self.dist(actor_features)
action = dist.sample()
action_log_probs = dist.log_probs(action)
return value, action, action_log_probs
def get_value(self, market_states, private_states):
value, _ = self.forward(market_states, private_states)
return value
def evaluate_actions(self, market_states, private_states, action):
value, actor_features = self.forward(market_states, private_states)
dist = self.dist(actor_features)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action_log_probs, dist_entropy
class Agent(object):
def __init__(self, config, writer):
super(Agent, self).__init__()
self._set_seed()
# ==== initialization ====
self.clip_param = config.clip_param
self.ppo_epoch = config.num_epoch
self.num_mini_batch = config.num_mini_batch
self.value_loss_coef = config.value_loss_coef
self.entropy_coef = config.entropy_coef
self.max_grad_norm = config.max_grad_norm
self.use_clipped_value_loss = config.use_clipped_value_loss
self.num_steps = config.num_steps
self.use_linear_lr_decay = config.use_linear_lr_decay
self.config = config
self.env = make_env(config)
self.dim_input1 = self.env.observation_dim # dimension of market states
self.dim_input2 = 2 # dimension of private states
self.dim_output = self.env.action_dim # for continuous, =1
network = config.agent_network_structrue
self.network = network(self.dim_input1, self.dim_input2, self.dim_output).to(device=self.config.agent_device)
self.optimizer = opt.Adam(self.network.parameters(), lr=config.agent_learning_rate, eps=config.eps)
# =========================
# ==== Print Parameters ====
print("Network:", config.agent_network_structrue)
print("Learning Rate:", config.agent_learning_rate)
print("EPS:", config.eps)
print("Clip param:", self.clip_param)
print("PPO epoch:", self.ppo_epoch)
print("Num mini batch:", self.num_mini_batch)
print("Value loss coef:", self.value_loss_coef)
print("Entropy coef:", self.entropy_coef)
print("Max grad norm:", self.max_grad_norm)
print("Use clipped value loss:", self.use_clipped_value_loss)
print("Num steps:", self.num_steps)
print("use_linear_lr_decay:", self.use_linear_lr_decay)
# ===========================
self.rollouts = RolloutStorage(self.num_steps, self.dim_input1, self.dim_input2, self.dim_output)
self.running_state_m = ZFilter((self.dim_input1,), clip=5.0)
self.running_state_p = ZFilter((self.dim_input2,), clip=5.0)
self.writer = writer
self.evaluation = Evaluation(self.config)
@staticmethod
def _filter(state):
return np.clip(state, -3, 3)
def _set_seed(self, seed=None):
if seed is None:
seed = int.from_bytes(os.urandom(4), byteorder='little')
else:
seed = seed + 1234
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def learn(self):
train_record = []
eval_record = []
# record average 1-round cumulative reward in every episode
# reward_record = []
global_steps = 0
ms_scaler = StandardScaler()
self.env.reset() # warm up the environment
# ==== market state normalization ====
obs_market_list = []
for _ in range(self.num_steps):
# random sample action to collect some samples
a = self.env.action_sample_func()
obs_market, obs_private, reward, done, info = self.env.step(a)
if done:
obs_market, obs_private = self.env.reset()
obs_market_list.append(obs_market)
ms_scaler.fit(np.array(obs_market_list))
# =====================================
obs_market, obs_private = self.env.reset()
obs_market = self._filter(ms_scaler.transform(np.array(obs_market).reshape(1, -1)))[0]
self.rollouts.obs_market[0].copy_(torch.from_numpy(obs_market))
self.rollouts.obs_private[0].copy_(torch.from_numpy(obs_private))
self.rollouts.to(self.config.agent_device)
for i_episode in trange(self.config.agent_total_steps):
reward_list = []
if self.use_linear_lr_decay:
# decrease learning rate linearly
lr = self.config.agent_learning_rate - (self.config.agent_learning_rate * (i_episode / float(self.config.agent_total_steps)))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
reward_sum = 0
t = 0
for step in range(self.num_steps):
# (1) Sample actions
with torch.no_grad():
value, action, action_log_prob = self.network.act(
self.rollouts.obs_market[step].unsqueeze(0), self.rollouts.obs_private[step].unsqueeze(0))
# Observe reward and next obs
obs_market, obs_private, reward, done, info = self.env.step(action)
obs_market = self._filter(ms_scaler.transform(np.array(obs_market).reshape(1, -1)))[0]
# If done then clean the history of observations.
masks = torch.FloatTensor((0.0,)) if done else torch.FloatTensor((1.0,))
reward = torch.FloatTensor((reward,))
reward_sum += reward
if done:
train_record.append(dict(
i=i_episode,
reward=reward_sum,
BP=self.env.get_metric('BP'),
IS=self.env.get_metric('IS'),
code=info['code'],
date=info['date'],
start_index=info['start_index']
))
reward_list.append(reward_sum)
global_steps += (t + 1)
reward_sum = 0
t = 0
obs_market, obs_private = self.env.reset()
obs_market = self._filter(ms_scaler.transform(np.array(obs_market).reshape(1, -1)))[0]
t = t + 1
self.rollouts.insert(torch.from_numpy(obs_market), torch.from_numpy(obs_private),
action[0], action_log_prob[0], value[0], reward, masks)
# reward_record.append({
# 'episode': i_episode,
# 'steps': global_steps,
# 'meanepreward': torch.mean(reward_list)})
with torch.no_grad():
next_value = self.network.get_value(
self.rollouts.obs_market[-1].unsqueeze(0), self.rollouts.obs_private[-1].unsqueeze(0)).detach()
self.rollouts.compute_returns(next_value[0], self.config.agent_gamma, self.config.gae_lambda)
advantages = self.rollouts.returns[:-1] - self.rollouts.value_preds[:-1]
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
value_loss_epoch = 0
action_loss_epoch = 0
dist_entropy_epoch = 0
for e in range(self.ppo_epoch):
data_generator = self.rollouts.feed_forward_generator(advantages, self.num_mini_batch)
for sample in data_generator:
obs_market_batch, obs_private_batch, actions_batch, \
value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, \
adv_targ = sample
# Reshape to do in a single forward pass for all steps
values, action_log_probs, dist_entropy = self.network.evaluate_actions(
obs_market_batch, obs_private_batch, actions_batch)
ratio = torch.exp(action_log_probs - old_action_log_probs_batch)
surr1 = ratio * adv_targ
surr2 = torch.clamp(ratio, 1.0 - self.clip_param,
1.0 + self.clip_param) * adv_targ
action_loss = -torch.min(surr1, surr2).mean()
if self.use_clipped_value_loss:
value_pred_clipped = value_preds_batch + \
(values - value_preds_batch).clamp(-self.clip_param, self.clip_param)
value_losses = (values - return_batch).pow(2)
value_losses_clipped = (value_pred_clipped - return_batch).pow(2)
value_loss = 0.5 * torch.max(value_losses,
value_losses_clipped).mean()
else:
value_loss = 0.5 * (return_batch - values).pow(2).mean()
self.optimizer.zero_grad()
(value_loss * self.value_loss_coef + action_loss -
dist_entropy * self.entropy_coef).backward()
nn.utils.clip_grad_norm_(self.network.parameters(),
self.max_grad_norm)
self.optimizer.step()
value_loss_epoch += value_loss.item()
action_loss_epoch += action_loss.item()
dist_entropy_epoch += dist_entropy.item()
num_updates = self.ppo_epoch * self.num_mini_batch
value_loss_epoch /= num_updates
action_loss_epoch /= num_updates
dist_entropy_epoch /= num_updates
# value_loss_epoch, action_loss_epoch, dist_entropy_epoch
self.rollouts.after_update()
# Step 5: Evaluate and log performance
if i_episode % self.config.agent_plot_freq == 0 and len(train_record) > 0:
print(train_record[-1])
self.evaluation.evaluate_detail_batch(self.network, ms_scaler, iteration=i_episode)
self.writer.add_scalar("train/reward", torch.mean(train_record[-1]['reward']), i_episode)
self.writer.add_scalar("train/BP", train_record[-1]['BP'], i_episode)
self.writer.add_scalar("train/IS", train_record[-1]['IS'], i_episode)
self.writer.add_scalar("train/value_loss_epoch", value_loss_epoch, i_episode)
self.writer.add_scalar("train/action_loss_epoch", action_loss_epoch, i_episode)
self.writer.add_scalar("train/dist_entropy_epoch", dist_entropy_epoch, i_episode)
if i_episode % self.config.agent_eval_freq == 0:
eval_record.append(self.evaluation.evaluate(self.network, ms_scaler))
print("BP:", eval_record[-1]['BP'], 'Reward:', eval_record[-1]['reward'])
np.save(self.config.result_path + "/eval_record_"+str(i_episode)+".npy", eval_record[-1]['ac_list'])
self.writer.add_scalar("eval/reward", np.mean(eval_record[-1]['reward']), i_episode)
self.writer.add_scalar("eval/BP", np.mean(eval_record[-1]['BP']), i_episode)
self.writer.add_scalar("eval/ac_min", np.mean(eval_record[-1]['ac_min']), i_episode)
self.writer.add_scalar("eval/ac_max", np.mean(eval_record[-1]['ac_max']), i_episode)
self.writer.add_scalar("eval/ac_mean", np.mean(eval_record[-1]['ac_mean']), i_episode)
return train_record, eval_record
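# Standalone sketch of the clipped surrogate objective optimized in
# Agent.learn above: the pessimistic minimum of the unclipped and clipped
# terms means updates that push the probability ratio outside
# [1 - clip, 1 + clip] receive no additional gradient signal.
def _ppo_clipped_objective_sketch(logp_new, logp_old, adv, clip_param=0.2):
    ratio = torch.exp(logp_new - logp_old)
    surr1 = ratio * adv
    surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param) * adv
    return -torch.min(surr1, surr2).mean()  # policy loss to be minimized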
class Evaluation(object):
def __init__(self, config):
super(Evaluation, self).__init__()
self.config = config
self.env = make_env(config)
def evaluate(self, network, scalar):
bp_list = []
rew_list = []
ac_list = []
ac_mean_list = []
ac_logstd_list = []
for code in self.config.code_list_validation:
for date in self.config.date_list_validation:
record, action_list, action_mean_list, action_logstd_list = self.evaluate_single(network, scalar, code=code, date=date)
bp_list.append(record['BP'].values[-1])
rew_list.append(record['reward'].sum())
ac_list.append(action_list)
ac_mean_list.append(action_mean_list)
ac_logstd_list.append(action_logstd_list)
return dict(
BP=np.mean(bp_list),
reward=np.mean(rew_list),
ac_min = np.min(ac_list),
ac_max = np.max(ac_list),
ac_mean = np.mean(ac_list),
ac_list = ac_list
)
def evaluate_detail_batch(self, network, scalar, iteration=1,
code='000504.XSHE',
date_list=['2021-06-01', '2021-06-03', '2021-06-04', '2021-07-02', '2021-07-05', '2021-07-06']):
path = os.path.join(self.config.result_path, 'evaluation', 'it{:08d}'.format(iteration))
os.makedirs(path, exist_ok=True)
record = []
for date in date_list:
for i in range(5):
res, _, _, _ = self.evaluate_single(network, scalar, code=code, date=date)
record.append(res)
Figure().plot_policy(df=res, filename=os.path.join(path, 'fig_{}_{}_{}.png'.format(code, date, i)))
pd.concat(record).to_csv(os.path.join(path, 'detail_{}.csv'.format(code)))
def evaluate_single(self, network, scalar, code='600519.XSHG', date='2021-06-01'):
record = []
sm, sp = self.env.reset(code, date)
done = False
step = 0
action = None
info = dict(status=None)
action_list = []
action_mean_list = []
action_logstd_list = []
while not done:
sm = Agent._filter(scalar.transform(sm.reshape(1, -1)))[0]
value, action, action_log_prob = network.act(Tensor(sm).unsqueeze(0).to(device=self.config.agent_device),
Tensor(sp).unsqueeze(0).to(device=self.config.agent_device))
action = action.item()
action_list.append(action)
action_logstd_list.append(action_log_prob.item())
nsm, nsp, reward, done, info = self.env.step(action)
record.append(dict(
code=code,
date=date,
step=step,
quantity=self.env.quantity,
action=action,
ask_price=self.env.data.obtain_level('askPrice', 1),
bid_price=self.env.data.obtain_level('bidPrice', 1),
order_price=np.round((1 + self.config.simulation_discreate_actions[action] / 10000) \
* self.env.data.obtain_level('askPrice', 1) * 100) / 100 if action is not None else None,
reward=reward,
cash=self.env.cash,
BP=self.env.get_metric('BP'),
IS=self.env.get_metric('IS'),
status=info['status'],
index=self.env.data.current_index
))
step += 1
sm, sp = nsm, nsp
return pd.DataFrame(record), action_list, action_mean_list, action_logstd_list
def run(argus):
model, lr, lin_reg, num_epoch, parallel_id = argus
config = DefaultConfig()
config.agent_learning_rate = lr
config.simulation_linear_reg_coeff = lin_reg
config.num_epoch = num_epoch
# config.simulation_continuous_action_scale = action_scale
# config.agent_network_structrue = model
if model == 'v2-5':
print("discrete ppo")
config.agent_network_structrue = ActorCritic_v2_Discrete
# elif model == 'v3':
# config.agent_network_structrue = ActorCritic_v3
else:
raise NotImplementedError
info = dict(learning_rate=lr, linear_reg=lin_reg, num_epoch=num_epoch, architecture=config.agent_network_structrue.__name__, parallel_id=parallel_id)
print("Config:", info)
id_str = '{}_lr-{:.1E}_linreg-{:.1E}_numepoch-{}_id-{}'.format(model, lr, lin_reg, num_epoch, parallel_id)
config.result_path = os.path.join(config.result_path, id_str)
print("result path:", config.result_path)
os.makedirs(config.result_path, exist_ok=True)
extend_path = lambda x: os.path.join(config.result_path, x)
writer = SummaryWriter(config.result_path + '/logs-' + str(parallel_id))
agent = Agent(config, writer)
train_record, eval_record = agent.learn()
train_record, eval_record = pd.DataFrame(train_record), pd.DataFrame(eval_record)
train_record.to_csv(extend_path('dqn_train_record.csv'))
eval_record.to_csv(extend_path('dqn_eval_record.csv'))
train_info = Figure().plot_training_process_basic(train_record, extend_path('dqn_train_record.png'))
eval_info = Figure().plot_training_process_basic(eval_record, extend_path('dqn_eval_record.png'))
info.update({('trn_' + k): v for k, v in train_info.items()})
info.update({('val_' + k): v for k, v in eval_info.items()})
return info
if __name__ == '__main__':
record = []
# test_list = list(itertools.product(['v1', 'v2', 'v3'], [3e-4, 1e-4], [0.1, 0.01], [3, 5, 10], np.arange(5)))
test_list = list(itertools.product(['v2-5',], [5e-5], [0.01,], [4,], np.arange(3)))
pool = Pool(3)
record = pool.map(run, test_list)
record = pd.DataFrame(record)
record.to_csv(os.path.join(DefaultConfig().result_path, 'result_original.csv'))
stats = record.groupby(['learning_rate', 'linear_reg', 'architecture']).agg([np.mean, np.std])
stats.to_csv(os.path.join(DefaultConfig().result_path, 'result_stats.csv'))
| 28,315 | 34.572864 | 153 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/policy_tuned_dqn.py
|
"""
Tuned DQN algorithm for optimized trade execution
"""
import torch
import torch.nn as nn
import torch.optim as opt
from torch import Tensor
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from constants import CODE_LIST, JUNE_DATE_LIST, VALIDATION_DATE_LIST, VALIDATION_CODE_LIST
from env import make_env
from pathos.multiprocessing import ProcessingPool as Pool
from sklearn.preprocessing import StandardScaler
from scipy.special import softmax, expit
from collections import deque
from tqdm import trange
import pandas as pd
import numpy as np
import itertools
import pdb
import os
class DefaultConfig(object):
path_raw_data = '/data/execution_data_v2/raw'
# path_pkl_data = '/data/execution_data/pkl'
path_pkl_data = '/mnt/execution_data_v2/pkl'
# path_pkl_data = os.path.expanduser('~/execution_data/pkl')
result_path = 'results/exp36'
code_list = CODE_LIST
date_list = JUNE_DATE_LIST
code_list_validation = VALIDATION_CODE_LIST
date_list_validation = VALIDATION_DATE_LIST
agent_scale = 100000
agent_batch_size = 128
agent_learn_start = 1000
agent_gamma = 0.998
agent_epsilon = 0.7
agent_total_steps = 20 * agent_scale
agent_buffer_size = agent_scale
agent_network_update_freq = 4
# Smooth L1 loss (SL1) or mean squared error (MSE)
agent_loss_type = 'SL1'
agent_lr_decay_freq = 2000
agent_target_update_freq = 2000
agent_eval_freq = 2000
# The decay factor reaches the 0.01 floor after about 50% of agent_scale episodes
agent_epsilon_decay = np.exp(np.log(0.01) / (agent_scale * 0.5))
agent_plot_freq = 20000
agent_device = 'cuda'
# Selected features
simulation_features = [
'bidPrice1', 'bidPrice2', 'bidPrice3', 'bidPrice4', 'bidPrice5',
'bidVolume1', 'bidVolume2', 'bidVolume3', 'bidVolume4', 'bidVolume5',
'askPrice1', 'askPrice2', 'askPrice3', 'askPrice4', 'askPrice5',
'askVolume1', 'askVolume2', 'askVolume3', 'askVolume4', 'askVolume5',
'high_low_price_diff', 'close_price', 'volume', 'vwap', 'time_diff',
'ask_bid_spread', 'ab_volume_misbalance', 'transaction_net_volume', 'volatility',
'trend', 'immediate_market_order_cost_bid',
]
# ############################### Trade Setting Parameters ###############################
# Planning horizon is 30mins
simulation_planning_horizon = 30
# Total volume to trade w.r.t. the basis volume
simulation_volume_ratio = 0.005
# Order volume = total volume / simulation_num_shares
simulation_num_shares = 10
# Maximum quantity is total_quantity / simulation_num_shares; further devide this into 3 levels
simulation_discrete_quantities = 3
# Choose the wrapper
simulation_action_type = 'discrete_pq'
# Discrete action space
simulation_discrete_actions = \
list(itertools.product(
np.concatenate([[-50, -40, -30, -25, -20, -15], np.linspace(-10, 10, 21), [15, 20, 25, 30, 40, 50]]),
np.arange(simulation_discrete_quantities) + 1
))
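# Note: the grid above pairs 33 relative price levels (6 + 21 + 6
# concatenated offsets, in basis points) with 3 quantity levels,
# i.e. 33 * 3 = 99 discrete (price, quantity) actions.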
# ############################### END ###############################
# ############################### Test Parameters ###############################
# Encourage a uniform liquidation strategy
simulation_linear_reg_coeff = [0.1]
agent_learning_rate = [2e-5, 1e-5, 5e-6]
agent_network_structrue = 'MLPNetwork_complex,MLPNetwork_Xcomplex'
# ############################### END ###############################
# Stack the features of the previous x bars
simulation_loockback_horizon = 5
# Whether to return flattened or stacked features of the past x bars
simulation_do_feature_flatten = True
simulation_direction = 'sell'
# If the quantity is not fully filled at the last time step, we place a market order (MO) to liquidate the remainder and add a further penalty
simulation_not_filled_penalty_bp = 2.0
# Scale the price delta if we use continuous actions
# simulation_continuous_action_scale = 10
# The Q network
class MLPNetwork(nn.Module):
def __init__(self, dim_input1, dim_input2, dim_output, hidden=128):
super(MLPNetwork, self).__init__()
self.dim_input1 = dim_input1
self.dim_input2 = dim_input2
self.dim_output = dim_output
self.fc1 = nn.Linear(dim_input1, 2 * hidden)
self.fc2 = nn.Linear(2 * hidden, hidden)
self.fc3 = nn.Linear(dim_input2, hidden)
self.fc4 = nn.Linear(2 * hidden, dim_output)
def forward(self, market_states, private_states):
x = F.relu(self.fc1(market_states))
x = F.relu(self.fc2(x))
y = F.relu(self.fc3(private_states))
z = torch.cat((x, y), 1)
z = self.fc4(z)
return z
def act(self, market_state, private_state, device='cuda'):
market_state = Tensor(market_state).unsqueeze(0).to(device=device)
private_state = Tensor(private_state).unsqueeze(0).to(device=device)
return int(self.forward(market_state, private_state).argmax(1)[0])
def act_egreedy(self, market_state, private_state, e=0.7, device='cuda'):
return self.act(market_state, private_state, device=device) if np.random.rand() > e \
else np.random.randint(self.dim_output)
# The Q network - more parameters
class MLPNetwork_complex(nn.Module):
def __init__(self, dim_input1, dim_input2, dim_output, hidden=256):
super(MLPNetwork_complex, self).__init__()
self.dim_input1 = dim_input1
self.dim_input2 = dim_input2
self.dim_output = dim_output
self.fc1 = nn.Linear(dim_input1, 2 * hidden)
self.fc2 = nn.Linear(2 * hidden, hidden)
self.fc3 = nn.Linear(dim_input2, hidden)
self.fc4 = nn.Linear(2 * hidden, hidden)
self.fc5 = nn.Linear(hidden, dim_output)
def forward(self, market_states, private_states):
x = F.relu(self.fc1(market_states))
x = F.relu(self.fc2(x))
y = F.relu(self.fc3(private_states))
z = torch.cat((x, y), 1)
z = F.relu(self.fc4(z))
z = self.fc5(z)
return z
def act(self, market_state, private_state, device='cuda'):
market_state = Tensor(market_state).unsqueeze(0).to(device=device)
private_state = Tensor(private_state).unsqueeze(0).to(device=device)
return int(self.forward(market_state, private_state).argmax(1)[0])
def act_egreedy(self, market_state, private_state, e=0.7, device='cuda'):
return self.act(market_state, private_state, device=device) if np.random.rand() > e \
else np.random.randint(self.dim_output)
# The Q network - more more parameters
class MLPNetwork_Xcomplex(nn.Module):
def __init__(self, dim_input1, dim_input2, dim_output, hidden=512):
super(MLPNetwork_Xcomplex, self).__init__()
self.dim_input1 = dim_input1
self.dim_input2 = dim_input2
self.dim_output = dim_output
self.fc1 = nn.Linear(dim_input1, 2 * hidden)
self.fc2 = nn.Linear(2 * hidden, hidden)
self.fc3 = nn.Linear(dim_input2, hidden)
self.fc4 = nn.Linear(2 * hidden, hidden)
self.fc5 = nn.Linear(hidden, hidden)
self.fc6 = nn.Linear(hidden, dim_output)
def forward(self, market_states, private_states):
x = F.relu(self.fc1(market_states))
x = F.relu(self.fc2(x))
y = F.relu(self.fc3(private_states))
z = torch.cat((x, y), 1)
z = F.relu(self.fc4(z))
z = F.relu(self.fc5(z))
z = self.fc6(z)
return z
def act(self, market_state, private_state, device='cuda'):
market_state = Tensor(market_state).unsqueeze(0).to(device=device)
private_state = Tensor(private_state).unsqueeze(0).to(device=device)
return int(self.forward(market_state, private_state).argmax(1)[0])
def act_egreedy(self, market_state, private_state, e=0.7, device='cuda'):
return self.act(market_state, private_state, device=device) if np.random.rand() > e \
else np.random.randint(self.dim_output)
# The Q network - more parameters + positional encoding
class MLPNetwork_complex_posenc(nn.Module):
def __init__(self, dim_input1, dim_input2, dim_output, hidden=256):
super(MLPNetwork_complex_posenc, self).__init__()
self.dim_input1 = dim_input1
self.dim_input2 = dim_input2
self.dim_output = dim_output
self.hidden = hidden
self.fc1 = nn.Linear(dim_input1, 2 * hidden)
self.fc2 = nn.Linear(2 * hidden, hidden)
self.fc4 = nn.Linear(2 * hidden, hidden)
self.fc5 = nn.Linear(hidden, dim_output)
def forward(self, market_states, private_states):
y = torch.einsum('bi, j->bij', private_states, torch.arange(self.hidden // self.dim_input2, device=private_states.device))
y = y.view(-1, self.hidden)
y = torch.sin(y * 12345).detach()
x = F.relu(self.fc1(market_states))
x = F.relu(self.fc2(x))
z = torch.cat((x, y), 1)
z = F.relu(self.fc4(z))
z = self.fc5(z)
return z
def act(self, market_state, private_state, device='cuda'):
market_state = Tensor(market_state).unsqueeze(0).to(device=device)
private_state = Tensor(private_state).unsqueeze(0).to(device=device)
return int(self.forward(market_state, private_state).argmax(1)[0])
def act_egreedy(self, market_state, private_state, e=0.7, device='cuda'):
return self.act(market_state, private_state, device=device) if np.random.rand() > e \
else np.random.randint(self.dim_output)
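# Sketch of the Double-DQN style target used later in Agent.learn: the online
# network selects the argmax action while the target network evaluates it,
# which reduces the over-estimation bias of vanilla Q-learning targets.
def _double_dqn_target_sketch(online_net, target_net, nms, nps, rewards, masks, gamma):
    with torch.no_grad():
        next_actions = online_net(nms, nps).argmax(1)
        next_q = target_net(nms, nps)[torch.arange(nms.shape[0]), next_actions]
        return rewards + masks * gamma * next_q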
class ReplayBuffer(object):
"""docstring for ReplayBuffer"""
def __init__(self, maxlen):
super(ReplayBuffer, self).__init__()
self.maxlen = maxlen
self.data = deque(maxlen=maxlen)
def push(self, *args):
self.data.append(args)
def sample(self, batch_size):
inds = np.random.choice(len(self.data), batch_size, replace=False)
return zip(*[self.data[i] for i in inds])
def sample_all(self):
return zip(*list(self.data))
def update_all(self, new_data, ind):
for i in range(len(self.data)):
tup = list(self.data[i])
tup[ind] = new_data[i, :]
self.data[i] = tuple(tup)
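# Minimal usage sketch: transitions go in as flat tuples and come back
# column-wise (one sequence per field), mirroring how Agent.learn unpacks
# (sm, sp, a, r, nsm, nsp, done) batches below.
def _replay_buffer_sketch():
    buf = ReplayBuffer(maxlen=100)
    for i in range(10):
        buf.push(np.zeros(3), np.zeros(2), i % 4, float(i), np.zeros(3), np.zeros(2), False)
    sm, sp, a, r, nsm, nsp, d = buf.sample(batch_size=4)
    return len(sm), len(r)  # both 4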
class Agent(object):
def __init__(self, config):
super(Agent, self).__init__()
self._set_seed()
self.config = config
self.env = make_env(config)
self.dim_input1 = self.env.observation_dim # dimension of market states
self.dim_input2 = 2 # dimension of private states
self.dim_output = self.env.action_dim
network = config.agent_network_structrue
self.network = network(self.dim_input1, self.dim_input2, self.dim_output).to(device=self.config.agent_device)
self.network_target = network(self.dim_input1, self.dim_input2, self.dim_output).to(device=self.config.agent_device)
self.network_target.load_state_dict(self.network.state_dict())
self.optimizer = opt.Adam(self.network.parameters(), lr=config.agent_learning_rate)
self.scheduler = opt.lr_scheduler.StepLR(self.optimizer, step_size=config.agent_lr_decay_freq, gamma=0.998)
self.buffer = ReplayBuffer(self.config.agent_buffer_size)
self.evaluation = Evaluation(self.config)
if config.agent_loss_type == 'MSE':
self.loss_func = nn.MSELoss()
elif config.agent_loss_type == 'SL1':
self.loss_func = F.smooth_l1_loss
def _set_seed(self, seed=None):
if seed is None:
seed = int.from_bytes(os.urandom(4), byteorder='little')
else:
seed = seed + 1234
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
@staticmethod
def _filter(state):
return np.clip(state, -3, 3)
def _to_tensor(self, tensor, dtype=torch.float):
return torch.tensor(tensor, dtype=dtype, device=self.config.agent_device)
def learn(self):
train_record = []
eval_record = []
reward = 0
eplen = 0
loss = 0
avg_Q = 0
epsilon = self.config.agent_epsilon
ms_scaler = StandardScaler()
sm, sp = self.env.reset()
for i in trange(self.config.agent_total_steps):
# Step 1: Execute one step and store it to the replay buffer
if i <= self.config.agent_learn_start:
a = self.env.action_sample_func()
else:
tsm = ms_scaler.transform(sm.reshape(1, -1)).flatten()
a = self.network.act_egreedy(tsm, sp, e=epsilon, device=self.config.agent_device)
nsm, nsp, r, done, info = self.env.step(a)
self.buffer.push(sm, sp, a, r, nsm, nsp, done)
reward += r
eplen += 1
if done:
train_record.append(dict(
i=i,
reward=reward,
eplen=eplen,
epsilon=epsilon,
lr=self.optimizer.param_groups[0]['lr'],
loss=float(loss),
avg_Q=float(avg_Q),
BP=self.env.get_metric('BP'),
IS=self.env.get_metric('IS'),
code=info['code'],
date=info['date'],
start_index=info['start_index']
))
reward = 0
eplen = 0
epsilon = max(0.01, epsilon * self.config.agent_epsilon_decay)
sm, sp = self.env.reset()
else:
sm, sp = nsm, nsp
# Step 2: Estimate variance for market states
if i == self.config.agent_learn_start:
market_states, _, _, _, nmarket_states, _, _ = self.buffer.sample_all()
ms_scaler.fit(np.array(market_states))
                # Since the buffer will be reused later, scale the market states already stored in it
self.buffer.update_all(ms_scaler.transform(market_states), 0)
self.buffer.update_all(ms_scaler.transform(nmarket_states), 4)
# Step 3: Update the network every several steps
if i >= self.config.agent_learn_start and i % self.config.agent_network_update_freq == 0:
# sample a batch from the replay buffer
bsm, bsp, ba, br, bnsm, bnsp, bd = self.buffer.sample(self.config.agent_batch_size)
market_states = self._to_tensor(self._filter(ms_scaler.transform(np.array(bsm))))
private_states = self._to_tensor(np.array(bsp))
actions = self._to_tensor(np.array(ba), dtype=torch.long)
rewards = self._to_tensor(np.array(br))
nmarket_states = self._to_tensor(self._filter(ms_scaler.transform(np.array(bnsm))))
nprivate_states = self._to_tensor(np.array(bnsp))
masks = self._to_tensor(1 - np.array(bd) * 1)
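                # Double-DQN style target: the online network selects the greedy next action,
                # while the target network evaluates it; this decoupling reduces the
                # overestimation bias of vanilla DQN targets.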
nactions = self.network(nmarket_states, nprivate_states).argmax(1)
Qtarget = (rewards + masks * self.config.agent_gamma * \
self.network_target(nmarket_states, nprivate_states)[range(self.config.agent_batch_size), \
nactions]).detach()
Qvalue = self.network(market_states, private_states)[range(self.config.agent_batch_size), actions]
avg_Q = Qvalue.mean().detach()
loss = self.loss_func(Qvalue, Qtarget)
self.network.zero_grad()
loss.backward()
for param in self.network.parameters():
param.grad.data.clamp_(-1, 1)
# print('Finish the {}-th iteration, the loss = {}'.format(i, float(loss)))
self.optimizer.step()
self.scheduler.step()
# Step 4: Update target network
if i % self.config.agent_target_update_freq == 0:
self.network_target.load_state_dict(self.network.state_dict())
# Step 5: Evaluate and log performance
if i % self.config.agent_plot_freq == 0 and len(train_record) > 0:
eval_agent = (lambda sm, sp: self.network.act_egreedy(ms_scaler.transform(sm.reshape(1, -1)).flatten(), sp, e=0.0)) \
if i > self.config.agent_learn_start else \
(lambda sm, sp: self.network.act_egreedy(sm, sp, e=0.0))
self.evaluation.evaluate_detail_batch(eval_agent, iteration=i)
print(train_record[-1])
if i % self.config.agent_eval_freq == 0:
eval_agent = (lambda sm, sp: self.network.act_egreedy(ms_scaler.transform(sm.reshape(1, -1)).flatten(), sp, e=0.0)) \
if i > self.config.agent_learn_start else \
(lambda sm, sp: self.network.act_egreedy(sm, sp, e=0.0))
eval_record.append(self.evaluation.evaluate(eval_agent))
print(eval_record[-1])
return train_record, eval_record
class Evaluation(object):
def __init__(self, config):
super(Evaluation, self).__init__()
self.config = config
self.env = make_env(config)
def evaluate(self, agent):
bp_list = []
rew_list = []
for code in self.config.code_list_validation:
for date in self.config.date_list_validation:
record = self.evaluate_single(agent, code=code, date=date)
bp_list.append(record['BP'].values[-1])
rew_list.append(record['reward'].sum())
return dict(
BP=np.mean(bp_list),
reward=np.mean(rew_list)
)
def evaluate_detail_batch(self, agent, iteration=1,
code='000504.XSHE',
date_list=['2021-06-01', '2021-06-03', '2021-06-04', '2021-07-02', '2021-07-05', '2021-07-06']):
path = os.path.join(self.config.result_path, 'evaluation', 'it{:08d}'.format(iteration))
os.makedirs(path, exist_ok=True)
record = []
for date in date_list:
for i in range(5):
res = self.evaluate_single(agent, code=code, date=date)
record.append(res)
Figure().plot_policy(df=res, filename=os.path.join(path, 'fig_{}_{}_{}.png'.format(code, date, i)))
pd.concat(record).to_csv(os.path.join(path, 'detail_{}.csv'.format(code)))
def evaluate_single(self, agent, code='600519.XSHG', date='2021-06-01'):
record = []
sm, sp = self.env.reset(code, date)
done = False
step = 0
action = None
info = dict(status=None)
while not done:
action = agent(sm, sp)
nsm, nsp, reward, done, info = self.env.step(action)
if self.config.simulation_action_type == 'discrete_pq':
order_price = self.config.simulation_discrete_actions[action][0]
order_price = np.round((1 + order_price / 10000) \
* self.env.data.obtain_level('askPrice', 1) * 100) / 100
elif self.config.simulation_action_type == 'discrete_p':
order_price = self.config.simulation_discrete_actions[action]
order_price = np.round((1 + order_price / 10000) \
* self.env.data.obtain_level('askPrice', 1) * 100) / 100
elif self.config.simulation_action_type == 'discrete_q':
order_price = self.env.data.obtain_level('bidPrice', 1)
record.append(dict(
code=code,
date=date,
step=step,
quantity=self.env.quantity,
action=action,
ask_price=self.env.data.obtain_level('askPrice', 1),
bid_price=self.env.data.obtain_level('bidPrice', 1),
order_price=order_price,
reward=reward,
cash=self.env.cash,
BP=self.env.get_metric('BP'),
IS=self.env.get_metric('IS'),
status=info['status'],
index=self.env.data.current_index
))
step += 1
sm, sp = nsm, nsp
return pd.DataFrame(record)
class Figure(object):
def __init__(self):
pass
@staticmethod
def plot_policy(df, filename):
fig, ax1 = plt.subplots(figsize=(15, 6))
ax2 = ax1.twinx()
ax1.plot(df['index'], df['ask_price'], label='ask_price')
ax1.plot(df['index'], df['bid_price'], label='bid_price')
ax1.plot(df['index'], df['order_price'], label='order_price')
ax1.legend(loc='lower left')
ax2.plot(df['index'], df['quantity'], 'k*', label='inventory')
ax1.set_title('{} {} BP={:.4f}'.format(df['code'].values[-1], df['date'].values[-1], df['BP'].values[-1]))
ax2.legend(loc='upper right')
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
@staticmethod
def plot_training_process_basic(df, filename):
while df.shape[0] > 1500:
df = df[::2]
fig, ax1 = plt.subplots(figsize=(15, 6))
ax2 = ax1.twinx()
ax1.plot(df.index.values, df['reward'], 'C0', label='reward')
ax1.legend(loc='lower left')
ax2.plot(df.index.values, df['BP'], 'C1', label='BP')
ax2.legend(loc='upper right')
top_size = df.shape[0] // 10
mean_bp_first = np.mean(df['BP'].values[:top_size])
mean_bp_last = np.mean(df['BP'].values[-top_size:])
mean_rew_first = np.mean(df['reward'].values[:top_size])
mean_rew_last = np.mean(df['reward'].values[-top_size:])
ax2.set_title('BP {:.4f}->{:.4f} reward {:.4f}->{:.4f}'.format(mean_bp_first, mean_bp_last, mean_rew_first, mean_rew_last))
if 'loss' in df.columns:
ax3 = ax1.twinx()
p3, = ax3.plot(df.index.values, df['loss'], 'C2')
ax3.yaxis.label.set_color('C2')
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
return dict(mean_bp_first=mean_bp_first, mean_bp_last=mean_bp_last, mean_rew_first=mean_rew_first, mean_rew_last=mean_rew_last)
def run(argus):
model, lr, lin_reg, parallel_id = argus
config = DefaultConfig()
config.agent_learning_rate = lr
config.simulation_linear_reg_coeff = lin_reg
config.agent_network_structrue = model
info = dict(learning_rate=lr, linear_reg=lin_reg, architecture=model.__name__, parallel_id=parallel_id)
id_str = '{}_lr{:.1E}_linreg_{:.1E}_{}'.format(model.__name__, lr, lin_reg, parallel_id)
config.result_path = os.path.join(config.result_path, id_str)
os.makedirs(config.result_path, exist_ok=True)
extend_path = lambda x: os.path.join(config.result_path, x)
agent = Agent(config)
train_record, eval_record = agent.learn()
train_record, eval_record = pd.DataFrame(train_record), pd.DataFrame(eval_record)
train_record.to_csv(extend_path('dqn_train_record.csv'))
eval_record.to_csv(extend_path('dqn_eval_record.csv'))
train_info = Figure().plot_training_process_basic(train_record, extend_path('dqn_train_record.png'))
eval_info = Figure().plot_training_process_basic(eval_record, extend_path('dqn_eval_record.png'))
info.update({('trn_' + k): v for k, v in train_info.items()})
info.update({('val_' + k): v for k, v in eval_info.items()})
return info
if __name__ == '__main__':
record = []
test_list = list(itertools.product(
[MLPNetwork_complex, MLPNetwork_Xcomplex],
[2e-5, 1e-5, 5e-6],
[0.1, 0.01],
np.arange(5)
))
parallel = False
if parallel:
pool = Pool(4)
record = pool.map(run, test_list)
else:
record = []
        for tmp in test_list:
            tmp_res = run(tmp)
            record.append(tmp_res)
record = pd.DataFrame(record)
record.to_csv(os.path.join(DefaultConfig().result_path, 'result_original.csv'))
stats = record.groupby(['learning_rate', 'linear_reg', 'architecture']).agg([np.mean, np.std])
stats.to_csv(os.path.join(DefaultConfig().result_path, 'result_stats.csv'))
| 24,410 | 39.82107 | 135 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/env.py
|
"""
Simulated environment for trade execution
Terminology
Time tick: The minimal time interval of our raw data = 3s
Bar: The time interval of our simulated environment
Horizon: The total time interval for the sequential decision problem
"""
import os
import pdb
import pickle
import pandas as pd
import numpy as np
from pathos.multiprocessing import ProcessingPool as Pool
from constants import CODE_LIST, JUNE_DATE_LIST
NUM_CORES = 40
FEATURE_SET_LOB = [
'bidPrice1', 'bidPrice2', 'bidPrice3', 'bidPrice4', 'bidPrice5',
'bidVolume1', 'bidVolume2', 'bidVolume3', 'bidVolume4', 'bidVolume5',
'askPrice1', 'askPrice2', 'askPrice3', 'askPrice4', 'askPrice5',
'askVolume1', 'askVolume2', 'askVolume3', 'askVolume4', 'askVolume5',
'high_low_price_diff', 'close_price', 'volume', 'vwap', 'time_diff'
]
FEATURE_SET_FULL = FEATURE_SET_LOB + [
'ask_bid_spread', 'ab_volume_misbalance', 'transaction_net_volume',
'volatility', 'trend', 'immediate_market_order_cost_bid',
'VOLR', 'PCTN_1min', 'MidMove_1min', 'weighted_price', 'order_imblance',
'trend_strength'
]
# stock.csv -> raw/tic/2022-01-01.csv
class Preprocess(object):
filename = './data/stock.csv'
data = pd.read_csv(filename)
columns = data.columns.values.tolist()
tradeDate = []
dataTime = []
for item in data['datetime']:
tmp_tradedate = item[:10].replace('.', '-')
tmp_datatime = item[11:-4]
tradeDate.append(tmp_tradedate)
dataTime.append(tmp_datatime)
data['tradeDate'] = tradeDate
data['dataTime'] = dataTime
data.rename(columns=lambda x: x.replace('ask_volume', 'askVolume'), inplace=True)
data.rename(columns=lambda x: x.replace('bid_volume', 'bidVolume'), inplace=True)
    data.rename(columns=lambda x: x.replace('bid', 'bidPrice') if len(x) >= 4 and x[3].isdigit() else x, inplace=True)
    data.rename(columns=lambda x: x.replace('ask', 'askPrice') if len(x) >= 4 and x[3].isdigit() else x, inplace=True)
data.rename(columns=lambda x: x.replace('finrl_ticker', 'ticker'), inplace=True)
data.rename(columns=lambda x: x.replace('lastprice', 'lastPrice'), inplace=True)
data.rename(columns=lambda x: x.replace('delta_volume', 'volume'), inplace=True)
data.rename(columns=lambda x: x.replace('delta_turnover', 'value'), inplace=True)
data.drop(['datetime'], axis=1, inplace=True)
    # Newly added fields; they are not present in the original data
data['prevClosePrice'] = 20
data['openPrice'] = 21
    # Drop rows outside the trading sessions (9:30-11:30, 13:00-14:57)
remove_indices = []
vec = data['dataTime']
for i in range(len(vec)):
if vec[i] < '09:30:00' or (vec[i] > '11:30:00' and vec[i] < '13:00:00') or vec[i] > '14:57:00':
remove_indices.append(i)
data.drop(index=remove_indices, inplace=True)
data.to_csv('./data/preprocessedstock.csv', index=False)
tickers = data['ticker'].unique()
dates = data['tradeDate'].unique()
print(f'tickers: {tickers}')
print(f'dates: {dates}')
data_tickers = {}
num_rows = data.shape[0]
# tmp = pd.DataFrame()
this_ticker = data['ticker'].iloc[0]
this_date = data['tradeDate'].iloc[0]
begin_row_index = 0
    for i in range(num_rows):
        if data['ticker'].iloc[i] == this_ticker:
            if data['tradeDate'].iloc[i] != this_date:
                # A new date for the same ticker: store the finished (ticker, date) slice
                df = data.iloc[list(range(begin_row_index, i)), :]
                data_tickers.setdefault(this_ticker, {})[this_date] = df
                this_date = data['tradeDate'].iloc[i]
                begin_row_index = i
            elif i == num_rows - 1:
                # Last row: store the final slice (inclusive of row i)
                df = data.iloc[list(range(begin_row_index, i + 1)), :]
                data_tickers.setdefault(this_ticker, {})[this_date] = df
        else:
            # A new ticker begins: store the finished slice and reset the markers
            df = data.iloc[list(range(begin_row_index, i)), :]
            data_tickers.setdefault(this_ticker, {})[this_date] = df
            this_ticker = data['ticker'].iloc[i]
            this_date = data['tradeDate'].iloc[i]
            begin_row_index = i
for ticker in tickers:
if not os.path.exists('./data/raw/' + ticker):
os.makedirs('./data/raw/' + ticker)
for date in dates:
df = data_tickers[ticker][date]
df.to_csv('./data/raw/' + ticker + '/' + date + '.csv', index=False)
pass
class DefaultConfig(object):
path_raw_data = './data/raw'
# path_pkl_data = '/data/execution_data/pkl'
path_pkl_data = './data/pkl'
result_path = './results/exp_env'
code_list = CODE_LIST
date_list = JUNE_DATE_LIST
# ############################### Trade Setting Parameters ###############################
# Planning horizon is 30mins
simulation_planning_horizon = 30
# Order volume = total volume / simulation_num_shares
simulation_num_shares = 10
# Total volume to trade w.r.t. the basis volume
simulation_volume_ratio = 0.005
# Encourage a uniform liquidation strategy
simulation_linear_reg_coeff = 0.1
# Features used for the market variable
simulation_features = FEATURE_SET_FULL # users can set
# Stack the features of the previous x bars
simulation_loockback_horizon = 5
# Whether return flattened or stacked features of the past x bars
simulation_do_feature_flatten = True
# A liquidation task
simulation_direction = 'sell'
# If the quantity is not fully filled at the last time step,
    # we place an MO to fully liquidate, plus an additional penalty (unit: bp)
simulation_not_filled_penalty_bp = 2.0
# Use discrete actions (unit: relative bp)
simulation_discrete_actions = \
np.concatenate([[-50, -40, -30, -25, -20, -15], np.linspace(-10, 10, 21), [15, 20, 25, 30, 40, 50]])
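    # (the grid above has 6 + 21 + 6 = 33 discrete price offsets in relative basis points;
    #  e.g. action index 6 maps to -10 bp, and the wrappers below turn an offset into
    #  an order price via round((1 + offset / 10000) * askPrice1, 2))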
# Scale the price delta if we use continuous actions
simulation_continuous_action_scale = 10
# Use 'discrete' or 'continuous' action space?
simulation_action_type = 'discrete_p'
# ############################### END ###############################
class DataPrepare(object):
"""
For data preparation:
Parse raw csv files to pickle files required by the simulated environment
I.e., we transform time-tick-level csv data into bar level pkl data
"""
def __init__(self, config):
self.config = config
if not os.path.isdir(self.config.path_raw_data):
self.download_raw_data()
os.makedirs(self.config.path_pkl_data, exist_ok=True)
file_paths = self.obtain_file_paths()
parallel = False
res = []
if parallel:
pool = Pool(NUM_CORES)
res = pool.map(self.process_file, file_paths)
else:
for path in file_paths:
tmp_dict = self.process_file(path)
res.append(tmp_dict)
pd.DataFrame(res).to_csv('./data/data_generation_report.csv')
def download_raw_data(self):
raise NotImplementedError
@staticmethod
def _VOLR(df, beta1=0.551, beta2=0.778, beta3=0.699):
"""
Volume Ratio:
        reflects the supply and demand implied by investor behavior.
Unit: Volume
"""
volr = beta1 * (df['bidVolume1'] - df['askVolume1']) / (df['bidVolume1'] + df['askVolume1']) + \
beta2 * (df['bidVolume2'] - df['askVolume2']) / (df['bidVolume2'] + df['askVolume2']) + \
beta3 * (df['bidVolume3'] - df['askVolume3']) / (df['bidVolume3'] + df['askVolume3'])
return volr
@staticmethod
def _PCTN(df, n):
"""
Price Percentage Change:
        a simple mathematical concept that represents the degree of change over time;
        it is used for many purposes in finance, often to represent the price change of a security.
Unit: One
"""
mid = (df['askPrice1'] + df['bidPrice1']) / 2
pctn = (mid - mid.shift(n)) / mid
return pctn
@staticmethod
def _MidMove(df, n):
"""
Middle Price Move:
indicates the movement of middle price, which can simply be defined as the average of
the current bid and ask prices being quoted.
Unit: One
"""
mid = (df['askPrice1'] + df['bidPrice1']) / 2
mean = mid.rolling(n).mean()
mid_move = (mid - mean) / mean
return mid_move
@staticmethod
def _BSP(df):
"""
Buy-Sell Pressure:
the distribution of chips in the buying and selling direction.
Unit: Volume
"""
EPS = 1e-5
mid = (df['askPrice1'] + df['bidPrice1']) / 2
w_buy_list = []
w_sell_list = []
for level in range(1, 6):
w_buy_level = mid / (df['bidPrice{}'.format(level)] - mid - EPS)
w_sell_level = mid / (df['askPrice{}'.format(level)] - mid + EPS)
w_buy_list.append(w_buy_level)
w_sell_list.append(w_sell_level)
sum_buy = pd.concat(w_buy_list, axis=1).sum(axis=1)
sum_sell = pd.concat(w_sell_list, axis=1).sum(axis=1)
p_buy_list = []
p_sell_list = []
        for level, (w_buy_level, w_sell_level) in enumerate(zip(w_buy_list, w_sell_list), start=1):
            p_buy_list.append((df['bidVolume{}'.format(level)] * w_buy_level) / sum_buy)
            p_sell_list.append((df['askVolume{}'.format(level)] * w_sell_level) / sum_sell)
p_buy = pd.concat(p_buy_list, axis=1).sum(axis=1)
p_sell = pd.concat(p_sell_list, axis=1).sum(axis=1)
p = np.log((p_sell + EPS) / (p_buy + EPS))
return p
@staticmethod
def _weighted_price(df):
"""
Weighted price: The average price of ask and bid weighted
        by the corresponding volume (divided by the last price).
Unit: One
"""
price_list = []
for level in range(1, 6):
price_level = (df['bidPrice{}'.format(level)] * df['bidVolume{}'.format(level)] + \
df['askPrice{}'.format(level)] * df['askVolume{}'.format(level)]) / \
(df['bidVolume{}'.format(level)] + df['askVolume{}'.format(level)])
price_list.append(price_level)
weighted_price = pd.concat(price_list, axis=1).mean(axis=1)
weighted_price = weighted_price / (df['lastPrice'] + 1e-5)
return weighted_price
@staticmethod
def _order_imblance(df):
"""
Order imbalance:
a situation resulting from an excess of buy or sell orders
for a specific security on a trading exchange,
making it impossible to match the orders of buyers and sellers.
Unit: One
"""
oi_list = []
for level in range(1, 6):
oi_level = (df['bidVolume{}'.format(level)] - df['askVolume{}'.format(level)]) / \
(df['bidVolume{}'.format(level)] + df['askVolume{}'.format(level)])
oi_list.append(oi_level)
oi = pd.concat(oi_list, axis=1).mean(axis=1)
return oi
@staticmethod
def _trend_strength(df, n):
"""
Trend strength: describes the strength of the short-term trend.
Unit: One
"""
mid = (df['askPrice1'] + df['bidPrice1']) / 2
diff_mid = mid - mid.shift(1)
sum1 = diff_mid.rolling(n).sum()
sum2 = diff_mid.abs().rolling(n).sum()
TS = sum1 / sum2
return TS
def process_file(self, paths, debug=True):
csv_path, pkl_path = paths
# Step 1: Read data
data = pd.read_csv(csv_path, index_col=0)
csv_shape0, csv_shape1 = data.shape
# Filter out abnormal files (e.g., the stock is not traded on this day)
if csv_shape0 == 1:
return dict(csv_path=csv_path, pkl_path=pkl_path, status='EMPTY')
if data['volume'].max() <= 0:
return dict(csv_path=csv_path, pkl_path=pkl_path, status='NO_VOL')
if data['lastPrice'][data['lastPrice'] > 0].mean() >= 1.09 * data['prevClosePrice'].values[0]:
return dict(csv_path=csv_path, pkl_path=pkl_path, status='LIMIT_UP')
if data['lastPrice'][data['lastPrice'] > 0].mean() <= 0.91 * data['prevClosePrice'].values[0]:
return dict(csv_path=csv_path, pkl_path=pkl_path, status='LIMIT_DO')
if debug:
print('Current process: {} {} Shape: {}'.format(csv_path, pkl_path, data.shape))
# assert csv_shape1 == 34
# Step 2: Formatting the raw data
trade_date = data.iloc[0]['tradeDate']
data.index = pd.DatetimeIndex(trade_date + ' ' + data['dataTime'])
data = data.resample('3S', closed='right', label='right').last().fillna(method='ffill')
data['time'] = data.index
# Calculate delta values
data['volume_dt'] = (data['volume'] - data['volume'].shift(1)).fillna(0)
data['value_dt'] = (data['value'] - data['value'].shift(1)).fillna(0)
# Exclude call auction
data = data[data['time'].between(trade_date + ' 09:30:00', trade_date + ' 14:57:00')]
data = data[~data['time'].between(trade_date + ' 11:30:01', trade_date + ' 12:59:59')]
# Step 3: Backtest required bar-level information
# Convert to 1min bar
# 1) current snapshot (5 levels of ask/bid price/volume)
# 2) the lowest/highest ask/bid price that yields partial execution
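        # ask1/bid1 deal volumes are inferred from the tick deltas by solving the system
        #   value_dt = v_ask * askPrice1 + v_bid * bidPrice1,  v_ask + v_bid = volume_dt
        # for v_ask and v_bid, then clipping each to [0, volume_dt]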
ask1_deal_volume_tick = ((data['value_dt'] - data['volume_dt'] * data['bidPrice1']) \
/ (data['askPrice1'] - data['bidPrice1'])).clip(upper=data['volume_dt'], lower=0)
bid1_deal_volume_tick = ((data['volume_dt'] * data['askPrice1'] - data['value_dt']) \
/ (data['askPrice1'] - data['bidPrice1'])).clip(upper=data['volume_dt'], lower=0)
time_interval = '3s'
# 'T': 1 min
# '3s'
max_last_price = data['lastPrice'].resample(time_interval).max().reindex(data.index).fillna(method='ffill')
min_last_price = data['lastPrice'].resample(time_interval).min().reindex(data.index).fillna(method='ffill')
ask1_deal_volume = ((data['askPrice1'] == max_last_price) * ask1_deal_volume_tick).resample(time_interval).sum()
bid1_deal_volume = ((data['bidPrice1'] == min_last_price) * bid1_deal_volume_tick).resample(time_interval).sum()
max_last_price = data['askPrice1'].resample(time_interval).max()
min_last_price = data['bidPrice1'].resample(time_interval).min()
# Current 5-level ask/bid price/volume (for modeling temporary market impact of MOs)
level_infos = ['bidPrice1', 'bidVolume1', 'bidPrice2', 'bidVolume2', 'bidPrice3', 'bidVolume3', 'bidPrice4',
'bidVolume4', 'bidPrice5', 'bidVolume5', 'askPrice1', 'askVolume1', 'askPrice2', 'askVolume2', 'askPrice3',
'askVolume3', 'askPrice4', 'askVolume4', 'askPrice5', 'askVolume5']
bar_data = data[level_infos].resample(time_interval).first()
# Fix a common bug in data: level data is missing in the last snapshot
        bar_data.iloc[-1] = bar_data.iloc[-1].replace(0.0, np.nan)  # assign back; an in-place replace on the row copy would not propagate
bar_data.fillna(method='ffill', inplace=True)
# Lowest ask/bid executable price and volume till the next bar (for modeling temporary market impact of LOs)
bar_data['max_last_price'] = max_last_price
bar_data['min_last_price'] = min_last_price
bar_data['ask1_deal_volume'] = ask1_deal_volume
bar_data['bid1_deal_volume'] = bid1_deal_volume
# Step 4: Generate state features
# Normalization constant
bar_data['basis_price'] = data['openPrice'].values[0]
bar_data['basis_volume'] = data['volume'].values[
-1] # TODO: change this to total volume of the last day instead of the current day
# Bar information
bar_data['high_price'] = data['lastPrice'].resample(time_interval, closed='right', label='right').max()
bar_data['low_price'] = data['lastPrice'].resample(time_interval, closed='right', label='right').min()
bar_data['high_low_price_diff'] = bar_data['high_price'] - bar_data['low_price']
bar_data['open_price'] = data['lastPrice'].resample(time_interval, closed='right', label='right').first()
bar_data['close_price'] = data['lastPrice'].resample(time_interval, closed='right', label='right').last()
bar_data['volume'] = data['volume_dt'].resample(time_interval, closed='right', label='right').sum()
bar_data['vwap'] = data['value_dt'].resample(time_interval, closed='right', label='right').sum() / bar_data['volume']
bar_data['vwap'] = bar_data['vwap'].fillna(bar_data['close_price'])
# LOB features
bar_data['ask_bid_spread'] = bar_data['askPrice1'] - bar_data['bidPrice1']
bar_data['ab_volume_misbalance'] = \
(bar_data['askVolume1'] + bar_data['askVolume2'] + bar_data['askVolume3'] + bar_data['askVolume4'] +
bar_data['askVolume5']) \
- (bar_data['bidVolume1'] + bar_data['bidVolume2'] + bar_data['bidVolume3'] + bar_data['bidVolume4'] +
bar_data['bidVolume5'])
bar_data['transaction_net_volume'] = (ask1_deal_volume_tick - bid1_deal_volume_tick).resample(time_interval,
closed='right',
label='right').sum()
bar_data['volatility'] = data['lastPrice'].rolling(20, min_periods=1).std().fillna(0).resample(time_interval,
closed='right',
label='right').last()
bar_data['trend'] = (data['lastPrice'] - data['lastPrice'].shift(20)).fillna(0).resample(time_interval, closed='right',
label='right').last()
bar_data['immediate_market_order_cost_ask'] = self._calculate_immediate_market_order_cost(bar_data, 'ask')
bar_data['immediate_market_order_cost_bid'] = self._calculate_immediate_market_order_cost(bar_data, 'bid')
# new LOB features
bar_data['VOLR'] = self._VOLR(data).fillna(0).resample(time_interval, closed='right', label='right').last()
bar_data['PCTN_1min'] = self._PCTN(data, n=20).fillna(0).resample(time_interval, closed='right', label='right').last()
bar_data['MidMove_1min'] = self._MidMove(data, n=20).fillna(0).resample(time_interval, closed='right',
label='right').last()
bar_data['BSP'] = self._BSP(data).fillna(0).resample(time_interval, closed='right', label='right').last()
bar_data['weighted_price'] = self._weighted_price(data).fillna(0).resample(time_interval, closed='right',
label='right').last()
bar_data['order_imblance'] = self._order_imblance(data).fillna(0).resample(time_interval, closed='right',
label='right').last()
bar_data['trend_strength'] = self._trend_strength(data, n=20).fillna(0).resample(time_interval, closed='right',
label='right').last()
bar_data['time'] = bar_data.index
bar_data = bar_data[bar_data['time'].between(trade_date + ' 09:30:00', trade_date + ' 14:57:00')]
bar_data = bar_data[~bar_data['time'].between(trade_date + ' 11:30:01', trade_date + ' 12:59:59')]
bar_data['time_diff'] = (bar_data['time'] - bar_data['time'].values[0]) / np.timedelta64(1, 'm') / 330
bar_data = bar_data.reset_index(drop=True)
# Step 5: Save to pickle
with open(pkl_path, 'wb') as f:
pickle.dump(bar_data, f, pickle.HIGHEST_PROTOCOL)
return dict(csv_path=csv_path, pkl_path=pkl_path,
csv_shape0=csv_shape0, csv_shape1=csv_shape1,
res_shape0=bar_data.shape[0], res_shape1=bar_data.shape[1])
@staticmethod
def _calculate_immediate_market_order_cost(bar_data, direction='ask'):
# Assume the market order quantity is 1/500 of the basis volume
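        # The returned cost is the (approximate) VWAP of sweeping up to 5 book levels for
        # that quantity, expressed as a premium over askPrice1 (buys) or a discount from
        # bidPrice1 (sells); any quantity unfilled beyond level 5 is ignored.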
remaining_quantity = (bar_data['basis_volume'] / 500).copy()
total_fee = pd.Series(0, index=bar_data.index)
for i in range(1, 6):
total_fee = total_fee \
+ bar_data['{}Price{}'.format(direction, i)] \
* np.minimum(bar_data['{}Volume{}'.format(direction, i)], remaining_quantity)
remaining_quantity = (remaining_quantity - bar_data['{}Volume{}'.format(direction, i)]).clip(lower=0)
if direction == 'ask':
return total_fee / (bar_data['basis_volume'] / 500) - bar_data['askPrice1']
elif direction == 'bid':
return bar_data['bidPrice1'] - total_fee / (bar_data['basis_volume'] / 500)
def obtain_file_paths(self):
file_paths = []
tickers = os.listdir(self.config.path_raw_data)
if '.DS_Store' in tickers:
tickers.remove('.DS_Store')
for ticker in tickers:
dates = os.listdir(os.path.join(self.config.path_raw_data, ticker))
file_paths.extend([
(os.path.join(self.config.path_raw_data, ticker, date),
os.path.join(self.config.path_pkl_data, ticker, date.split('.')[0] + '.pkl')) for date in dates])
os.makedirs(os.path.join(self.config.path_pkl_data, ticker), exist_ok=True)
return file_paths
# Supports data iteration in the simulated environment
class Data(object):
price_5level_features = [
'bidPrice1', 'bidPrice2', 'bidPrice3', 'bidPrice4', 'bidPrice5',
'askPrice1', 'askPrice2', 'askPrice3', 'askPrice4', 'askPrice5',
]
other_price_features = [
'high_price', 'low_price', 'open_price', 'close_price', 'vwap',
]
price_delta_features = [
'ask_bid_spread', 'trend', 'immediate_market_order_cost_ask',
'immediate_market_order_cost_bid', 'volatility', 'high_low_price_diff',
]
volume_5level_features = [
'bidVolume1', 'bidVolume2', 'bidVolume3', 'bidVolume4', 'bidVolume5',
'askVolume1', 'askVolume2', 'askVolume3', 'askVolume4', 'askVolume5',
]
other_volume_features = [
'volume', 'ab_volume_misbalance', 'transaction_net_volume',
'VOLR', 'BSP',
]
backtest_lo_features = [
'max_last_price', 'min_last_price', 'ask1_deal_volume', 'bid1_deal_volume',
]
def __init__(self, config):
self.config = config
self.data = None
self.backtest_data = None
def _maintain_backtest_data(self):
self.backtest_data = \
self.data[self.price_5level_features + self.volume_5level_features + self.backtest_lo_features].copy()
self.backtest_data['latest_price'] = \
(self.backtest_data['askPrice1'] + self.backtest_data['bidPrice1']) / 2
def _normalization(self):
# Keep normalization units
self.basis_price = self.backtest_data.loc[self.start_index, 'latest_price']
self.basis_volume = self.data['basis_volume'].values[0]
# Approximation: Average price change 2% * 50 = 1.0
self.data[self.price_5level_features] = \
(self.data[self.price_5level_features] - self.basis_price) / self.basis_price * 50
self.data[self.other_price_features] = \
(self.data[self.other_price_features] - self.basis_price) / self.basis_price * 50
self.data[self.price_delta_features] = \
self.data[self.price_delta_features] / self.basis_price * 10
# Such that the volumes are equally distributed in the range [-1, 1]
self.data[self.volume_5level_features] = \
self.data[self.volume_5level_features] / self.basis_volume * 100
self.data[self.other_volume_features] = \
self.data[self.other_volume_features] / self.basis_volume * 100
def data_exists(self, code='300733.XSHE', date='2021-09-24'):
return os.path.isfile(os.path.join(self.config.path_pkl_data, code, date + '.pkl'))
def obtain_data(self, code='FINRL_4078', date='2020-12-16', start_index=None, do_normalization=True):
with open(os.path.join(self.config.path_pkl_data, code, date + '.pkl'), 'rb') as f:
self.data = pickle.load(f)
# assert self.data.shape[0] == 239, \
# 'The data should be of the shape (239, 42), instead of {}'.format(self.data.shape)
if start_index is None:
# randomly choose a valid start_index
start_index = self._random_valid_start_index()
self._set_horizon(start_index)
else:
self._set_horizon(start_index)
assert self._sanity_check(), "code={} date={} with start_index={} is invalid".format(code, date, start_index)
self._maintain_backtest_data()
if do_normalization:
self._normalization()
def _random_valid_start_index(self):
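        # A start index is valid only if level-1 quotes are strictly positive over the
        # whole lookback window ending at it (tmp1) and over the planning horizon that
        # follows it (tmp2); one such index is then drawn uniformly at random.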
cols = ['bidPrice1', 'bidVolume1', 'askPrice1', 'askVolume1']
tmp = (self.data[cols] > 0).all(axis=1)
tmp1 = tmp.rolling(self.config.simulation_loockback_horizon).apply(lambda x: x.all())
tmp2 = tmp[::-1].rolling(self.config.simulation_planning_horizon + 1).apply(lambda x: x.all())[::-1]
available_indx = tmp1.loc[(tmp1 > 0) & (tmp2 > 0)].index.tolist()
assert len(available_indx) > 0, "The data is invalid"
return np.random.choice(available_indx)
def random_start_index(self):
"""deprecated"""
return np.random.randint(self.config.simulation_loockback_horizon - 1, 239 - self.config.simulation_planning_horizon)
def pick_horizon(self):
"""deprecated"""
self.start_index = np.random.randint(self.config.simulation_loockback_horizon - 1, 239 - self.config.simulation_planning_horizon)
self.current_index = self.start_index
self.end_index = self.start_index + self.config.simulation_planning_horizon
def _set_horizon(self, start_index):
self.start_index = start_index
self.current_index = self.start_index
self.end_index = self.start_index + self.config.simulation_planning_horizon
def obtain_features(self, do_flatten=True):
features = self.data.loc[self.current_index - self.config.simulation_loockback_horizon + 1: self.current_index,
self.config.simulation_features][::-1].values
if do_flatten:
return features.flatten()
else:
return features
def obtain_future_features(self, features):
return self.data.loc[self.current_index:self.end_index, features]
def obtain_level(self, name, level=''):
return self.backtest_data.loc[self.current_index, '{}{}'.format(name, level)]
def step(self):
self.current_index += 1
def _sanity_check(self):
""" When the price reaches daily limit, the price and volume"""
cols = ['bidPrice1', 'bidVolume1', 'askPrice1', 'askVolume1']
if (self.data.loc[self.start_index:self.end_index, cols] == 0).any(axis=None):
return False
else:
return True
class BaseWrapper(object):
def __init__(self, env):
self.env = env
def reset(self, code=None, date=None, start_index=None):
return self.env.reset(code, date, start_index)
def step(self, action):
return self.env.step(action)
@property
def quantity(self):
return self.env.quantity
@property
def total_quantity(self):
return self.env.total_quantity
@property
def cash(self):
return self.env.cash
@property
def config(self):
return self.env.config
@property
def data(self):
return self.env.data
@property
def observation_dim(self):
return self.env.observation_dim
def get_metric(self, mtype='IS'):
return self.env.get_metric(mtype)
def get_future(self, features, padding=None):
return self.env.get_future(features, padding=padding)
class DiscreteActionBaseWrapper(BaseWrapper):
def __init__(self, env):
super(DiscreteActionBaseWrapper, self).__init__(env)
@property
def action_sample_func(self):
return lambda: np.random.randint(len(self.discrete_actions))
@property
def action_dim(self):
return len(self.discrete_actions)
class DiscretePriceQuantityWrapper(DiscreteActionBaseWrapper):
def __init__(self, env):
super(DiscretePriceQuantityWrapper, self).__init__(env)
self.discrete_actions = self.config.simulation_discrete_actions
self.simulation_discrete_quantities = self.config.simulation_discrete_quantities
self.base_quantity_ratio = self.config.simulation_volume_ratio \
/ self.config.simulation_num_shares / self.simulation_discrete_quantities
def step(self, action):
price, quantity = self.discrete_actions[action]
price = np.round((1 + price / 10000) * self.data.obtain_level('askPrice', 1) * 100) / 100
quantity = self.data.basis_volume * self.base_quantity_ratio * quantity
return self.env.step(dict(price=price, quantity=quantity))
class DiscreteQuantityNingWrapper(DiscreteActionBaseWrapper):
"""
Follows [Ning et al 2020]
    Divides the remaining quantity into several parts and trades them via market orders (MO)
"""
def __init__(self, env):
super(DiscreteQuantityNingWrapper, self).__init__(env)
self.discrete_actions = self.config.simulation_discrete_actions
def step(self, action):
        # A deeply discounted price (-50 bp below ask1) makes the order behave like an MO
        price = -50
        price = np.round((1 + price / 10000) * self.data.obtain_level('askPrice', 1) * 100) / 100
        quantity = self.discrete_actions[action] / (len(self.discrete_actions) - 1) * self.quantity
return self.env.step(dict(price=price, quantity=quantity))
class DiscreteQuantityWrapper(DiscreteActionBaseWrapper):
"""
Specify the quantity and trade using MO
"""
def __init__(self, env):
super(DiscreteQuantityWrapper, self).__init__(env)
self.discrete_actions = self.config.simulation_discrete_actions
def step(self, action):
        # A deeply discounted price (-50 bp below ask1) makes the order behave like an MO
        price = -50
        price = np.round((1 + price / 10000) * self.data.obtain_level('askPrice', 1) * 100) / 100
        quantity = self.discrete_actions[action] / (len(self.discrete_actions) - 1) * self.total_quantity
return self.env.step(dict(price=price, quantity=quantity))
class DiscretePriceWrapper(DiscreteActionBaseWrapper):
"""
The quantity is fixed and equals to total_quantity
"""
def __init__(self, env):
super(DiscretePriceWrapper, self).__init__(env)
self.discrete_actions = self.config.simulation_discrete_actions
self.num_shares = self.config.simulation_num_shares
def step(self, action):
price = self.discrete_actions[action]
price = np.round((1 + price / 10000) * self.data.obtain_level('askPrice', 1) * 100) / 100
quantity = self.total_quantity / self.num_shares
return self.env.step(dict(price=price, quantity=quantity))
class ContinuousActionWrapper(BaseWrapper):
def __init__(self, env):
super(ContinuousActionWrapper, self).__init__(env)
self.fixed_quantity_ratio = self.config.simulation_volume_ratio / self.config.simulation_num_shares
self.num_shares = self.config.simulation_num_shares
def step(self, action):
        price = self.config.simulation_continuous_action_scale * action
price = np.round((1 + price / 10000) * self.data.obtain_level('askPrice', 1) * 100) / 100
quantity = self.total_quantity / self.num_shares
return self.env.step(dict(price=price, quantity=quantity))
def make_env(config):
    # In the action-type names, 'p' stands for price and 'q' for quantity: the action space may
    # select among several discrete prices, several discrete quantities, and so on.
if config.simulation_action_type == 'discrete_p':
return DiscretePriceWrapper(ExecutionEnv(config))
elif config.simulation_action_type == 'continuous':
return ContinuousActionWrapper(ExecutionEnv(config))
elif config.simulation_action_type == 'discrete_pq':
return DiscretePriceQuantityWrapper(ExecutionEnv(config))
elif config.simulation_action_type == 'discrete_q_ning':
return DiscreteQuantityNingWrapper(ExecutionEnv(config))
elif config.simulation_action_type == 'discrete_q':
return DiscreteQuantityWrapper(ExecutionEnv(config))
class ExecutionEnv(object):
"""
Simulated environment for trade execution
Feature 1: There is no model misspecification error since the simulator is based on historical data.
    Feature 2: We can model temporary market impact for MO and LO.
For MO, we assume that the order book is resilient between bars
For LO, we assume that 1) full execution when the price passes through;
2) partial execution when the price reaches; 3) no execution otherwise
"""
def __init__(self, config):
self.config = config
self.current_code = None
self.current_date = None
self.data = Data(config)
self.cash = 0
self.total_quantity = 0
self.quantity = 0
self.valid_code_date_list = self.get_valid_code_date_list()
def get_valid_code_date_list(self):
code_date_list = []
for code in self.config.code_list:
for date in self.config.date_list:
if self.data.data_exists(code, date):
code_date_list.append((code, date))
return code_date_list
def reset(self, code=None, date=None, start_index=None):
count = 0
while True:
if code is None and date is None:
# Uniformly randomly select a code and date
ind = np.random.choice(len(self.valid_code_date_list))
self.current_code, self.current_date = self.valid_code_date_list[ind]
else:
self.current_code, self.current_date = code, date
try:
self.data.obtain_data(self.current_code, self.current_date, start_index)
break
except AssertionError as e:
count += 1
print('Invalid: code={} date={}'.format(self.current_code, self.current_date))
if count > 100:
raise ValueError("code={} date={} is invalid".format(code, date))
except Exception as e:
raise e
self.cash = 0
self.total_quantity = self.config.simulation_volume_ratio * self.data.basis_volume
self.quantity = self.total_quantity
self.latest_price = self.data.obtain_level('latest_price')
        # Notice that we do not use the first time step of the day (which has zero volume)
market_state = self.data.obtain_features(do_flatten=self.config.simulation_do_feature_flatten)
private_state = self._generate_private_state()
return market_state, private_state
def _generate_private_state(self):
elapsed_time = (self.data.current_index - self.data.start_index) / self.config.simulation_planning_horizon
remaining_quantity = self.quantity / self.total_quantity
return np.array([elapsed_time, remaining_quantity])
def get_future(self, features, padding=None):
future = self.data.obtain_future_features(features)
if padding is None:
return future
else:
padding_width = padding - future.shape[0]
future = np.pad(future, ((0, padding_width), (0, 0)), 'edge')
return future
def step(self, action=dict(price=20.71, quantity=300)):
if self.config.simulation_direction == 'sell':
return self._step_sell(action)
else:
raise NotImplementedError
def _step_sell(self, action=dict(price=20.71, quantity=300)):
"""
We only consider limit orders.
        If the limit price is no better than the current market quotes,
        the order is executed as a market order automatically.
"""
info = dict(
code=self.current_code,
date=self.current_date,
start_index=self.data.start_index,
end_index=self.data.end_index,
current_index=self.data.current_index
)
order_quantity = action['quantity']
pre_quantity = self.quantity
pre_cash = self.cash
price_penalty = 0.0
done = (self.data.current_index + 1 >= self.data.end_index)
if done:
action['price'] = 0.0
action['quantity'] = float('inf')
price_penalty = self.config.simulation_not_filled_penalty_bp / 10000 * self.data.basis_price
# Step 1: If can be executed immediately
for level in range(1, 6):
if action['quantity'] > 0 and action['price'] <= self.data.obtain_level('bidPrice', level):
executed_volume = min(self.data.obtain_level('bidVolume', level), action['quantity'], self.quantity)
self.cash += executed_volume * (self.data.obtain_level('bidPrice', level) - price_penalty)
self.quantity -= executed_volume
action['quantity'] -= executed_volume
# Liquidate all the remaining inventory on the last step
if done:
executed_volume = self.quantity
self.cash += executed_volume * (self.data.obtain_level('bidPrice', 5) - price_penalty)
self.quantity = 0
action['quantity'] = 0
# Step 2: If can be executed until the next bar
if action['price'] < self.data.obtain_level('max_last_price'):
executed_volume = min(self.quantity, action['quantity'])
self.cash += executed_volume * action['price']
self.quantity -= executed_volume
action['quantity'] -= executed_volume
elif action['price'] == self.data.obtain_level('max_last_price'):
executed_volume = min(self.quantity, action['quantity'], self.data.obtain_level('ask1_deal_volume'))
self.cash += executed_volume * action['price']
self.quantity -= executed_volume
action['quantity'] -= executed_volume
if action['quantity'] == order_quantity:
info['status'] = 'NOT_FILLED'
elif action['quantity'] == 0:
info['status'] = 'FILLED'
else:
info['status'] = 'PARTIAL_FILLED'
# Step 3: Reward/Done calculation
if not done:
self.data.step()
reward = self._calculate_reward_v1(pre_cash)
market_state = self.data.obtain_features(do_flatten=self.config.simulation_do_feature_flatten)
private_state = self._generate_private_state()
return market_state, private_state, reward, done, info
def _calculate_reward_v1(self, pre_cash):
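        # Reward = cash gained this step, normalized by the basis price and the per-step
        # target volume, minus a linear penalty on the deviation of the remaining
        # inventory from a uniform (TWAP-like) liquidation schedule.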
_recommand_quantity = self.total_quantity * (self.data.end_index - self.data.current_index) \
/ self.config.simulation_planning_horizon
basic_reward = (self.cash - pre_cash) / self.data.basis_price / \
(self.data.basis_volume * self.config.simulation_volume_ratio / self.config.simulation_planning_horizon)
linear_reg = abs(self.quantity - _recommand_quantity) / \
(self.data.basis_volume * self.config.simulation_volume_ratio / self.config.simulation_planning_horizon)
return basic_reward - self.config.simulation_linear_reg_coeff * linear_reg
def _calculate_reward_v2(self, price_diff):
""" problematic """
_recommand_quantity = self.total_quantity * (self.data.end_index - self.data.current_index) \
/ self.config.simulation_planning_horizon
basic_reward = price_diff * self.quantity / self.data.basis_volume
linear_reg = self.data.basis_price * ((self.quantity - _recommand_quantity) ** 2) \
/ (self.data.basis_volume ** 2)
return basic_reward - self.config.simulation_linear_reg_coeff * linear_reg
@property
def observation_dim(self):
return len(self.config.simulation_features) * self.config.simulation_loockback_horizon
def get_metric(self, mtype='IS'):
# IS: implementation shortfall
if mtype == 'IS':
return self.data.basis_price * (self.total_quantity - self.quantity) - self.cash
# BP: bp over mid price TWAP
if mtype == 'BP':
if self.total_quantity == self.quantity:
return 0
avg_price = self.cash / (self.total_quantity - self.quantity)
TWAP_mid = self.data.backtest_data.loc[self.data.start_index:self.data.end_index, 'latest_price'].mean()
bp = (avg_price - TWAP_mid) / self.data.basis_price * 10000
return bp
def run_data_prepare():
Preprocess()
config = DefaultConfig()
DataPrepare(config)
def run_env_test():
config = DefaultConfig()
env = make_env(config)
market_state, private_state = env.reset()
print('market_state = {}'.format(market_state))
print('private_state = {}'.format(private_state))
print('snapshot = ')
print(env.data.backtest_data.loc[env.data.current_index])
market_state, private_state, reward, done, info = env.step(0)
print('market_state = {}'.format(market_state))
print('private_state = {}'.format(private_state))
print('reward = {}'.format(reward))
print('done = {}'.format(done))
print('info = {}'.format(info))
print('snapshot = ')
print(env.data.backtest_data.loc[env.data.current_index])
market_state, private_state, reward, done, info = env.step(0)
print('market_state = {}'.format(market_state))
print('private_state = {}'.format(private_state))
print('reward = {}'.format(reward))
print('done = {}'.format(done))
print('info = {}'.format(info))
if __name__ == '__main__':
run_data_prepare()
run_env_test()
| 42,395 | 41.651911 | 137 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/OrderExecution/order_execution_env.py
|
import os
import torch
from random import shuffle
from functorch import vmap
from shares_data_process import get_share_dicts_by_day
"""
Readme 写于 2022-11-08 17:28:39
## OrderExecutionEnv 订单执行仿真环境
### 什么是订单执行任务?
举例:我持有1000股茅台,想要在一个月内,拿到股票市场上卖掉,换取尽可能多的现金。
设置较高的价格卖出,能多换取现金,但自己持有的股票就无法在规定时限内卖出。
所以交易员会设计“订单执行策略”,根据市场行情,将很大的订单,拆分成可执行的小订单,尽量在规定时间内以更高价格卖出。
订单执行仿真环境:
我们为了让强化学习算法完成订单执行任务,设计了这些仿真环境。
- OrderExecutionEnv 是一个用CPU计算的 single env,但代码容易理解
- OrderExecutionVecEnv 是一个用GPU计算的 vectorized env,计算效率高
state,可观测的状态,特征数量:`self.state_dim = 4 + self.data_dicts[0]['tech_factors'].shape[1]`
- internal state:(会受到智能体动作的影响而改变的state)
- cash 现金 (现金不需要加入到状态里,因为我们不需要买东西)
- remain_quantity 剩余的需要被执行的订单数量,是整数
- quantity 当前时刻需要被执行的订单数量,是整数
- external state (不受智能体动作的影响而改变的state,随着仿真程度的提高,他们也有机会变成 internal state)
- remain_step_rate 剩余可执行的步数,除以可执行的总步数,所以它会慢慢从 1.0 减少到 0.0
- last_price 上一个时刻的收盘价,策略会学习这个价格的偏移量,用来得到这一时刻的订单执行价格
- tech_factor 我自己随便写的 技术特征,有一点点用,后期可以替换成专业的 technical factors
action,策略的动作,特征数量:2
- delta_price 调整后会得到挂到交易所的订单的价格 executed_price
- 根据上一时刻的价格,加上 delta_price,得到这一时刻的挂到交易所的订单的价格
- 相邻两个档位的最小价格变动是0.01,因此我们 让 -1.0~+1.0 的 delta_price乘以 price_scale=50*0.01
- delta_price 等于0 表示挂单价格等于上一时刻的最后成交价
- delta_price 等于-1表示用仿真环境设计的最低价格去挂单,反之,+1表示最高价格
- quantity_ratio 调整后会得到挂到交易所的订单的数量 executed_quantity
- 动作空间是 -1.0~+1.0,线性变换到 0.0~2.0后,得到 quantity_ratio
- reset时,根据剩余的挂单时间,以及剩余的挂单量,计算出基础挂单量self.quantity
以上设计的原因:
- 用固定的动作 (0, 0) 表示 delta_price=0, quantity_ratio=1.0,能得到一个baselines
- 没有让策略直接输出 挂单价格,而是输出 delta_price,能让策略在类似的state下,输出相似的action
- 没有让策略直接输出 挂单数量,而是输出 quantity_ratio,能限制策略的挂单上限,避免超过环境的仿真能力
注意,还有一些注释写在了 OrderExecutionVecEnv 里面,这些注释是偏向 GPU并行仿真工程实现的内容。
详细注释写在 OrderExecutionVecEnv 里,而不是 OrderExecutionEnv 里
"""
class OrderExecutionVecEnv:
"""
    In this version, the base execution quantity is static rather than dynamic.
"""
def __init__(self, num_envs: int = 4, gpu_id: int = 0, if_random=False,
share_name: str = '000768_XSHE', beg_date: str = '2022-09-01', end_date: str = '2022-09-03', ):
        self.if_random = if_random  # a randomized reset exposes the policy to more diverse states, improving generalization
        self.num_levels = 5  # pick num_levels of the 10 quote levels for the simulation
        self.price_scale = 25  # the first action feature is the change of the sell price vs. the previous step, spanning 30 ticks
        self.volume_scale = 1e-2  # the quantity to execute is set automatically to volume_scale times the traded volume
        self.executed_scale = 2e-2  # fill ratio of orders executed against last_price
assert self.volume_scale < self.executed_scale
'''stack state'''
        self.n_stack = 8  # keep the states of n_stack different steps t for state stacking
        self.n_state = []  # buffer of recent states used for stacking
'''device'''
self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
        # assign a single GPU device to the vectorized env for parallel computation
'''load data'''
        self.max_len = None  # None so that calling step() before env.reset() raises a predictable error
        self.share_name = share_name  # name of the currently (randomly) drawn share
        self.cumulative_returns = torch.zeros(0)  # initialized to the empty tensor torch.zeros(0) to stress these are tensors, not None
        self.price = torch.zeros(0)
        self.volume = torch.zeros(0)
        self.ask_prices = torch.zeros(0)  # ask prices at levels indices=[1, max_level]
        self.bid_prices = torch.zeros(0)  # bid prices at levels indices=[1, max_level]
        self.ask_volumes = torch.zeros(0)  # ask volumes at levels indices=[1, max_level]
        self.bid_volumes = torch.zeros(0)  # bid volumes at levels indices=[1, max_level]
        self.tech_factors = torch.zeros(0)
        self.total_quantity = torch.zeros(0)  # target quantity of the execution task (to be completed within one day)
self.data_dicts = self.load_share_data_dicts(
data_dir='./shares_data_by_day', share_name=share_name,
beg_date=beg_date, end_date=end_date)
'''reset'''
        self.t = 0  # time step t
        self.cash = torch.zeros(0)  # cash; not part of the state, since we only sell and never buy
        self.quantity = torch.zeros(0)  # base order quantity per step
        self.total_asset = torch.zeros(0)  # total asset = cash + inventory marked to cash (inventory marks to 0 cash in this task)
        self.remain_quantity = torch.zeros(0)  # remaining quantity to execute; the agent must sell everything so this ends at 0
'''env info'''
self.env_name = 'OrderExecutionVecEnv-v2'
self.num_envs = num_envs
        self.max_step = max([data_dict['max_len'] for data_dict in self.data_dicts])  # the longest episode in the data serves as max_step
self.state_dim = (4 + self.data_dicts[0]['tech_factors'].shape[1]) * self.n_stack
self.action_dim = 2
self.if_discrete = False
'''function for vmap'''
self.inplace_cash_quantity = vmap(
func=self._inplace_cash_quantity, in_dims=(0, 0, 0, None, None), out_dims=0
)
self._get_state = vmap(
func=lambda remain_quantity, quantity, remain_step_rate, last_price, tech_factor:
torch.hstack((remain_quantity, quantity, remain_step_rate, last_price, tech_factor)),
in_dims=(0, 0, None, None, None), out_dims=0
)
'''def get_data_dict'''
self.rand_id = 0
shuffle(self.data_dicts)
def get_data_dict(self):
self.rand_id += 1
if self.rand_id >= len(self.data_dicts):
self.rand_id = 0
shuffle(self.data_dicts)
return self.data_dicts[self.rand_id] # data_dict
def reset(self):
self.t = 0
'''load data from data_dict to device'''
data_dict = self.get_data_dict()
self.max_len = data_dict['max_len']
self.volume = data_dict['volume'].to(self.device)
self.price = data_dict['last_price'].to(self.device)
self.ask_prices = data_dict['ask_prices'].to(self.device)
self.bid_prices = data_dict['bid_prices'].to(self.device)
self.ask_volumes = data_dict['ask_volumes'].to(self.device)
self.bid_volumes = data_dict['bid_volumes'].to(self.device)
self.tech_factors = data_dict['tech_factors'].to(self.device)
total_quantity = data_dict['total_quantity'].to(self.device)
total_quantity = total_quantity.repeat(self.num_envs)
'''build internal state: cash'''
self.cash = torch.zeros(self.num_envs, dtype=torch.float32, device=self.device)
        self.total_asset = self.cash.clone()  # total asset = cash + inventory marked to cash (inventory marks to 0 cash in this task)
'''build internal state: quantity'''
self.quantity = total_quantity * self.executed_scale / self.max_len
total_quantity_scale = torch.arange(self.num_envs).to(self.device) / self.num_envs
total_quantity_scale = total_quantity_scale * 0.9 + 0.1 # range in [0.1, 0.9]
self.total_quantity = total_quantity * self.volume_scale * total_quantity_scale
if self.if_random:
self.quantity *= torch.rand_like(self.quantity) * 0.2 + 0.9 # range in [0.9, 1.1]
self.total_quantity *= torch.rand_like(self.total_quantity) * 0.2 + 0.9 # range in [0.9, 1.1]
self.total_quantity = torch.round(self.total_quantity)
self.remain_quantity = torch.zeros_like(self.cash) + self.total_quantity
'''stack state'''
state = self.get_state()
self.n_state = [state, ] * 24
return self.get_n_state()
def step(self, action):
self.t += 1
done = self.t == self.max_len
        '''action'''  # linearly map the policy's -1.0~+1.0 actions to the order price and quantity the simulator needs
curr_price = self.get_curr_price(action[:, 0])
curr_quantity = self.get_curr_quantity(action[:, 1])
prev_quantity = curr_quantity.clone()
'''executed in current step immediately'''
for level in range(self.num_levels):
self.inplace_cash_quantity(self.cash, curr_quantity, curr_price,
self.bid_prices[level, self.t], self.bid_volumes[level, self.t])
'''executed in next step'''
if not done:
self.inplace_cash_quantity(self.cash, curr_quantity, curr_price,
self.price[self.t + 1], self.volume[self.t + 1] * self.executed_scale)
'''update remain_quantity'''
diff_quantity = curr_quantity - prev_quantity
self.remain_quantity += diff_quantity
'''get (state, reward, done)'''
total_asset = self.cash
reward = (total_asset - self.total_asset) * 2 ** -14
self.total_asset = self.cash.clone()
# state = self.reset() if done else self.get_state() # after self.t += 1
if done:
self.cumulative_returns = total_asset / (self.total_quantity * self.price.mean()) * 100 # 100%
n_state = self.reset()
else:
state = self.get_state()
self.n_state.append(state)
del self.n_state[0]
n_state = self.get_n_state()
done = torch.tensor(done, dtype=torch.bool, device=self.device).expand(self.num_envs)
return n_state, reward, done, {}
    def get_state(self):  # build the state observed by the agent
return self._get_state(self.remain_quantity / self.total_quantity,
self.quantity / self.total_quantity,
self.get_tensor(1 - self.t / self.max_len), # remain_step_rate
self.price[self.t] * 2 ** -3,
self.tech_factors[self.t])
def get_n_state(self):
return torch.hstack([self.n_state[i] for i in (-1, -2, -3, -5, -7, -11, -15, -24)])
def get_tensor(self, ary):
return torch.tensor(ary, dtype=torch.float32, device=self.device)
def get_curr_price(self, action_price):
delta_price = action_price * (self.price_scale * 0.01)
return self.price[self.t - 1] + delta_price # after self.t += 1
def get_curr_quantity(self, action_quantity):
quantity_ratio = action_quantity + 1
curr_quantity = torch.round(quantity_ratio * self.quantity)
curr_quantity = torch.min(torch.stack((self.remain_quantity, curr_quantity)), dim=0)[0]
return curr_quantity
@staticmethod
def _inplace_cash_quantity(cash, quantity, price, ask_price, ask_volume):
executed_volume = torch.min(quantity, ask_volume) * (price >= ask_price)
        # multiplying by (price >= ask_price) acts as an if: when it is False, executed_volume is 0, equivalent to skipping this code
        # this is done because vmap (as of 2022-11-09) cannot accelerate code with control-flow branches, only static code
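        # vmap expects the mapped function to return a tensor, hence the dummy
        # torch.empty(0) below; the real effect of this function is the in-place
        # update of `cash` and `quantity`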
cash += executed_volume * price
quantity -= executed_volume
return torch.empty(0)
@staticmethod
def get_tech_factors(volume, price, value,
ask_prices, ask_volumes,
bid_prices, bid_volumes):
"""
我随便写的根据 ask-bid 数据得到 特征的代码,用GPU计算,有微弱的效果
用于能检测仿真环境加入 technical factors 的模块是否正常运行
以后需要替换成更加专业的 technical factors
"""
ask_values = ask_prices * ask_volumes
bid_values = bid_prices * bid_volumes
mean_price = value / volume
delta_price = price - mean_price
ask_cum_values = torch.cumsum(ask_values, dim=0)
bid_cum_values = torch.cumsum(bid_values, dim=0)
ask_cum_volumes = torch.cumsum(ask_volumes, dim=0)
bid_cum_volumes = torch.cumsum(bid_volumes, dim=0)
ask_cum_prices = ask_cum_values / ask_cum_volumes
del ask_cum_values, ask_cum_volumes
bid_cum_prices = bid_cum_values / bid_cum_volumes
del bid_cum_values, bid_cum_volumes
v_adj_spreads = ask_cum_prices - bid_cum_prices
del ask_cum_prices, bid_cum_prices
'''normalization'''
tech_factors = torch.cat((
get_ts_trends(value * 2 ** -14, win_size=6, gap_size=6),
get_ts_trends(value * 2 ** -14, win_size=12, gap_size=8),
get_ts_trends(mean_price * 2 ** 3, win_size=6, gap_size=6),
get_ts_trends(mean_price * 2 ** 3, win_size=12, gap_size=8),
get_ts_trends(delta_price * 2 ** 9, win_size=6, gap_size=6),
get_ts_trends(delta_price * 2 ** 9, win_size=12, gap_size=8),
get_ts_trends(v_adj_spreads[0] * 2 ** 6, win_size=6, gap_size=6),
get_ts_trends(v_adj_spreads[1] * 2 ** 6, win_size=8, gap_size=6),
get_ts_trends(v_adj_spreads[2] * 2 ** 6, win_size=8, gap_size=8),
get_ts_trends(v_adj_spreads[3] * 2 ** 6, win_size=12, gap_size=8),
get_ts_trends(v_adj_spreads[4] * 2 ** 6, win_size=12, gap_size=12),
), dim=1)
torch.nan_to_num_(tech_factors, nan=0.0, posinf=0.0, neginf=0.0)
return tech_factors
def load_share_data_dicts(self, data_dir="./data",
share_name: str = '000768_XSHE',
beg_date='2022-09-01',
end_date='2022-09-30'):
assert share_name in {'000768_XSHE', '000685_XSHE'}
share_dir = f"{data_dir}/{share_name}"
share_dicts = get_share_dicts_by_day(share_dir=share_dir, share_name=share_name,
beg_date=beg_date, end_date=end_date,
n_levels=self.num_levels, n_days=5, device=self.device)
for share_dict in share_dicts:
for key, value in share_dict.items():
if isinstance(value, torch.Tensor):
share_dict[key] = value.to(torch.device('cpu'))
        data_dicts = []  # keep each stock's data in a dict; reset() randomly picks one stock's data, loads it onto the GPU, and training begins
print('| OrderExecutionEnv data pre processing:', share_name)
for i, share_dict in enumerate(share_dicts):
share_name = share_dict['share_name']
trade_date = share_dict['trade_date']
print(end=f'{trade_date} ')
print() if i % 8 == 7 else None
            # processing the order-flow data yields ask/bid snapshot data over a time window
            ask_volumes = share_dict['ask_volumes']  # volumes at each level
            bid_volumes = share_dict['bid_volumes']  # volumes at each level
            ask_prices = share_dict['ask_prices']  # prices at each level
            bid_prices = share_dict['bid_prices']  # prices at each level
            volume = share_dict['volume']  # delta volume: quantity traded in the interval
            price = share_dict['price']  # last traded price
            value = share_dict['value']  # delta value: total traded amount (turnover)
tech_factors = self.get_tech_factors(volume, price, value,
ask_prices, ask_volumes,
bid_prices, bid_volumes)
            # keep in CPU memory first; loaded onto the GPU only at reset
data_dict = {
'share_name': share_name,
'max_len': price.shape[0] - 1,
'total_quantity': volume.sum(),
'volume': volume,
'last_price': price,
'ask_prices': ask_prices,
'bid_prices': bid_prices,
'ask_volumes': ask_volumes,
'bid_volumes': bid_volumes,
'tech_factors': tech_factors,
}
data_dicts.append(data_dict)
return data_dicts
class OrderExecutionMinuteVecEnv(OrderExecutionVecEnv):
def __init__(self, num_envs: int = 4, gpu_id: int = 0, if_random=False,
share_name: str = '000768_XSHE', beg_date: str = '2022-09-01', end_date: str = '2022-09-03', ):
        self.exec_level = 16  # split the aggregated prices into exec_level bins
        self.num_cluster = 20  # aggregate num_cluster snapshots into one; a snapshot is ~3 s, so 3 s * 20 = 60 s
        self.price_scale = 25  # the first action feature is the change of the sell price vs. the previous step, spanning 30 ticks
super(OrderExecutionMinuteVecEnv, self).__init__(num_envs=num_envs, gpu_id=gpu_id, if_random=if_random,
share_name=share_name, beg_date=beg_date, end_date=end_date)
'''stack state'''
        self.n_stack = 8  # keep the states of n_stack different steps t for state stacking
        self.n_state = []  # buffer of recent states used for stacking
'''load data'''
self.prices = torch.zeros(0)
self.volumes = torch.zeros(0)
def reset(self):
self.t = 0
'''load data from data_dict to device'''
data_dict = self.get_data_dict()
self.max_len = data_dict['max_len']
self.prices = data_dict['prices'].to(self.device)
self.volumes = data_dict['volumes'].to(self.device)
self.price = data_dict['price'].to(self.device)
self.volume = data_dict['volume'].to(self.device)
self.tech_factors = data_dict['tech_factors'].to(self.device)
total_quantity = data_dict['total_quantity'].to(self.device)
total_quantity = total_quantity.repeat(self.num_envs)
'''build internal state: cash'''
self.cash = torch.zeros(self.num_envs, dtype=torch.float32, device=self.device)
self.total_asset = self.cash.clone()  # total asset = cash + inventory converted to cash (in the order-execution task, inventory converts to 0 cash)
'''build internal state: quantity'''
self.quantity = total_quantity * self.executed_scale / self.max_len
total_quantity_scale = torch.arange(self.num_envs).to(self.device) / self.num_envs
total_quantity_scale = total_quantity_scale * 0.9 + 0.1  # spread across roughly [0.1, 1.0)
self.total_quantity = total_quantity * self.volume_scale * total_quantity_scale
if self.if_random:
self.quantity *= torch.rand_like(self.quantity) * 0.2 + 0.9 # range in [0.9, 1.1]
self.total_quantity *= torch.rand_like(self.total_quantity) * 0.2 + 0.9 # range in [0.9, 1.1]
self.total_quantity = torch.round(self.total_quantity)
self.remain_quantity = torch.zeros_like(self.cash) + self.total_quantity
'''stack state'''
state = self.get_state()
self.n_state = [state, ] * 24  # note: keeps 24 copies although n_stack == 8; only the lags (-1, -2, -4, -8) are read in get_n_state()
return self.get_n_state()
def step(self, action):
self.t += 1
done = self.t == self.max_len
'''action'''  # linearly map the policy's actions in [-1.0, +1.0] to the limit-order price and quantity required by the simulator
curr_price = self.get_curr_price(action[:, 0])
curr_quantity = self.get_curr_quantity(action[:, 1])
prev_quantity = curr_quantity.clone()
'''executed'''
for level in range(self.exec_level):
self.inplace_cash_quantity(self.cash, curr_quantity, curr_price,
self.prices[self.t, level], self.volumes[self.t, level])
'''update remain_quantity'''
diff_quantity = curr_quantity - prev_quantity
self.remain_quantity += diff_quantity
'''get (state, reward, done)'''
total_asset = self.cash
reward = (total_asset - self.total_asset) * 2 ** -14
self.total_asset = self.cash.clone()
# state = self.reset() if done else self.get_state() # after self.t += 1
if done:
self.cumulative_returns = total_asset / (self.total_quantity * self.price.mean()) * 100 # 100%
n_state = self.reset()
else:
state = self.get_state()
self.n_state.append(state)
del self.n_state[0]
n_state = self.get_n_state()
done = torch.tensor(done, dtype=torch.bool, device=self.device).expand(self.num_envs)
return n_state, reward, done, {}
def get_state(self):  # build the state observed by the agent
return self._get_state(self.remain_quantity / self.total_quantity,
self.quantity / self.total_quantity,
self.get_tensor(1 - self.t / self.max_len), # remain_step_rate
self.price[self.t] * 2 ** -3,
self.tech_factors[self.t])
def get_n_state(self):
return torch.hstack([self.n_state[i] for i in (-1, -2, -4, -8)])
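# Note (not in the original): get_n_state() stacks the observations from 1, 2,
# 4 and 8 steps ago, so with a per-step state of D features the stacked
# observation has 4 * D features; the exponentially spaced lags cover a longer
# history than stacking 4 consecutive steps would.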
def load_share_data_dicts(self, data_dir="./data",
share_name: str = '000768_XSHE',
beg_date='2022-09-01',
end_date='2022-09-30'):
assert share_name in {'000768_XSHE', '000685_XSHE'}
share_dir = f"{data_dir}/{share_name}"
share_dicts = get_share_dicts_by_day(share_dir=share_dir, share_name=share_name,
beg_date=beg_date, end_date=end_date,
n_levels=self.num_levels, n_days=5, device=self.device)
for share_dict in share_dicts:
for key, value in share_dict.items():
if isinstance(value, torch.Tensor):
share_dict[key] = value.to(torch.device('cpu'))
data_dicts = []  # store each day's data in a dict; reset() randomly picks one, loads it onto the GPU, and starts training
print('| OrderExecutionEnv data pre processing:', share_name)
for i, share_dict in enumerate(share_dicts):
share_name = share_dict['share_name']
trade_date = share_dict['trade_date']
print(end=f'{trade_date} ')
print() if i % 8 == 7 else None
# process the order-flow data
price = share_dict['price']  # last traded price
value = share_dict['value']  # delta value: total turnover (cash amount traded) in this interval
volume = share_dict['volume']  # delta volume: quantity traded in this interval
ask_prices = share_dict['ask_prices']  # prices at each price level
bid_prices = share_dict['bid_prices']  # prices at each price level
ask_volumes = share_dict['ask_volumes']  # volumes at each price level
bid_volumes = share_dict['bid_volumes']  # volumes at each price level
'''aggregate ticks into minute-level price/volume distributions'''
prices, volumes = self.tick_to_minute_data(volume=volume, value=value)
'''aggregate the raw series over num_cluster ticks'''
n_step = price.shape[0] // self.num_cluster
# truncate to a multiple of num_cluster, then aggregate
price = price[:n_step * self.num_cluster].reshape((n_step, self.num_cluster)).mean(dim=1)
value = value[:n_step * self.num_cluster].reshape((n_step, self.num_cluster)).sum(dim=1)
volume = volume[:n_step * self.num_cluster].reshape((n_step, self.num_cluster)).sum(dim=1)
ask_prices = ask_prices[:, 0:n_step * self.num_cluster:self.num_cluster]
bid_prices = bid_prices[:, 0:n_step * self.num_cluster:self.num_cluster]
ask_volumes = ask_volumes[:, 0:n_step * self.num_cluster:self.num_cluster]
bid_volumes = bid_volumes[:, 0:n_step * self.num_cluster:self.num_cluster]
tech_factors = self.get_tech_factors(volume, price, value,
ask_prices, ask_volumes,
bid_prices, bid_volumes)
# keep in CPU memory; loaded onto the GPU only at reset()
data_dict = {
'share_name': share_name,
'max_len': price.shape[0] - 1,
'total_quantity': volume.sum(),
'price': price,
'volume': volume,
'prices': prices,
'volumes': volumes,
'tech_factors': tech_factors,
}
data_dicts.append(data_dict)
'''add the price and volume of previous day'''
for i, curr_dict in enumerate(data_dicts):
'''prev_dict'''
j = max(0, i - 1)
prev_dict = data_dicts[j]
prev_price = prev_dict['price']
prev_price_rate = prev_price / prev_price.mean()
prev_volume = prev_dict['volume']
prev_volume_rate = prev_volume / prev_volume.mean()
'''curr_dict'''
tech_factors = curr_dict['tech_factors']
tech_price_rate = self.get_diff_stack_tensor(prev_price_rate, tech_factors)
tech_volume_rate = self.get_diff_stack_tensor(prev_volume_rate, tech_factors)
'''append to tech_factors'''
curr_dict['tech_factors'] = torch.cat((tech_factors, tech_price_rate, tech_volume_rate), dim=1)
return data_dicts
@staticmethod
def get_diff_stack_tensor(prev_tensor, curr_tensor):
prev_len = prev_tensor.shape[0]
curr_len = curr_tensor.shape[0]
max_len = min(prev_len, curr_len)
tech_prices = torch.ones((curr_len, 8), dtype=torch.float32, device=curr_tensor.device)
tech_prices[:max_len, 0] = prev_tensor[:max_len]
tech_prices[:max_len - 2, 1] = prev_tensor[2:max_len]
tech_prices[:max_len - 4, 2] = prev_tensor[4:max_len]
tech_prices[:max_len - 6, 3] = prev_tensor[6:max_len]
tech_prices[:max_len - 9, 4] = prev_tensor[9:max_len]
tech_prices[:max_len - 15, 5] = prev_tensor[15:max_len]
tech_prices[2:max_len, 6] = prev_tensor[:max_len - 2]
tech_prices[5:max_len, 7] = prev_tensor[:max_len - 5]
return tech_prices
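# Note (not in the original): get_diff_stack_tensor builds 8 shifted copies of
# the previous day's (normalized) series aligned to the current day's time axis
# (leads of 0/2/4/6/9/15 steps and lags of 2/5 steps), padding with ones where
# no previous-day value exists.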
def get_tech_factors(self, volume, price, value,
ask_prices, ask_volumes,
bid_prices, bid_volumes):
"""
Quick-and-dirty code that derives features from the ask-bid data, computed on the GPU; it has a weak but positive effect.
It is mainly used to check that the simulator's technical-factor module runs correctly.
It should be replaced with more professional technical factors later.
"""
ask_values = ask_prices * ask_volumes
bid_values = bid_prices * bid_volumes
mean_price = value / volume
delta_price = price - mean_price
ask_cum_values = torch.cumsum(ask_values, dim=0)
bid_cum_values = torch.cumsum(bid_values, dim=0)
ask_cum_volumes = torch.cumsum(ask_volumes, dim=0)
bid_cum_volumes = torch.cumsum(bid_volumes, dim=0)
ask_cum_prices = ask_cum_values / ask_cum_volumes
del ask_cum_values, ask_cum_volumes
bid_cum_prices = bid_cum_values / bid_cum_volumes
del bid_cum_values, bid_cum_volumes
v_adj_spreads = ask_cum_prices - bid_cum_prices
del ask_cum_prices, bid_cum_prices
'''normalization'''
tech_factors = torch.cat((
get_ts_trends(value * 2 ** -14, win_size=12, gap_size=8),
get_ts_trends(mean_price * 2 ** 3, win_size=6, gap_size=6),
get_ts_trends(mean_price * 2 ** 3, win_size=12, gap_size=8),
get_ts_trends(delta_price * 2 ** 9, win_size=6, gap_size=6),
get_ts_trends(delta_price * 2 ** 9, win_size=12, gap_size=8),
get_ts_trends(v_adj_spreads[0] * 2 ** 6, win_size=6, gap_size=6),
get_ts_trends(v_adj_spreads[1] * 2 ** 6, win_size=8, gap_size=6),
get_ts_trends(v_adj_spreads[2] * 2 ** 6, win_size=8, gap_size=8),
get_ts_trends(v_adj_spreads[3] * 2 ** 6, win_size=12, gap_size=8),
get_ts_trends(v_adj_spreads[4] * 2 ** 6, win_size=12, gap_size=12),
), dim=1)
torch.nan_to_num_(tech_factors, nan=0.0, posinf=0.0, neginf=0.0)
return tech_factors
def tick_to_minute_data(self, volume, value):
n_step = volume.shape[0] // self.num_cluster
device = volume.device
value = value[:n_step * self.num_cluster].reshape((n_step, self.num_cluster))
volume = volume[:n_step * self.num_cluster].reshape((n_step, self.num_cluster))
price = torch.nan_to_num_(value / volume, nan=0.0)
volume_norm = volume / volume.mean(dim=1, keepdim=True)
price_avg = (volume_norm * price).mean(dim=1, keepdim=True)
price_std = (volume_norm * (price - price_avg) ** 2).mean(dim=1, keepdim=True)
num_k = torch.arange(self.exec_level + 1, dtype=torch.float32, device=device) # range[0, self.exec_level]
num_k = num_k * (3 / self.exec_level) - 1 # range [-1, 2]
std_k = num_k * (-50) # range [50, -100]
std_k = std_k.unsqueeze(0)
prices = price_avg + price_std * std_k # price from high to low
vol_k = torch.exp(-num_k ** 2 / 2)  # unnormalized Gaussian PDF with sigma=1.0 (the 1 / (2*pi)**0.5 factor is dropped)
vol_k = vol_k / vol_k.sum()  # normalize so the discrete level weights sum to 1.0
vol_k = vol_k.unsqueeze(0)
volumes = volume.sum(dim=1, keepdim=True) * vol_k
return prices, volumes
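# A minimal sketch (not in the original) of how tick_to_minute_data spreads one
# minute's executed volume over exec_level+1 synthetic price levels: the level
# prices follow a grid around the volume-weighted mean (note that price_std
# above is a weighted mean of squared deviations, i.e. a variance, not a std),
# and the level volumes follow the normalized Gaussian weights, so per-minute
# volume is preserved.
def _demo_tick_to_minute_data():
    env = OrderExecutionMinuteVecEnv.__new__(OrderExecutionMinuteVecEnv)  # skip __init__ to avoid data loading
    env.num_cluster = 20
    env.exec_level = 16
    volume = torch.rand(200) * 100 + 1  # 200 ticks -> 10 aggregated minutes
    value = volume * (10.0 + torch.randn(200) * 0.01)  # tick prices around 10.0
    prices, volumes = env.tick_to_minute_data(volume=volume, value=value)
    print(prices.shape, volumes.shape)  # torch.Size([10, 17]) torch.Size([10, 17])
    assert torch.allclose(volumes.sum(dim=1), volume.reshape(10, 20).sum(dim=1))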
class OrderExecutionVecEnvForEval(OrderExecutionVecEnv):
def __init__(self, num_envs: int = 4, gpu_id: int = 0, if_random=False,
beg_date: str = '2022-09-01', end_date: str = '2022-09-03', share_name='000685_XSHE'):
OrderExecutionVecEnv.__init__(self, num_envs=num_envs, gpu_id=gpu_id, if_random=if_random,
beg_date=beg_date, end_date=end_date, share_name=share_name)
self.curr_price = None
self.curr_quantity = None
self.cumulative_returns_days = []
def reset(self):
self.rand_id = 0
self.cumulative_returns_days = []
return super().reset()
def step(self, action): # modified_mark
n_state, reward, done, info_dict = super().step(action)
if done[0]: # modified_mark
self.cumulative_returns_days.append(self.cumulative_returns)
self.cumulative_returns = torch.stack(self.cumulative_returns_days).mean(dim=0)
data_dict = self.data_dicts[self.rand_id]
self.bid_prices = data_dict['bid_prices'].to(self.device) # ForPlot
self.bid_volumes = data_dict['bid_volumes'].to(self.device) # ForPlot
return n_state, reward, done, info_dict
def get_curr_price(self, action_price):
self.curr_price = super().get_curr_price(action_price)
return self.curr_price
def get_curr_quantity(self, action_quantity):
self.curr_quantity = super().get_curr_quantity(action_quantity)
return self.curr_quantity
'''get_tech_factors'''
def get_re_cum_sum(ten):
cum_sum = torch.cumsum(ten, dim=0)
return ten - cum_sum + cum_sum[-1:None]
def get_all_cum_sum(level_tensors):
level_cum = level_tensors.clone()
for i in range(1, level_tensors.shape[1]):
level_cum[i] += level_cum[i - 1]
return level_cum
def get_ts_avg_std(ten, win_size=6):  # the loop below could be vectorized for higher performance
avg = torch.zeros_like(ten)
std = torch.zeros_like(ten)
for i in range(win_size, avg.shape[0]):
tmp = ten[i - win_size:i]
avg[i] = tmp.mean(dim=0)
std[i] = tmp.std(dim=0)
return avg, std
def get_ts_diff(ten, gap_size=6):
out = torch.zeros_like(ten)
out[gap_size:] = ten[gap_size:] - ten[:-gap_size]
return out
def get_ts_trends(ten, win_size=6, gap_size=6):
avg, std = get_ts_avg_std(ten, win_size)
avg_diff = get_ts_diff(avg, gap_size)
std_diff = get_ts_diff(std, gap_size)
return torch.stack((avg, avg_diff, std, std_diff), dim=1)
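# A minimal sketch (not in the original): each get_ts_trends call returns the
# rolling mean, its gap difference, the rolling std, and its gap difference,
# stacked as 4 columns per input series.
def _demo_get_ts_trends():
    ten = torch.arange(32, dtype=torch.float32)
    out = get_ts_trends(ten, win_size=6, gap_size=6)
    print(out.shape)  # torch.Size([32, 4])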
"""run"""
def check_with_twap():
num_envs = 2
share_name = ['000768_XSHE', '000685_XSHE'][0]
beg_date = '2022-09-01'
end_date = '2022-09-01'
# env = OrderExecutionVecEnv(num_envs=num_envs, gpu_id=0, if_random=False,
# share_name=share_name, beg_date=beg_date, end_date=end_date)
env = OrderExecutionMinuteVecEnv(num_envs=num_envs, gpu_id=0, if_random=False,
share_name=share_name, beg_date=beg_date, end_date=end_date)
env.reset()
action = torch.zeros((num_envs, env.action_dim), dtype=torch.float32, device=env.device)
# action[:, 0]: the delta price is 0 by default
# action[:, 1]: the quantity scale is +1 by default
cumulative_rewards = torch.zeros(num_envs, dtype=torch.float32, device=env.device)
for i in range(env.max_step):
state, reward, done, _ = env.step(action)
cumulative_rewards += reward
if i % 64 == 0:
env_cumulative_rewards = env.total_asset / env.total_quantity
print(f"{i:8} {str(env_cumulative_rewards):64} {env.remain_quantity} {reward}")
print(env.total_asset / env.total_quantity)
print(env.total_asset)
print(env.remain_quantity)
print(f'cumulative_returns {env.cumulative_returns.mean():9.3f} {env.cumulative_returns.std(dim=0):9.3f}')
print(f'cumulative_rewards {cumulative_rewards.mean():9.3f} {cumulative_rewards.std(dim=0):9.3f}')
def run1201(): # plot
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" # OMP: Error #15: Initializing libiomp5md.dll
import matplotlib.pyplot as plt
import numpy as np
num_envs = 4
env = OrderExecutionVecEnv(num_envs=num_envs, beg_date='2022-09-14', end_date='2022-09-14')
env.if_random = False
env.reset()
action = torch.zeros((4, 2), dtype=torch.float32, device=env.device)
action[0, 1] = -1.0
action[1, 1] = 0.0
action[2, 1] = 0.5
action[3, 1] = 1.0
# action[:, 0]: the delta price is 0 by default
# action[:, 1]: the quantity scale is +1 by default
ary_remain_quantity = []
ary_cum_returns = []
ary_cash = []
ary_last_price = []
cumulative_rewards = torch.zeros(num_envs, dtype=torch.float32, device=env.device)
for i in range(env.max_step):
state, reward, done, _ = env.step(action)
cumulative_rewards += reward
if done[0]:
break
ary_remain_quantity.append(env.remain_quantity.tolist())
ary_cum_returns.append((env.total_asset / env.total_quantity).tolist())
ary_cash.append(env.cash.tolist())
ary_last_price.append(env.price[env.t].tolist())
ary_remain_quantity = np.array(ary_remain_quantity)
ary_cum_returns = np.array(ary_cum_returns)
ary_cash = np.array(ary_cash)
ary_last_price = np.array(ary_last_price)
for env_i in range(1, num_envs):
# plt.plot(ary_remain_quantity[:, env_i])
# plt.plot(ary_cum_returns[:, env_i])
# plt.plot(ary_cash[:, env_i])
pass
plt.plot(ary_last_price)
plt.grid()
plt.show()
print(f'cumulative_returns {env.cumulative_returns.mean():9.3f} {env.cumulative_returns.std(dim=0):9.3f}')
print(f'cumulative_rewards {cumulative_rewards.mean():9.3f} {cumulative_rewards.std(dim=0):9.3f}')
if __name__ == '__main__':
check_with_twap()
| 33,366 | 41.559949 | 117 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/OrderExecution/demo.py
|
import sys
import gym
from elegantrl.run import train_agent, train_agent_multiprocessing
from elegantrl.config import Config, get_gym_env_args, build_env
from elegantrl.agent import AgentPPO
'''train'''
def train_ppo_a2c_for_order_execution_vec_env():
from OrderExecutionEnv import OrderExecutionVecEnv
num_envs = 2 ** 9
gamma = 0.999
n_stack = 8
agent_class = AgentPPO
env_class = OrderExecutionVecEnv
env_args = {'env_name': 'OrderExecutionVecEnv-v2',
'num_envs': num_envs,
'max_step': 5000,
'state_dim': 48 * n_stack,
'action_dim': 2,
'if_discrete': False,
'share_name': '000768_XSHE',
'beg_date': '2022-06-09',
'end_date': '2022-09-09',
'if_random': False}
if not env_args:
get_gym_env_args(env=OrderExecutionVecEnv(), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(1e6) # break training if 'total_step > break_step'
args.net_dims = (256, 128, 64)  # hidden-layer dimensions of the MultiLayer Perceptron
args.gamma = gamma # discount factor of future rewards
args.horizon_len = 2 ** 9
args.batch_size = args.horizon_len * num_envs // 32
args.repeat_times = 4 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.learning_rate = 1e-4
args.state_value_tau = 0.01
eval_num_envs = 16
args.save_gap = int(8)
args.if_keep_save = True
args.if_over_write = False
args.eval_per_step = int(4e3)
args.eval_times = eval_num_envs
from OrderExecutionEnv import OrderExecutionVecEnvForEval
args.eval_env_class = OrderExecutionVecEnvForEval
args.eval_env_args = env_args.copy()
args.eval_env_args['num_envs'] = eval_num_envs
args.eval_env_args['max_step'] = 4000 * 22
args.eval_env_args['beg_date'] = '2022-09-10'
args.eval_env_args['end_date'] = '2022-10-10'
args.gpu_id = GPU_ID
args.eval_gpu_id = GPU_ID
args.random_seed = GPU_ID
args.num_workers = 2
if_check = False
if if_check:
train_agent(args)
else:
train_agent_multiprocessing(args)
"""
0% < 100% < 120%
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
6 1.64e+04 559 | 100.75 0.3 88000 0 | -2.81 0.44 -0.03 -0.03
6 1.64e+04 559 | 100.75
6 1.64e+05 1025 | 101.19 0.5 88000 0 | -2.58 0.37 -0.10 -0.13
6 1.64e+05 1025 | 101.19
6 1.72e+05 1471 | 101.21 0.5 88000 0 | -2.58 0.50 0.01 -0.12
6 1.72e+05 1471 | 101.21
6 1.80e+05 1916 | 101.20 0.5 88000 0 | -2.60 0.27 -0.14 -0.11
6 1.88e+05 2362 | 101.21 0.5 88000 0 | -2.63 0.63 -0.19 -0.10
6 1.88e+05 2362 | 101.21
6 1.97e+05 2807 | 101.22 0.5 88000 0 | -2.64 0.58 -0.18 -0.10
6 1.97e+05 2807 | 101.22
6 2.05e+05 3253 | 101.24 0.5 88000 0 | -2.64 0.25 0.04 -0.09
6 2.05e+05 3253 | 101.24
6 2.13e+05 3698 | 101.24 0.5 88000 0 | -2.67 0.46 -0.05 -0.08
6 2.13e+05 3698 | 101.24
6 2.21e+05 4143 | 101.25 0.5 88000 0 | -2.68 0.33 -0.01 -0.07
6 2.21e+05 4143 | 101.25
6 2.29e+05 4589 | 101.26 0.5 88000 0 | -2.69 0.50 0.08 -0.06
6 2.29e+05 4589 | 101.26
6 2.38e+05 5034 | 101.27 0.5 88000 0 | -2.71 0.26 0.05 -0.05
6 2.38e+05 5034 | 101.27
"""
'''help users understand Vectorized env by comparing with single env'''
def train_ppo_a2c_for_bipedal_walker():
agent_class = AgentPPO # DRL algorithm name
env_class = gym.make  # run a standard OpenAI Gym env: BipedalWalker-v3
env_args = {'env_name': 'BipedalWalker-v3',
'num_envs': 1,
'max_step': 1600,
'state_dim': 24,
'action_dim': 4,
'if_discrete': False}
get_gym_env_args(env=gym.make('BipedalWalker-v3'), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(8e5) # break training if 'total_step > break_step'
args.net_dims = (256, 128, 128)  # hidden-layer dimensions of the MultiLayer Perceptron
args.batch_size = 512
args.gamma = 0.97 # discount factor of future rewards
args.horizon_len = args.max_step * 3
args.repeat_times = 32 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.learning_rate = 1e-4
args.state_value_tau = 0.01  # soft-update tau of the running value/state normalization: std = (1 - tau) * std + tau * new_std
args.lambda_gae_adv = 0.93
args.lambda_entropy = 0.02
args.clip_ratio = 0.4
args.eval_times = 16
args.eval_per_step = 8e4
args.if_keep_save = False  # True: keep saving checkpoints during training; False: save only when training stops
args.gpu_id = GPU_ID
args.random_seed = GPU_ID
args.num_workers = 2
train_agent_multiprocessing(args) # train_agent(args)
"""
-200 < -150 < 300 < 330
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 1.92e+04 27 | -105.34 5.9 158 44 | -5.71 0.49 0.65 0.01
0 1.92e+04 27 | -105.34
0 1.06e+05 131 | -63.90 1.9 1600 0 | -5.96 0.37 0.13 0.07
0 1.06e+05 131 | -63.90
0 1.92e+05 223 | 57.48 3.5 1600 0 | -5.90 0.03 0.13 0.05
0 1.92e+05 223 | 57.48
0 2.78e+05 308 | 104.86 117.1 1211 493 | -5.65 0.07 0.18 -0.01
0 2.78e+05 308 | 104.86
0 3.65e+05 395 | 123.77 147.7 990 487 | -5.63 0.27 0.13 -0.01
0 3.65e+05 395 | 123.77
0 4.51e+05 486 | 236.73 130.2 1038 361 | -5.65 0.23 0.14 -0.00
0 4.51e+05 486 | 236.73
0 5.38e+05 575 | 286.09 1.4 1059 20 | -5.72 0.17 0.14 0.01
0 5.38e+05 575 | 286.09
0 6.24e+05 664 | 276.44 44.7 1010 53 | -5.76 0.20 0.13 0.02
0 7.10e+05 753 | 287.70 1.7 986 24 | -5.84 0.13 0.12 0.04
0 7.10e+05 753 | 287.70
0 7.97e+05 843 | 223.00 119.7 812 232 | -5.95 0.12 0.14 0.07
| TrainingTime: 845 | SavedDir: ./BipedalWalker-v3_PPO_2
"""
def train_ppo_a2c_for_bipedal_walker_vec_env():
env_name = 'BipedalWalker-v3'
num_envs = 4
from elegantrl.envs.CustomGymEnv import GymVecEnv
agent_class = AgentPPO # DRL algorithm name
env_class = GymVecEnv  # vectorized wrapper around the standard Gym env BipedalWalker-v3
env_args = {'env_name': env_name,
'num_envs': num_envs,
'max_step': 1600,
'state_dim': 24,
'action_dim': 4,
'if_discrete': False}
get_gym_env_args(env=build_env(env_class, env_args), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(8e5) # break training if 'total_step > break_step'
args.net_dims = (256, 128, 128)  # hidden-layer dimensions of the MultiLayer Perceptron
args.batch_size = 512
args.gamma = 0.98
args.horizon_len = args.max_step // 1
args.repeat_times = 32 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.learning_rate = 2e-4
args.state_value_tau = 0.01  # soft-update tau of the running value/state normalization: std = (1 - tau) * std + tau * new_std
args.lambda_gae_adv = 0.93
args.lambda_entropy = 0.02
args.eval_times = 16
args.eval_per_step = 5e4
args.if_keep_save = False  # True: keep saving checkpoints during training; False: save only when training stops
args.gpu_id = GPU_ID
args.random_seed = GPU_ID
args.num_workers = 2
if_check = False
if if_check:
train_agent_multiprocessing(args)
else:
train_agent(args)
"""
-200 < -150 < 300 < 330
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 6.40e+03 33 | -107.05 5.9 169 30 | -5.67 1.30 0.69 -0.01
0 6.40e+03 33 | -107.05
0 5.76e+04 113 | -37.95 2.0 1600 0 | -5.70 0.05 0.12 -0.00
0 5.76e+04 113 | -37.95
0 1.09e+05 196 | 163.69 76.5 1497 287 | -5.39 0.07 0.24 -0.08
0 1.09e+05 196 | 163.69
0 1.60e+05 280 | 28.24 120.4 690 434 | -5.33 0.46 0.17 -0.08
0 2.11e+05 364 | 97.72 147.8 801 396 | -5.32 0.28 0.18 -0.09
0 2.62e+05 447 | 254.85 78.5 1071 165 | -5.37 0.29 0.16 -0.08
0 2.62e+05 447 | 254.85
0 3.14e+05 530 | 274.90 61.5 1001 123 | -5.48 0.34 0.15 -0.04
0 3.14e+05 530 | 274.90
0 3.65e+05 611 | 196.47 121.1 806 220 | -5.60 0.35 0.18 -0.01
0 4.16e+05 689 | 250.12 89.0 890 143 | -5.78 0.32 0.18 0.03
0 4.67e+05 768 | 282.29 25.5 909 17 | -5.94 0.47 0.17 0.07
0 4.67e+05 768 | 282.29
0 5.18e+05 848 | 289.36 1.4 897 14 | -6.07 0.26 0.16 0.10
0 5.18e+05 848 | 289.36
0 5.70e+05 929 | 283.14 33.8 874 35 | -6.29 0.27 0.13 0.16
0 6.21e+05 1007 | 288.53 1.1 870 13 | -6.52 0.22 0.15 0.21
0 6.72e+05 1087 | 288.50 0.9 856 13 | -6.68 0.40 0.15 0.25
0 7.23e+05 1167 | 286.92 1.3 842 16 | -6.86 0.40 0.15 0.30
0 7.74e+05 1246 | 264.75 74.0 790 122 | -7.10 0.42 0.18 0.36
| TrainingTime: 1278 | SavedDir: ./BipedalWalker-v3_PPO_5
"""
if __name__ == '__main__':
GPU_ID = int(sys.argv[1]) if len(sys.argv) > 1 else 0 # >=0 means GPU ID, -1 means CPU
train_ppo_a2c_for_order_execution_vec_env()
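# Launch (a sketch): `python demo.py 0` trains on GPU 0; `python demo.py -1` falls back to CPU.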
| 10,436 | 43.987069 | 113 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/OrderExecution/plot.py
|
import os
import torch
from OrderExecutionEnv import OrderExecutionVecEnvForEval
"""run"""
def check__ask_price_volume():
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" # OMP: Error #15: Initializing libiomp5md.dll
import matplotlib.pyplot as plt
import numpy as np
num_envs = 2
env = OrderExecutionVecEnvForEval(num_envs=num_envs, beg_date='2022-09-09', end_date='2022-09-09')
env.if_random = False
env.reset()
max_len1 = env.max_len + 1 # after env.reset()
xs = np.arange(max_len1)
print('xs.shape', xs.shape)
'''ask bid price (from level 1 to 5)'''
from matplotlib.cm import get_cmap
color_map = get_cmap('bwr') # Blue White Red, input 0.0 ~ 1.0 or 0 ~ 1000
ask_prices = np.array(env.ask_prices)
ask_prices[ask_prices < 7.0] = 7.4  # todo: near the end of each day there are abnormal, very low-volume data points, so clamp them to the last normal value
print('ask_prices.shape', ask_prices.shape)
n_level, max_len1 = ask_prices.shape
for i in range(n_level):  # todo: plot the askPrices here
face_color = color_map(float(1 - i / n_level) * 0.2 + 0.2)  # todo: use a blue gradient
if i + 1 == n_level:
plt.fill_between(xs, ask_prices[i], np.zeros_like(ask_prices[i]) + np.nanmax(ask_prices[i]),
facecolor=face_color)
else:
plt.fill_between(xs, ask_prices[i], ask_prices[i + 1],
facecolor=face_color)
bid_prices = np.array(env.bid_prices)
bid_prices[bid_prices < 1] = np.nan
print('bid_prices.shape', bid_prices.shape)
n_level, max_len1 = bid_prices.shape
for i in range(n_level):  # todo: plot the bidPrices here
# face_color = color_map(float(i / n_level) * 0.3 + 0.5 + 0.1)  # todo: use a red gradient
face_color = color_map(float(1 - i / n_level) * 0.2 + 0.2)  # todo: use a blue gradient
if i + 1 == n_level:
plt.fill_between(xs, bid_prices[i], np.zeros_like(bid_prices[i]) + np.nanmin(bid_prices[i]),
facecolor=face_color)
else:
plt.fill_between(xs, bid_prices[i], bid_prices[i + 1],
facecolor=face_color)
last_price = np.array(env.last_price)
plt.plot(xs, last_price, color='blue', label='last price')  # todo: draw the last price in blue
'''policy: VWAP (using future data)'''
actions = torch.zeros((max_len1, num_envs, 2), dtype=torch.float32, device=env.device)
print('actions.shape', actions.shape)
volume_weights = (env.volume / env.volume.mean() - 1) / env.volume.std(dim=0) + 1
k = 5  # smoothing; the moving-average kernel below has 2 * k - 1 = 9 taps
volume_smooths = volume_weights.clone()
for i in range(1, k):
volume_smooths[i:] += volume_weights[:-i]
volume_smooths[:-i] += volume_weights[i:]
volume_smooths /= 2 * k - 1 # convolve
volume_smooths[:k] = volume_smooths[k]
volume_smooths[-k:] = volume_smooths[-k]
prev_price = env.last_price.clone()
prev_price[1:] = env.last_price[:-1]
curr_price = env.last_price * ((volume_smooths - 1.0) * 16 + 1.0)
curr_price = torch.round(curr_price * 100) / 100
curr_price = torch.min(torch.stack((curr_price, env.ask_prices[4])), dim=0)[0]
curr_price[curr_price < 7.3] = 7.4
print(curr_price)
for env_i in range(num_envs):
actions[:, env_i, 0] = curr_price - prev_price
actions[:, env_i, 1] = volume_smooths - 0.75
actions[:, :, 1] = actions[:, :, 1].clip(-1, +1)
plt.plot(xs, curr_price, color='orange', label='VWAP price', linestyle='-')  # todo: draw the VWAP policy's execution price in orange
plt.title(f'ask bid price (from level 1 to 5)')
plt.legend()
plt.grid()
plt.show()
# '''policy in env'''
# ary_remain_quantity = torch.zeros((num_envs, env.max_len + 1), dtype=torch.float32, device=env.device)
# ary_self_quantity = torch.zeros((num_envs, env.max_len + 1), dtype=torch.float32, device=env.device)
#
# cumulative_rewards = torch.zeros(num_envs, dtype=torch.float32, device=env.device)
# for i in range(1, env.max_len + 1):
# action = actions[i]
# state, reward, done, _ = env.step(action)
# cumulative_rewards += reward
# if done[0]:
# break
#
# ary_remain_quantity[:, i] = env.remain_quantity
# ary_self_quantity[:, i] = env.quantity
#
# ary_delta_quantity = ary_remain_quantity.clone()
# ary_delta_quantity[:, 1:] -= ary_delta_quantity[:, :-1]
# ary_delta_quantity = ary_delta_quantity[0]
#
# k = 5
# smooths = ary_delta_quantity.clone()
# for i in range(1, k):
# smooths[i:] += ary_delta_quantity[:-i]
# smooths[:-i] += ary_delta_quantity[i:]
# smooths /= 2 * k - 1 # convolve
# smooths[:k] = smooths[k]
# smooths[-k:] = smooths[-k]
#
# smooths = ary_delta_quantity.cpu().data.numpy()
#
# plt.plot(xs, smooths, label='VWAP quantity', linestyle='-')
#
# plt.title(f'ask bid price (from level 1 to 5)')
# plt.legend()
# plt.grid()
# plt.show()
def check__ask_price_volume_with_star():
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" # OMP: Error #15: Initializing libiomp5md.dll
import matplotlib.pyplot as plt
import numpy as np
num_envs = 2
smooth_kernel = 7
share_name = ['000768_XSHE', '000685_XSHE'][1]
env = OrderExecutionVecEnvForEval(num_envs=num_envs,
beg_date='2022-09-09',
end_date='2022-09-09',
share_name=share_name)
env.if_random = False
env.reset()
max_len1 = env.max_len + 1 # after env.reset()
xs = np.arange(max_len1)
print('xs.shape', xs.shape)
'''ask bid price (from level 1 to 5)'''
from matplotlib.cm import get_cmap
color_map = get_cmap('bwr') # Blue White Red, input 0.0 ~ 1.0 or 0 ~ 1000
ask_prices = np.array(env.ask_prices)
print('ask_prices.shape', ask_prices.shape)
n_level, max_len1 = ask_prices.shape
for i in range(n_level):  # todo: plot the askPrices here
face_color = color_map(float(1 - i / n_level) * 0.2 + 0.2)  # todo: use a blue gradient
if i + 1 == n_level:
plot_ask_price = np.zeros_like(ask_prices[i]) + np.nanmax(ask_prices[i])
plt.fill_between(xs, ask_prices[i], plot_ask_price, facecolor=face_color)
else:
plt.fill_between(xs, ask_prices[i], ask_prices[i + 1], facecolor=face_color)
bid_prices = np.array(env.bid_prices)
print('bid_prices.shape', bid_prices.shape)
n_level, max_len1 = bid_prices.shape
for i in range(n_level):  # todo: plot the bidPrices here
# face_color = color_map(float(i / n_level) * 0.3 + 0.5 + 0.1)  # red  # todo: use a red gradient
face_color = color_map(float(1 - i / n_level) * 0.2 + 0.2)  # blue  # todo: use a blue gradient
if i + 1 == n_level:
plot_bid_price = np.zeros_like(bid_prices[i]) + np.nanmin(bid_prices[i])
plt.fill_between(xs, bid_prices[i], plot_bid_price, facecolor=face_color)
else:
plt.fill_between(xs, bid_prices[i], bid_prices[i + 1], facecolor=face_color)
last_price = np.array(env.last_price)
plt.plot(xs, last_price, color='blue', label='last price')  # todo: draw the last price in blue
'''policy action'''
actions = torch.zeros((max_len1, num_envs, 2), dtype=torch.float32, device=env.device)
print('actions.shape', actions.shape)
# actions[..., 0]: the delta price is 0 by default
# actions[..., 1]: the quantity scale is +1 by default
'''policy: TWAP (1x of basic_quantity)'''
# actions[:, :, 0] = 0.0
# actions[:, :, 1] = 0.0 # (0.0+1) times of basic_quantity
'''policy: VWAP (using future data)'''
volume_weights = (env.volume / env.volume.mean() - 1) / env.volume.std(dim=0) + 1
volume_smooths = torch_convolve(volume_weights, k=smooth_kernel, dim=0)
prev_price = env.last_price.clone()
prev_price[1:] = env.last_price[:-1]
curr_price = env.last_price * ((volume_smooths - 1.0) * 2 * env.last_price.mean() + 1.0)
curr_price = torch.round(curr_price * 100) / 100
curr_price = torch.min(torch.stack((curr_price, env.ask_prices[4])), dim=0)[0]
for env_i in range(num_envs):
actions[:, env_i, 0] = curr_price - prev_price
action_quantity = (volume_smooths - volume_smooths.mean()) * 12e3 + 1.8
actions[:, env_i, 1] = action_quantity - 1
actions[:, :, 1] = actions[:, :, 1].clip(-1, +1 + 3)
'''policy in env'''
env_i = 0
ten_remain_quantity = torch.zeros((num_envs, env.max_len + 1), dtype=torch.float32, device=env.device)
ten_remain_quantity[:, 0] = env.remain_quantity
ten_sell_quantity = torch.zeros((num_envs, env.max_len + 1), dtype=torch.float32, device=env.device)
ten_sell_quantity[:, 0] = env.get_curr_quantity(actions[0][:, 1])
ten_curr_price = torch.zeros((num_envs, env.max_len + 1), dtype=torch.float32, device=env.device)
ten_curr_price[:, 0] = env.get_curr_price(actions[0][:, 0])
ten_rewards = torch.zeros((num_envs, env.max_len + 1), dtype=torch.float32, device=env.device)
ten_rewards[:, 0] = 0
for i in range(1, env.max_len + 1):
action = actions[i]
state, reward, done, _ = env.step(action)
ten_rewards[:, i] = reward
if done[0]:
break
ten_remain_quantity[:, i] = env.remain_quantity
ten_sell_quantity[:, i] = env.curr_quantity
ten_curr_price[:, i] = env.curr_price
# ary_remain_quantity = ten_remain_quantity[env_i].cpu().data.numpy()
# plt.plot(xs, ary_remain_quantity, label='VWAP remain_quantity', linestyle='-')
ten_exec_quantity = torch.zeros_like(ten_remain_quantity)
ten_exec_quantity[:, 1:] = ten_remain_quantity[:, :-1] - ten_remain_quantity[:, 1:]
filled_bool = (ten_exec_quantity == ten_sell_quantity)[env_i]
not_filled_bool = (ten_exec_quantity < ten_sell_quantity)[env_i]
"""
plt.scatter(marker=(5, 1))  # marker=(5, 1) selects style 1 of the 5-pointed star markers
https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/scatter_star_poly.html
"""
# plt.plot(xs, curr_price, color='orange', label='VWAP price', linestyle='-')  # todo: draw the VWAP policy's execution price in orange
filled_xs = xs[filled_bool]
filled_price = curr_price[filled_bool]
plt.scatter(filled_xs, filled_price, color='orange', label='VWAP price (filled)', marker=(5, 1))
not_filled_xs = xs[not_filled_bool]
not_filled_price = curr_price[not_filled_bool]
plt.scatter(not_filled_xs, not_filled_price, color='brown', label='VWAP price (not filled)', marker=(5, 1))
plt.title(f'ask bid price (from level 1 to 5)')
plt.legend()
plt.grid()
plt.show()
'''draw executed_quantity <= sell_quantity'''
# smo_exec_quantity = torch_convolve(ten_exec_quantity.T, k=smooth_kernel, dim=0).T # todo smooth
# ary_exec_quantity = smo_exec_quantity[env_i].cpu().data.numpy()
# plt.plot(xs, ary_exec_quantity, label='VWAP executed_quantity', linestyle='-')
#
# smo_sell_quantity = torch_convolve(ten_sell_quantity.T, k=smooth_kernel, dim=0).T # todo smooth
# ary_sell_quantity = smo_sell_quantity.cpu().data.numpy()[env_i]
# plt.plot(xs, ary_sell_quantity, label='VWAP sell_quantity', linestyle='-')
#
# plt.title(f'ask bid price (from level 1 to 5)')
# plt.legend()
# plt.grid()
# plt.show()
def torch_convolve(inp, k=9, dim=0):
assert dim == 0
out = inp.clone()
for i in range(1, k):
out[i:] += inp[:-i]
out[:-i] += inp[i:]
out /= 2 * k - 1 # convolve
out[:k] = out[k]
out[-k:] = out[-k]
return out
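# A minimal sketch (not in the original): torch_convolve is a centered moving
# average with 2*k-1 taps and clamped borders.
def _demo_torch_convolve():
    x = torch.arange(20, dtype=torch.float32)
    y = torch_convolve(x, k=3)  # y[i] ~= mean(x[i-2 : i+3]) away from the borders
    print(y.shape)  # torch.Size([20])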
if __name__ == '__main__':
# check__ask_price_volume()
check__ask_price_volume_with_star()
| 11,653 | 39.748252 | 111 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_env.py
|
import importlib
from typing import Any, Dict, List
import gym
import numpy as np
import abides_markets.agents.utils as markets_agent_utils
from abides_core import NanosecondTime
from abides_core.utils import str_to_ns
from abides_core.generators import ConstantTimeGenerator
from envs.markets_environment import AbidesGymMarketsEnv
class MarketsDailyInvestorEnv(AbidesGymMarketsEnv):
"""
Daily Investor V0 environment. It defines one of the ABIDES-Gym-markets environments.
This environment presents an example of the classic problem where an investor tries to make money buying and selling a stock throughout a single day.
The investor starts the day with cash but no position, then repeatedly buys and sells the stock in order to maximize its
marked-to-market value at the end of the day (i.e. cash plus holdings valued at the market price).
Arguments:
- background_config: the handcrafted agents configuration used for the environment
- mkt_close: time the market day ends
- timestep_duration: how long between 2 wakes up of the gym experimental agent
- starting_cash: cash of the agents at the beginning of the simulation
- order_fixed_size: size of the order placed by the experimental gym agent
- state_history_length: length of the raw state buffer
- market_data_buffer_length: length of the market data buffer
- first_interval: how long the simulation is run before the first wake up of the gym experimental agent
- reward_mode: can use a dense or sparse reward formulation
- done_ratio: ratio (mark2market_t/starting_cash) that defines when an episode is done (if agent has lost too much mark to market value)
- debug_mode: argument to change the info dictionary (lighter version if performance is an issue)
Daily Investor V0:
- Action Space:
- MKT buy order_fixed_size
- Hold
- MKT sell order_fixed_size
- State Space:
- Holdings
- Imbalance
- Spread
- DirectionFeature
- padded_returns
"""
raw_state_pre_process = markets_agent_utils.ignore_buffers_decorator
raw_state_to_state_pre_process = (
markets_agent_utils.ignore_mkt_data_buffer_decorator
)
def __init__(
self,
background_config: str = "rmsc04",
mkt_close: str = "16:00:00",
timestep_duration: str = "60s",
starting_cash: int = 1_000_000,
order_fixed_size: int = 10,
state_history_length: int = 4,
market_data_buffer_length: int = 5,
first_interval: str = "00:05:00",
reward_mode: str = "dense",
done_ratio: float = 0.3,
debug_mode: bool = False,
background_config_extra_kvargs={},
) -> None:
self.background_config: Any = importlib.import_module(
"abides_markets.configs.{}".format(background_config), package=None
) #
self.mkt_close: NanosecondTime = str_to_ns(mkt_close) #
self.timestep_duration: NanosecondTime = str_to_ns(timestep_duration) #
self.starting_cash: int = starting_cash #
self.order_fixed_size: int = order_fixed_size
self.state_history_length: int = state_history_length
self.market_data_buffer_length: int = market_data_buffer_length
self.first_interval: NanosecondTime = str_to_ns(first_interval)
self.reward_mode: str = reward_mode
self.done_ratio: float = done_ratio
self.debug_mode: bool = debug_mode
# marked_to_market limit to STOP the episode
self.down_done_condition: float = self.done_ratio * starting_cash
# CHECK PROPERTIES
assert background_config in [
"rmsc03",
"rmsc04",
"smc_01",
], "Select rmsc03, rmsc04 or smc_01 as config"
assert (self.first_interval <= str_to_ns("16:00:00")) & (
self.first_interval >= str_to_ns("00:00:00")
), "Select authorized FIRST_INTERVAL delay"
assert (self.mkt_close <= str_to_ns("16:00:00")) & (
self.mkt_close >= str_to_ns("09:30:00")
), "Select authorized market hours"
assert reward_mode in [
"sparse",
"dense",
], "reward_mode needs to be dense or sparse"
assert (self.timestep_duration <= str_to_ns("06:30:00")) & (
self.timestep_duration >= str_to_ns("00:00:00")
), "Select authorized timestep_duration"
assert (type(self.starting_cash) == int) & (
self.starting_cash >= 0
), "Select positive integer value for starting_cash"
assert (type(self.order_fixed_size) == int) & (
self.order_fixed_size >= 0
), "Select positive integer value for order_fixed_size"
assert (type(self.state_history_length) == int) & (
self.state_history_length >= 0
), "Select positive integer value for order_fixed_size"
assert (type(self.market_data_buffer_length) == int) & (
self.market_data_buffer_length >= 0
), "Select positive integer value for order_fixed_size"
assert (
(type(self.done_ratio) == float)
& (self.done_ratio >= 0)
& (self.done_ratio < 1)
), "Select positive float value for order_fixed_size between 0 and 1"
assert debug_mode in [
True,
False,
], "reward_mode needs to be True or False"
background_config_args = {"end_time": self.mkt_close}
background_config_args.update(background_config_extra_kvargs)
super().__init__(
background_config_pair=(
self.background_config.build_config,
background_config_args,
),
wakeup_interval_generator=ConstantTimeGenerator(
step_duration=self.timestep_duration
),
starting_cash=self.starting_cash,
state_buffer_length=self.state_history_length,
market_data_buffer_length=self.market_data_buffer_length,
first_interval=self.first_interval,
)
# Action Space
# MKT buy order_fixed_size | Hold | MKT sell order_fixed_size
self.num_actions: int = 3
self.action_space: gym.Space = gym.spaces.Discrete(self.num_actions)
# State Space
# [Holdings, Imbalance, Spread, DirectionFeature] + padded_returns
self.num_state_features: int = 4 + self.state_history_length - 1
# construct state space "box"
self.state_highs: np.ndarray = np.array(
[
np.finfo(np.float32).max, # Holdings
1.0, # Imbalance
np.finfo(np.float32).max, # Spread
np.finfo(np.float32).max, # DirectionFeature
]
+ (self.state_history_length - 1)
* [np.finfo(np.float32).max], # padded_returns
dtype=np.float32,
).reshape(self.num_state_features, 1)
self.state_lows: np.ndarray = np.array(
[
np.finfo(np.float32).min, # Holdings
0.0, # Imbalance
np.finfo(np.float32).min, # Spread
np.finfo(np.float32).min, # DirectionFeature
]
+ (self.state_history_length - 1)
* [np.finfo(np.float32).min], # padded_returns
dtype=np.float32,
).reshape(self.num_state_features, 1)
self.observation_space: gym.Space = gym.spaces.Box(
self.state_lows,
self.state_highs,
shape=(self.num_state_features, 1),
dtype=np.float32,
)
# instantiate previous_marked_to_market as starting_cash
self.previous_marked_to_market = self.starting_cash
def _map_action_space_to_ABIDES_SIMULATOR_SPACE(
self, action: int
) -> List[Dict[str, Any]]:
"""
utility function that maps the OpenAI Gym action definition (integers) to the environment API action definition (list of dictionaries)
The action space ranges [0, 1, 2] where:
- `0` MKT buy order_fixed_size
- `1` Hold ( i.e. do nothing )
- '2' MKT sell order_fixed_size
Arguments:
- action: integer representation of the different actions
Returns:
- action_list: list of the corresponding series of actions mapped into the ABIDES env API
"""
if action == 0:
return [{"type": "MKT", "direction": "BUY", "size": self.order_fixed_size}]
elif action == 1:
return []
elif action == 2:
return [{"type": "MKT", "direction": "SELL", "size": self.order_fixed_size}]
else:
raise ValueError(
f"Action {action} is not part of the actions supported by the function."
)
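# Example of the mapping (a sketch, with the default order_fixed_size=10):
#   0 -> [{"type": "MKT", "direction": "BUY",  "size": 10}]
#   1 -> []   (hold)
#   2 -> [{"type": "MKT", "direction": "SELL", "size": 10}]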
@raw_state_to_state_pre_process
def raw_state_to_state(self, raw_state: Dict[str, Any]) -> np.ndarray:
"""
method that transforms a raw state into a state representation
Arguments:
- raw_state: dictionary that contains raw simulation information obtained from the gym experimental agent
Returns:
- state: state representation defining the MDP for the daily investor v0 environment
"""
# 0) Preliminary
bids = raw_state["parsed_mkt_data"]["bids"]
asks = raw_state["parsed_mkt_data"]["asks"]
last_transactions = raw_state["parsed_mkt_data"]["last_transaction"]
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) Imbalance
imbalances = [
markets_agent_utils.get_imbalance(b, a, depth=3)
for (b, a) in zip(bids, asks)
]
# 3) Returns
mid_prices = [
markets_agent_utils.get_mid_price(b, a, lt)
for (b, a, lt) in zip(bids, asks, last_transactions)
]
returns = np.diff(mid_prices)
padded_returns = np.zeros(self.state_history_length - 1)
padded_returns[-len(returns):] = (
returns if len(returns) > 0 else padded_returns
)
# 4) Spread
best_bids = [
bids[0][0] if len(bids) > 0 else mid
for (bids, mid) in zip(bids, mid_prices)
]
best_asks = [
asks[0][0] if len(asks) > 0 else mid
for (asks, mid) in zip(asks, mid_prices)
]
spreads = np.array(best_asks) - np.array(best_bids)
# 5) direction feature
direction_features = np.array(mid_prices) - np.array(last_transactions)
# 6) Compute State (Holdings, Imbalance, Spread, DirectionFeature + Returns)
computed_state = np.array(
[holdings[-1], imbalances[-1], spreads[-1], direction_features[-1]]
+ padded_returns.tolist(),
dtype=np.float32,
)
return computed_state.reshape(self.num_state_features, 1)
@raw_state_pre_process
def raw_state_to_reward(self, raw_state: Dict[str, Any]) -> float:
"""
method that transforms a raw state into the reward obtained during the step
Arguments:
- raw_state: dictionary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: immediate reward computed at each step for the daily investor v0 environment
"""
if self.reward_mode == "dense":
# Dense reward: computed at every step as the change in marked-to-market
# value since the previous step, then normalized by the order size and by
# the number of steps per episode
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) Available Cash
cash = raw_state["internal_data"]["cash"]
# 3) Last Known Market Transaction Price
last_transaction = raw_state["parsed_mkt_data"]["last_transaction"]
# 4) compute the marked to market
marked_to_market = cash + holdings * last_transaction
# 5) Reward
reward = marked_to_market - self.previous_marked_to_market
# 6) Order Size Normalization of Reward
reward = reward / self.order_fixed_size
# 7) Time Normalization of Reward
num_ns_day = (16 - 9.5) * 60 * 60 * 1e9
step_length = self.timestep_duration
num_steps_per_episode = num_ns_day / step_length
reward = reward / num_steps_per_episode
# 8) update previous mm
self.previous_marked_to_market = marked_to_market
return reward
elif self.reward_mode == "sparse":
return 0
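# A worked example of the time normalization above (not in the original): with
# timestep_duration="60s", num_ns_day = 6.5 * 60 * 60 * 1e9 = 2.34e13 ns and
# step_length = 6e10 ns, so num_steps_per_episode = 390 (one step per minute of
# the 09:30-16:00 session) and each step's reward is divided by 390.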
@raw_state_pre_process
def raw_state_to_update_reward(self, raw_state: Dict[str, Any]) -> float:
"""
method that transforms a raw state into the final step reward update (if needed)
Arguments:
- raw_state: dictionary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: update reward computed at the end of the episode for the daily investor v0 environment
"""
if self.reward_mode == "dense":
return 0
elif self.reward_mode == "sparse":
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) Available Cash
cash = raw_state["internal_data"]["cash"]
# 3) Last Known Market Transaction Price
last_transaction = raw_state["parsed_mkt_data"]["last_transaction"]
# 4) compute the marked to market
marked_to_market = cash + holdings * last_transaction
reward = marked_to_market - self.starting_cash
# 5) Order Size Normalization of Reward
reward = reward / self.order_fixed_size
# 6) Time Normalization of Reward
num_ns_day = (16 - 9.5) * 60 * 60 * 1e9
step_length = self.timestep_duration
num_steps_per_episode = num_ns_day / step_length
reward = reward / num_steps_per_episode
return reward
@raw_state_pre_process
def raw_state_to_done(self, raw_state: Dict[str, Any]) -> bool:
"""
method that transforms a raw state into the flag if an episode is done
Arguments:
- raw_state: dictionary that contains raw simulation information obtained from the gym experimental agent
Returns:
- done: flag that describes if the episode is terminated or not for the daily investor v0 environment
"""
# the episode can stop because the market closes or because some condition is met
# here the condition is that the trader has lost too much money
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) Available Cash
cash = raw_state["internal_data"]["cash"]
# 3) Last Known Market Transaction Price
last_transaction = raw_state["parsed_mkt_data"]["last_transaction"]
# 4) compute the marked to market
marked_to_market = cash + holdings * last_transaction
# 5) comparison
done = marked_to_market <= self.down_done_condition
return done
@raw_state_pre_process
def raw_state_to_info(self, raw_state: Dict[str, Any]) -> Dict[str, Any]:
"""
method that transforms a raw state into an info dictionary
Arguments:
- raw_state: dictionary that contains raw simulation information obtained from the gym experimental agent
Returns:
- info: info dictionary computed at each step for the daily investor v0 environment
"""
# Agent cannot use this info for taking decision
# only for debugging
# 1) Last Known Market Transaction Price
last_transaction = raw_state["parsed_mkt_data"]["last_transaction"]
# 2) Last Known best bid
bids = raw_state["parsed_mkt_data"]["bids"]
best_bid = bids[0][0] if len(bids) > 0 else last_transaction
# 3) Last Known best ask
asks = raw_state["parsed_mkt_data"]["asks"]
best_ask = asks[0][0] if len(asks) > 0 else last_transaction
# 4) Available Cash
cash = raw_state["internal_data"]["cash"]
# 5) Current Time
current_time = raw_state["internal_data"]["current_time"]
# 6) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 7) Spread
spread = best_ask - best_bid
# 8) OrderBook features
orderbook = {
"asks": {"price": {}, "volume": {}},
"bids": {"price": {}, "volume": {}},
}
for book, book_name in [(bids, "bids"), (asks, "asks")]:
for level in [0, 1, 2]:
price, volume = markets_agent_utils.get_val(book, level)  # use the current book (bids or asks), not always bids
orderbook[book_name]["price"][level] = np.array([price]).reshape(-1)
orderbook[book_name]["volume"][level] = np.array([volume]).reshape(-1)
# 9) order_status
order_status = raw_state["internal_data"]["order_status"]
# 10) mkt_open
mkt_open = raw_state["internal_data"]["mkt_open"]
# 11) mkt_close
mkt_close = raw_state["internal_data"]["mkt_close"]
# 12) last vals
last_bid = markets_agent_utils.get_last_val(bids, last_transaction)
last_ask = markets_agent_utils.get_last_val(asks, last_transaction)
# 13) spreads
wide_spread = last_ask - last_bid
ask_spread = last_ask - best_ask
bid_spread = best_bid - last_bid
# 14) compute the marked to market
marked_to_market = cash + holdings * last_transaction
if self.debug_mode:
return {
"last_transaction": last_transaction,
"best_bid": best_bid,
"best_ask": best_ask,
"spread": spread,
"bids": bids,
"asks": asks,
"cash": cash,
"current_time": current_time,
"holdings": holdings,
"orderbook": orderbook,
"order_status": order_status,
"mkt_open": mkt_open,
"mkt_close": mkt_close,
"last_bid": last_bid,
"last_ask": last_ask,
"wide_spread": wide_spread,
"ask_spread": ask_spread,
"bid_spread": bid_spread,
"marked_to_market": marked_to_market,
}
else:
return {}
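# A minimal usage sketch (an assumption, not part of the original file): the
# environment follows the classic gym API, so a random-action rollout would be:
#
#   env = MarketsDailyInvestorEnv(background_config="rmsc04", timestep_duration="60s")
#   state = env.reset()
#   done = False
#   while not done:
#       state, reward, done, info = env.step(env.action_space.sample())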
| 18,990 | 37.599593 | 154 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/envs/markets_execution_environment_v0.py
|
import importlib
from dataclasses import asdict, dataclass, field
from typing import Any, Dict, List
from abc import ABC
import gym
import numpy as np
import abides_markets.agents.utils as markets_agent_utils
from abides_core import NanosecondTime
from abides_core.utils import str_to_ns
from abides_core.generators import ConstantTimeGenerator
from .markets_environment import AbidesGymMarketsEnv
class SubGymMarketsExecutionEnv_v0(AbidesGymMarketsEnv):
"""
Execution V0 environment. It defines one of the ABIDES-Gym-markets environments.
This environment presents an example of the algorithmic order-execution problem.
The agent has either an initial inventory of the stock it tries to trade out of, or no initial inventory and
tries to acquire a target number of shares. The goal is to realize this task while minimizing transaction cost from spreads
and market impact. It does so by splitting the parent order into several smaller child orders.
Arguments:
- background_config: the handcrafted agents configuration used for the environment
- mkt_close: time the market day ends
- timestep_duration: how long between 2 wakes up of the gym experimental agent
- starting_cash: cash of the agents at the beginning of the simulation
- order_fixed_size: size of the order placed by the experimental gym agent
- state_history_length: length of the raw state buffer
- market_data_buffer_length: length of the market data buffer
- first_interval: how long the simulation is run before the first wake up of the gym experimental agent
- parent_order_size: total size the agent has to execute (either buy or sell)
- execution_window: time length the agent is given to proceed with the parent_order_size execution
- direction: direction of the parent order (buy or sell)
- not_enough_reward_update: a constant penalty per non-executed share at the end of the time window
- just_quantity_reward_update: reward update if the whole order is completed
- reward_mode: can use a dense or sparse reward formulation
- done_ratio: ratio (mark2market_t/starting_cash) that defines when an episode is done (if the agent has lost too much mark to market value)
- debug_mode: argument to change the info dictionary (lighter version if performance is an issue)
- background_config_extra_kvargs: dictionary of extra key value arguments passed to the background config builder function
Execution V0:
- Action Space:
- MKT order_fixed_size
- LMT order_fixed_size
- Hold
- State Space:
- holdings_pct
- time_pct
- diff_pct
- imbalance_all
- imbalance_5
- price_impact
- spread
- direction
- returns
"""
raw_state_pre_process = markets_agent_utils.ignore_buffers_decorator
raw_state_to_state_pre_process = (
markets_agent_utils.ignore_mkt_data_buffer_decorator
)
@dataclass
class CustomMetricsTracker(ABC):
"""
Data Class used to track custom metrics that are output to rllib
"""
slippage_reward: float = 0
late_penalty_reward: float = 0 # at the end of the episode
executed_quantity: int = 0 # at the end of the episode
remaining_quantity: int = 0 # at the end of the episode
action_counter: Dict[str, int] = field(default_factory=dict)
holdings_pct: float = 0
time_pct: float = 0
diff_pct: float = 0
imbalance_all: float = 0
imbalance_5: float = 0
price_impact: int = 0
spread: int = 0
direction_feature: float = 0
num_max_steps_per_episode: float = 0
def __init__(
self,
background_config: Any = "rmsc04",
mkt_close: str = "16:00:00",
timestep_duration: str = "60s",
starting_cash: int = 1_000_000,
order_fixed_size: int = 10,
state_history_length: int = 4,
market_data_buffer_length: int = 5,
first_interval: str = "00:00:30",
parent_order_size: int = 1000,
execution_window: str = "00:10:00",
direction: str = "BUY",
not_enough_reward_update: int = -1000,
too_much_reward_update: int = -100,
just_quantity_reward_update: int = 0,
debug_mode: bool = False,
background_config_extra_kvargs: Dict[str, Any] = {},
) -> None:
self.background_config: Any = importlib.import_module(
"abides_markets.configs.{}".format(background_config), package=None
)
self.mkt_close: NanosecondTime = str_to_ns(mkt_close)
self.timestep_duration: NanosecondTime = str_to_ns(timestep_duration)
self.starting_cash: int = starting_cash
self.order_fixed_size: int = order_fixed_size
self.state_history_length: int = state_history_length
self.market_data_buffer_length: int = market_data_buffer_length
self.first_interval: NanosecondTime = str_to_ns(first_interval)
self.parent_order_size: int = parent_order_size
self.execution_window: str = str_to_ns(execution_window)
self.direction: str = direction
self.debug_mode: bool = debug_mode
self.too_much_reward_update: int = too_much_reward_update
self.not_enough_reward_update: int = not_enough_reward_update
self.just_quantity_reward_update: int = just_quantity_reward_update
self.entry_price: int = 1
self.far_touch: int = 1
self.near_touch: int = 1
self.step_index: int = 0
self.custom_metrics_tracker = (
self.CustomMetricsTracker()
) # init the custom metric tracker
##################
# CHECK PROPERTIES
assert background_config in [
"rmsc03",
"rmsc04",
"smc_01",
], "Select rmsc03 or rmsc04 as config"
assert (self.first_interval <= str_to_ns("16:00:00")) & (
self.first_interval >= str_to_ns("00:00:00")
), "Select authorized FIRST_INTERVAL delay"
assert (self.mkt_close <= str_to_ns("16:00:00")) & (
self.mkt_close >= str_to_ns("09:30:00")
), "Select authorized market hours"
assert (self.timestep_duration <= str_to_ns("06:30:00")) & (
self.timestep_duration >= str_to_ns("00:00:00")
), "Select authorized timestep_duration"
assert (type(self.starting_cash) == int) & (
self.starting_cash >= 0
), "Select positive integer value for starting_cash"
assert (type(self.order_fixed_size) == int) & (
self.order_fixed_size >= 0
), "Select positive integer value for order_fixed_size"
assert (type(self.state_history_length) == int) & (
self.state_history_length >= 0
), "Select positive integer value for order_fixed_size"
assert (type(self.market_data_buffer_length) == int) & (
self.market_data_buffer_length >= 0
), "Select positive integer value for order_fixed_size"
assert self.debug_mode in [
True,
False,
], "debug_mode needs to be True or False"
assert self.direction in [
"BUY",
"SELL",
], "direction needs to be BUY or SELL"
assert (type(self.parent_order_size) == int) & (
self.order_fixed_size >= 0
), "Select positive integer value for parent_order_size"
assert (self.execution_window <= str_to_ns("06:30:00")) & (
self.execution_window >= str_to_ns("00:00:00")
), "Select authorized execution_window"
assert (
type(self.too_much_reward_update) == int
), "Select integer value for too_much_reward_update"
assert (
type(self.not_enough_reward_update) == int
), "Select integer value for not_enough_reward_update"
assert (
type(self.just_quantity_reward_update) == int
), "Select integer value for just_quantity_reward_update"
background_config_args = {"end_time": self.mkt_close}
background_config_args.update(background_config_extra_kvargs)
super().__init__(
background_config_pair=(
self.background_config.build_config,
background_config_args,
),
wakeup_interval_generator=ConstantTimeGenerator(
step_duration=self.timestep_duration
),
starting_cash=self.starting_cash,
state_buffer_length=self.state_history_length,
market_data_buffer_length=self.market_data_buffer_length,
first_interval=self.first_interval,
)
# Action Space
# MKT order_fixed_size | LMT order_fixed_size | Hold
self.num_actions: int = 3
self.action_space: gym.Space = gym.spaces.Discrete(self.num_actions)
# instantiate the action counter
for i in range(self.num_actions):
self.custom_metrics_tracker.action_counter[f"action_{i}"] = 0
num_ns_episode = self.first_interval + self.execution_window
step_length = self.timestep_duration
num_max_steps_per_episode = num_ns_episode / step_length
self.custom_metrics_tracker.num_max_steps_per_episode = (
num_max_steps_per_episode
)
# State Space
# [holdings_pct, time_pct, diff_pct, imbalance_all, imbalance_5, price_impact, spread, direction] + padded_returns
self.num_state_features: int = 8 + self.state_history_length - 1
# construct state space "box"
# holdings_pct, time_pct, diff_pct, imbalance_all, imbalance_5, price_impact, spread, direction, returns
self.state_highs: np.ndarray = np.array(
[
2, # holdings_pct
2, # time_pct
4, # diff_pct
1, # imbalance_all
1, # imbalance_5
np.finfo(np.float32).max, # price_impact
np.finfo(np.float32).max, # spread
np.finfo(np.float32).max,
]
+ (self.state_history_length - 1)  # direction
* [np.finfo(np.float32).max], # returns
dtype=np.float32,
).reshape(self.num_state_features, 1)
self.state_lows: np.ndarray = np.array(
[
-2, # holdings_pct
-2, # time_pct
-4, # diff_pct
0, # imbalance_all
0, # imbalance_5
np.finfo(np.float32).min, # price_impact
np.finfo(np.float32).min, # spread
                np.finfo(np.float32).min,  # direction_feature
            ]
            + (self.state_history_length - 1)
* [np.finfo(np.float32).min], # returns
dtype=np.float32,
).reshape(self.num_state_features, 1)
self.observation_space: gym.Space = gym.spaces.Box(
self.state_lows,
self.state_highs,
shape=(self.num_state_features, 1),
dtype=np.float32,
)
# initialize previous_marked_to_market to starting_cash (No holding at the beginning of the episode)
self.previous_marked_to_market: int = self.starting_cash
def _map_action_space_to_ABIDES_SIMULATOR_SPACE(
self, action: int
) -> List[Dict[str, Any]]:
"""
utility function that maps open ai action definition (integers) to environnement API action definition (list of dictionaries)
The action space ranges [0, 1, 2] where:
- `0` MKT direction order_fixed_size
- '1' LMT direction order_fixed_size
- '2' DO NOTHING
Arguments:
- action: integer representation of the different actions
Returns:
- action_list: list of the corresponding series of action mapped into abides env apis
"""
self.custom_metrics_tracker.action_counter[
f"action_{action}"
] += 1 # increase counter
if action == 0:
return [
{"type": "CCL_ALL"},
{
"type": "MKT",
"direction": self.direction,
"size": self.order_fixed_size,
},
]
elif action == 1:
return [
{"type": "CCL_ALL"},
{
"type": "LMT",
"direction": self.direction,
"size": self.order_fixed_size,
"limit_price": self.near_touch,
},
]
elif action == 2:
return []
else:
raise ValueError(
f"Action {action} is not part of the actions supported by the function."
)
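    # Illustrative mapping (the exact values depend on the constructor
    # arguments and on the live order book): with direction="BUY" and
    # order_fixed_size=10, _map_action_space_to_ABIDES_SIMULATOR_SPACE(1)
    # returns
    # [{"type": "CCL_ALL"},
    #  {"type": "LMT", "direction": "BUY", "size": 10,
    #   "limit_price": <current near touch>}].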
@raw_state_to_state_pre_process
def raw_state_to_state(self, raw_state: Dict[str, Any]) -> np.ndarray:
"""
method that transforms a raw state into a state representation
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- state: state representation defining the MDP for the execution v0 environnement
"""
# 0) Preliminary
bids = raw_state["parsed_mkt_data"]["bids"]
asks = raw_state["parsed_mkt_data"]["asks"]
last_transactions = raw_state["parsed_mkt_data"]["last_transaction"]
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
holdings_pct = holdings[-1] / self.parent_order_size
# 2) Timing
# 2)a) mkt_open
mkt_open = raw_state["internal_data"]["mkt_open"][-1]
# 2)b) time from beginning of execution (parent arrival)
current_time = raw_state["internal_data"]["current_time"][-1]
time_from_parent_arrival = current_time - mkt_open - self.first_interval
assert (
current_time >= mkt_open + self.first_interval
), "Agent has woken up earlier than its first interval"
# 2)c) time limit
time_limit = self.execution_window
# 2)d) compute percentage time advancement
time_pct = time_from_parent_arrival / time_limit
# 3) Advancement Comparison
diff_pct = holdings_pct - time_pct
        # 4) Imbalance
imbalances_all = [
markets_agent_utils.get_imbalance(b, a, depth=None)
for (b, a) in zip(bids, asks)
]
imbalance_all = imbalances_all[-1]
imbalances_5 = [
markets_agent_utils.get_imbalance(b, a, depth=5)
for (b, a) in zip(bids, asks)
]
imbalance_5 = imbalances_5[-1]
        # 5) price_impact
mid_prices = [
markets_agent_utils.get_mid_price(b, a, lt)
for (b, a, lt) in zip(bids, asks, last_transactions)
]
mid_price = mid_prices[-1]
        if self.step_index == 0:  # no order has been executed yet
self.entry_price = mid_price
entry_price = self.entry_price
book = (
raw_state["parsed_mkt_data"]["bids"][-1]
if self.direction == "BUY"
else raw_state["parsed_mkt_data"]["asks"][-1]
)
self.near_touch = book[0][0] if len(book) > 0 else last_transactions[-1]
# Compute the price impact
price_impact = (
np.log(mid_price / entry_price)
if self.direction == "BUY"
else np.log(entry_price / mid_price)
)
        # 6) Spread
best_bids = [
bids[0][0] if len(bids) > 0 else mid
for (bids, mid) in zip(bids, mid_prices)
]
best_asks = [
asks[0][0] if len(asks) > 0 else mid
for (asks, mid) in zip(asks, mid_prices)
]
spreads = np.array(best_asks) - np.array(best_bids)
spread = spreads[-1]
        # 7) direction feature
direction_features = np.array(mid_prices) - np.array(last_transactions)
direction_feature = direction_features[-1]
        # 8) returns (mid_prices were already computed above for the price impact)
        returns = np.diff(mid_prices)
padded_returns = np.zeros(self.state_history_length - 1)
        if len(returns) > 0:
            padded_returns[-len(returns):] = returns
# log custom metrics to tracker
self.custom_metrics_tracker.holdings_pct = holdings_pct
self.custom_metrics_tracker.time_pct = time_pct
self.custom_metrics_tracker.diff_pct = diff_pct
self.custom_metrics_tracker.imbalance_all = imbalance_all
self.custom_metrics_tracker.imbalance_5 = imbalance_5
self.custom_metrics_tracker.price_impact = price_impact
self.custom_metrics_tracker.spread = spread
self.custom_metrics_tracker.direction_feature = direction_feature
        # 9) Computed State
computed_state = np.array(
[
holdings_pct,
time_pct,
diff_pct,
imbalance_all,
imbalance_5,
price_impact,
spread,
direction_feature,
]
+ padded_returns.tolist(),
dtype=np.float32,
)
        # update step index
        self.step_index += 1
return computed_state.reshape(self.num_state_features, 1)
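    # Worked example (hypothetical numbers): with state_history_length=4,
    # two observed mid prices [10_000, 10_002] give returns=[2] and
    # padded_returns=[0., 0., 2.]; the returned state then has shape
    # (num_state_features, 1) = (8 + 4 - 1, 1) = (11, 1).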
@raw_state_pre_process
def raw_state_to_reward(self, raw_state: Dict[str, Any]) -> float:
"""
method that transforms a raw state into the reward obtained during the step
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: immediate reward computed at each step for the execution v0 environnement
"""
        # here we define the reward as the PnL of the orders executed since the last
        # wakeup, measured against the entry price and normalized by parent_order_size
# 1) entry_price
entry_price = self.entry_price
# 2) inter_wakeup_executed_orders
inter_wakeup_executed_orders = raw_state["internal_data"][
"inter_wakeup_executed_orders"
]
# 3) Compute PNL of the orders
if len(inter_wakeup_executed_orders) == 0:
pnl = 0
else:
pnl = (
sum(
(entry_price - order.fill_price) * order.quantity
for order in inter_wakeup_executed_orders
)
if self.direction == "BUY"
else sum(
(order.fill_price - entry_price) * order.quantity
for order in inter_wakeup_executed_orders
)
)
self.pnl = pnl
# 4) normalization
reward = pnl / self.parent_order_size
# log custom metrics to tracker
self.custom_metrics_tracker.slippage_reward = reward
return reward
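    # Worked example (hypothetical numbers): with direction="BUY",
    # entry_price=10_000 and a single fill of 20 shares at fill_price=9_990,
    # pnl = (10_000 - 9_990) * 20 = 200; with parent_order_size=100 the
    # step reward is 200 / 100 = 2.0.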
@raw_state_pre_process
def raw_state_to_update_reward(self, raw_state: Dict[str, Any]) -> float:
"""
method that transforms a raw state into the final step reward update (if needed)
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: update reward computed at the end of the episode for the execution v0 environnement
"""
# can update with additional reward at end of episode depending on scenario normalized by parent_order_size
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) parent_order_size
parent_order_size = self.parent_order_size
# 3) Compute update_reward
if (self.direction == "BUY") and (holdings >= parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.too_much_reward_update
) # executed buy too much
elif (self.direction == "BUY") and (holdings < parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.not_enough_reward_update
) # executed buy not enough
elif (self.direction == "SELL") and (holdings <= -parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.too_much_reward_update
) # executed sell too much
elif (self.direction == "SELL") and (holdings > -parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.not_enough_reward_update
) # executed sell not enough
else:
update_reward = self.just_quantity_reward_update
# 4) Normalization
update_reward = update_reward / self.parent_order_size
self.custom_metrics_tracker.late_penalty_reward = update_reward
return update_reward
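    # Worked example (hypothetical numbers): with direction="BUY",
    # parent_order_size=100, final holdings=80 and not_enough_reward_update=-2,
    # the update is abs(80 - 100) * (-2) / 100 = -0.4, added to the reward of
    # the final step of the episode.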
@raw_state_pre_process
def raw_state_to_done(self, raw_state: Dict[str, Any]) -> bool:
"""
method that transforms a raw state into the flag if an episode is done
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- done: flag that describes if the episode is terminated or not for the execution v0 environnement
"""
# episode can stop because market closes or because some condition is met
# here the condition is parent order fully executed
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) parent_order_size
parent_order_size = self.parent_order_size
# 3) current time
current_time = raw_state["internal_data"]["current_time"]
# 4) time_limit
# 4)a) mkt_open
mkt_open = raw_state["internal_data"]["mkt_open"]
        # 4)b) time_limit
time_limit = mkt_open + self.first_interval + self.execution_window
# 5) conditions
if (self.direction == "BUY") and (holdings >= parent_order_size):
done = True # Buy parent order executed
elif (self.direction == "SELL") and (holdings <= -parent_order_size):
done = True # Sell parent order executed
elif current_time >= time_limit:
done = True # Mkt Close
else:
done = False
self.custom_metrics_tracker.executed_quantity = (
holdings if self.direction == "BUY" else -holdings
)
self.custom_metrics_tracker.remaining_quantity = (
parent_order_size - self.custom_metrics_tracker.executed_quantity
)
return done
@raw_state_pre_process
def raw_state_to_info(self, raw_state: Dict[str, Any]) -> Dict[str, Any]:
"""
method that transforms a raw state into an info dictionnary
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: info dictionnary computed at each step for the execution v0 environnement
"""
# Agent cannot use this info for taking decision
# only for debugging
# 1) Last Known Market Transaction Price
last_transaction = raw_state["parsed_mkt_data"]["last_transaction"]
# 2) Last Known best bid
bids = raw_state["parsed_mkt_data"]["bids"]
best_bid = bids[0][0] if len(bids) > 0 else last_transaction
# 3) Last Known best ask
asks = raw_state["parsed_mkt_data"]["asks"]
best_ask = asks[0][0] if len(asks) > 0 else last_transaction
# 4) Current Time
current_time = raw_state["internal_data"]["current_time"]
# 5) Holdings
holdings = raw_state["internal_data"]["holdings"]
        if self.debug_mode:
return {
"last_transaction": last_transaction,
"best_bid": best_bid,
"best_ask": best_ask,
"current_time": current_time,
"holdings": holdings,
"parent_size": self.parent_order_size,
"pnl": self.pnl,
"reward": self.pnl / self.parent_order_size,
}
else:
return asdict(self.custom_metrics_tracker)
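# Hedged usage sketch (not part of the module): the keyword names mirror the
# attributes validated in the asserts above; the exact values are illustrative.
if __name__ == "__main__":
    env = SubGymMarketsExecutionEnv_v0(
        background_config="rmsc04",
        timestep_duration="30s",
        order_fixed_size=10,
        debug_mode=True,
    )
    state = env.reset()
    done = False
    while not done:
        state, reward, done, info = env.step(2)  # action 2: do nothing
    print(info)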
| 24,758 | 37.386047 | 144 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/envs/markets_environment.py
|
from copy import deepcopy
from abc import abstractmethod, ABC
from typing import Any, Callable, Dict, List, Optional, Tuple
import gym
import numpy as np
from gym.utils import seeding
import abides_markets.agents.utils as markets_agent_utils
from abides_core import Kernel, NanosecondTime
from abides_core.generators import InterArrivalTimeGenerator
from abides_core.utils import subdict
from abides_markets.utils import config_add_agents
from .core_environment import AbidesGymCoreEnv
from ..experimental_agents.financial_gym_agent import FinancialGymAgent
class AbidesGymMarketsEnv(AbidesGymCoreEnv, ABC):
"""
Abstract class for markets gym to inherit from to create usable specific ABIDES Gyms
Arguments:
- background_config_pair: tuple consisting in the background builder function and the inputs to use
- wakeup_interval_generator: generator used to compute delta time wakeup for the gym experimental agent
- starting_cash: cash of the agents at the beginning of the simulation
- state_history_length: length of the raw state buffer
- market_data_buffer_length: length of the market data buffer
- first_interval: how long the simulation is run before the first wake up of the gym experimental agent
- raw_state_pre_process: decorator used to pre-process raw_state
"""
raw_state_pre_process = markets_agent_utils.identity_decorator
def __init__(
self,
background_config_pair: Tuple[Callable, Optional[Dict[str, Any]]],
wakeup_interval_generator: InterArrivalTimeGenerator,
starting_cash: int,
state_buffer_length: int,
market_data_buffer_length: int,
first_interval: Optional[NanosecondTime] = None,
raw_state_pre_process=markets_agent_utils.identity_decorator,
) -> None:
super().__init__(
background_config_pair,
wakeup_interval_generator,
state_buffer_length,
first_interval=first_interval,
gymAgentConstructor=FinancialGymAgent,
)
self.starting_cash: int = starting_cash
self.market_data_buffer_length: int = market_data_buffer_length
self.extra_gym_agent_kvargs = {
"starting_cash": self.starting_cash,
"market_data_buffer_length": self.market_data_buffer_length,
}
self.extra_background_config_kvargs = {
"exchange_log_orders": False,
"book_logging": False, # may need to set to True if wants to return OB in terminal state when episode ends (gym2)
"log_orders": None,
}
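# Hedged sketch (not part of the module): the minimal surface a concrete
# subclass must implement; the class name and the trivial bodies are
# illustrative placeholders. A usable environment would also set
# self.action_space / self.observation_space and call super().__init__
# with concrete arguments, as the SubGymMarkets* environments do.
class MyMinimalMarketsEnv(AbidesGymMarketsEnv):
    def _map_action_space_to_ABIDES_SIMULATOR_SPACE(
        self, action: int
    ) -> List[Dict[str, Any]]:
        return []  # always hold
    def raw_state_to_state(self, raw_state: Dict[str, Any]) -> np.ndarray:
        return np.zeros((1, 1), dtype=np.float32)
    def raw_state_to_reward(self, raw_state: Dict[str, Any]) -> float:
        return 0.0
    def raw_state_to_update_reward(self, raw_state: Dict[str, Any]) -> float:
        return 0.0
    def raw_state_to_done(self, raw_state: Dict[str, Any]) -> bool:
        return False
    def raw_state_to_info(self, raw_state: Dict[str, Any]) -> Dict[str, Any]:
        return {}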
| 2,623 | 40 | 126 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/envs/core_environment.py
|
from copy import deepcopy
from abc import abstractmethod, ABC
from typing import Any, Callable, Dict, List, Optional, Tuple
import gym
import numpy as np
from gym.utils import seeding
from abides_core import Kernel, NanosecondTime
from abides_core.generators import InterArrivalTimeGenerator
from abides_core.utils import subdict
from abides_markets.utils import config_add_agents
class AbidesGymCoreEnv(gym.Env, ABC):
"""
Abstract class for core gym to inherit from to create usable specific ABIDES Gyms
"""
def __init__(
self,
background_config_pair: Tuple[Callable, Optional[Dict[str, Any]]],
wakeup_interval_generator: InterArrivalTimeGenerator,
state_buffer_length: int,
first_interval: Optional[NanosecondTime] = None,
gymAgentConstructor=None,
) -> None:
        # normalize the config args before storing: tuples are immutable, so
        # replace the pair rather than assigning to index 1
        if background_config_pair[1] is None:
            background_config_pair = (background_config_pair[0], {})
        self.background_config_pair: Tuple[
            Callable, Optional[Dict[str, Any]]
        ] = background_config_pair
self.wakeup_interval_generator: InterArrivalTimeGenerator = (
wakeup_interval_generator
)
self.first_interval = first_interval
self.state_buffer_length: int = state_buffer_length
self.gymAgentConstructor = gymAgentConstructor
self.seed() # fix random seed if no seed specified
self.state: Optional[np.ndarray] = None
self.reward: Optional[float] = None
self.done: Optional[bool] = None
self.info: Optional[Dict[str, Any]] = None
def reset(self):
"""
        Resets the state of the environment and returns an initial observation.
Returns
-------
observation (object): the initial observation of the space.
"""
# get seed to initialize random states for ABIDES
seed = self.np_random.randint(low=0, high=2 ** 32, dtype="uint64")
        # instantiate background config state
background_config_args = self.background_config_pair[1]
background_config_args.update(
{"seed": seed, **self.extra_background_config_kvargs}
)
background_config_state = self.background_config_pair[0](
**background_config_args
)
        # instantiate gym agent and add it to the config and the gym object
nextid = len(background_config_state["agents"])
gym_agent = self.gymAgentConstructor(
nextid,
"ABM",
first_interval=self.first_interval,
wakeup_interval_generator=self.wakeup_interval_generator,
state_buffer_length=self.state_buffer_length,
**self.extra_gym_agent_kvargs,
)
config_state = config_add_agents(background_config_state, [gym_agent])
self.gym_agent = config_state["agents"][-1]
# KERNEL
# instantiate the kernel object
kernel = Kernel(
random_state=np.random.RandomState(seed=seed),
**subdict(
config_state,
[
"start_time",
"stop_time",
"agents",
"agent_latency_model",
"default_computation_delay",
"custom_properties",
],
),
)
kernel.initialize()
# kernel will run until GymAgent has to take an action
raw_state = kernel.runner()
state = self.raw_state_to_state(deepcopy(raw_state["result"]))
# attach kernel
self.kernel = kernel
return state
def step(self, action: int) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
"""
The agent takes a step in the environment.
Parameters
----------
action : Discrete
Returns
-------
observation, reward, done, info : tuple
observation (object) :
an environment-specific object representing your observation of
the environment.
reward (float) :
amount of reward achieved by the previous action. The scale
varies between environments, but the goal is always to increase
your total reward.
done (bool) :
whether it's time to reset the environment again. Most (but not
all) tasks are divided up into well-defined episodes, and done
being True indicates the episode has terminated. (For example,
perhaps the pole tipped too far, or you lost your last life.)
info (dict) :
diagnostic information useful for debugging. It can sometimes
be useful for learning (for example, it might contain the raw
probabilities behind the environment's last state change).
However, official evaluations of your agent are not allowed to
use this for learning.
"""
assert self.action_space.contains(
action
), f"Action {action} is not contained in Action Space"
abides_action = self._map_action_space_to_ABIDES_SIMULATOR_SPACE(action)
raw_state = self.kernel.runner((self.gym_agent, abides_action))
self.state = self.raw_state_to_state(deepcopy(raw_state["result"]))
assert self.observation_space.contains(
self.state
), f"INVALID STATE {self.state}"
self.reward = self.raw_state_to_reward(deepcopy(raw_state["result"]))
self.done = raw_state["done"] or self.raw_state_to_done(
deepcopy(raw_state["result"])
)
if self.done:
self.reward += self.raw_state_to_update_reward(
deepcopy(raw_state["result"])
)
self.info = self.raw_state_to_info(deepcopy(raw_state["result"]))
return (self.state, self.reward, self.done, self.info)
def render(self, mode: str = "human") -> None:
"""Renders the environment.
The set of supported modes varies per environment. (And some
environments do not support rendering at all.) By convention,
if mode is:
- human: render to the current display or terminal and
return nothing. Usually for human consumption.
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image, suitable
for turning into a video.
- ansi: Return a string (str) or StringIO.StringIO containing a
terminal-style text representation. The text can include newlines
and ANSI escape sequences (e.g. for colors).
Note:
Make sure that your class's metadata 'render.modes' key includes
the list of supported modes. It's recommended to call super()
in implementations to use the functionality of this method.
Args:
mode (str): the mode to render with
"""
print(self.state, self.reward, self.info)
def seed(self, seed: Optional[int] = None) -> List[Any]:
"""Sets the seed for this env's random number generator(s).
Note:
Some environments use multiple pseudorandom number generators.
We want to capture all such seeds used in order to ensure that
there aren't accidental correlations between multiple generators.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
self.np_random, seed = seeding.np_random(seed)
return [seed]
def close(self) -> None:
"""Override close in your subclass to perform any necessary cleanup.
Environments will automatically close() themselves when
garbage collected or when the program exits.
"""
# kernel.termination()
##TODO: look at whether some cleaning functions needed for abides
@abstractmethod
def raw_state_to_state(self, raw_state: Dict[str, Any]) -> np.ndarray:
"""
abstract method that transforms a raw state into a state representation
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- state: state representation defining the MDP
"""
raise NotImplementedError
@abstractmethod
def raw_state_to_reward(self, raw_state: Dict[str, Any]) -> float:
"""
abstract method that transforms a raw state into the reward obtained during the step
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: immediate reward computed at each step
"""
raise NotImplementedError
@abstractmethod
    def raw_state_to_done(self, raw_state: Dict[str, Any]) -> bool:
        """
        abstract method that transforms a raw state into the flag indicating whether an episode is done
        Arguments:
            - raw_state: dictionary that contains raw simulation information obtained from the gym experimental agent
        Returns:
            - done: flag that describes if the episode is terminated or not
        """
raise NotImplementedError
@abstractmethod
    def raw_state_to_update_reward(self, raw_state: Dict[str, Any]) -> float:
        """
        abstract method that transforms a raw state into the final step reward update (if needed)
        Arguments:
            - raw_state: dictionary that contains raw simulation information obtained from the gym experimental agent
        Returns:
            - reward: update reward computed at the end of the episode
        """
raise NotImplementedError
@abstractmethod
def raw_state_to_info(self, raw_state: Dict[str, Any]) -> Dict[str, Any]:
"""
abstract method that transforms a raw state into an info dictionnary
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: info dictionnary computed at each step
"""
raise NotImplementedError
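# A minimal interaction loop, assuming `env` is any concrete subclass of
# AbidesGymCoreEnv (e.g. one of the SubGymMarkets* environments):
#
#     state = env.reset()
#     done = False
#     total_reward = 0.0
#     while not done:
#         action = env.action_space.sample()  # random policy, for illustration
#         state, reward, done, info = env.step(action)
#         total_reward += reward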
| 10,556 | 36.703571 | 118 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/envs/markets_daily_investor_environment_v0.py
|
import importlib
from typing import Any, Dict, List
import gym
import numpy as np
import abides_markets.agents.utils as markets_agent_utils
from abides_core import NanosecondTime
from abides_core.utils import str_to_ns
from abides_core.generators import ConstantTimeGenerator
from .markets_environment import AbidesGymMarketsEnv
class SubGymMarketsDailyInvestorEnv_v0(AbidesGymMarketsEnv):
"""
Daily Investor V0 environnement. It defines one of the ABIDES-Gym-markets environnement.
This environment presents an example of the classic problem where an investor tries to make money buying and selling a stock through-out a single day.
The investor starts the day with cash but no position then repeatedly buy and sell the stock in order to maximize its
marked to market value at the end of the day (i.e. cash plus holdingsvalued at the market price).
Arguments:
- background_config: the handcrafted agents configuration used for the environnement
- mkt_close: time the market day ends
- timestep_duration: how long between 2 wakes up of the gym experimental agent
- starting_cash: cash of the agents at the beginning of the simulation
- order_fixed_size: size of the order placed by the experimental gym agent
- state_history_length: length of the raw state buffer
- market_data_buffer_length: length of the market data buffer
- first_interval: how long the simulation is run before the first wake up of the gym experimental agent
- reward_mode: can use a dense of sparse reward formulation
- done_ratio: ratio (mark2market_t/starting_cash) that defines when an episode is done (if agent has lost too much mark to market value)
- debug_mode: arguments to change the info dictionnary (lighter version if performance is an issue)
Execution V0:
- Action Space:
- MKT buy order_fixed_size
- Hold
- MKT sell order_fixed_size
- State Space:
- Holdings
- Imbalance
- Spread
- DirectionFeature
- padded_returns
"""
raw_state_pre_process = markets_agent_utils.ignore_buffers_decorator
raw_state_to_state_pre_process = (
markets_agent_utils.ignore_mkt_data_buffer_decorator
)
def __init__(
self,
background_config: str = "rmsc04",
mkt_close: str = "16:00:00",
timestep_duration: str = "60s",
starting_cash: int = 1_000_000,
order_fixed_size: int = 10,
state_history_length: int = 4,
market_data_buffer_length: int = 5,
first_interval: str = "00:05:00",
reward_mode: str = "dense",
done_ratio: float = 0.3,
debug_mode: bool = False,
background_config_extra_kvargs={},
) -> None:
self.background_config: Any = importlib.import_module(
"abides_markets.configs.{}".format(background_config), package=None
) #
self.mkt_close: NanosecondTime = str_to_ns(mkt_close) #
self.timestep_duration: NanosecondTime = str_to_ns(timestep_duration) #
self.starting_cash: int = starting_cash #
self.order_fixed_size: int = order_fixed_size
self.state_history_length: int = state_history_length
self.market_data_buffer_length: int = market_data_buffer_length
self.first_interval: NanosecondTime = str_to_ns(first_interval)
self.reward_mode: str = reward_mode
self.done_ratio: float = done_ratio
self.debug_mode: bool = debug_mode
# marked_to_market limit to STOP the episode
self.down_done_condition: float = self.done_ratio * starting_cash
# CHECK PROPERTIES
assert background_config in [
"rmsc03",
"rmsc04",
"smc_01",
], "Select rmsc03, rmsc04 or smc_01 as config"
assert (self.first_interval <= str_to_ns("16:00:00")) & (
self.first_interval >= str_to_ns("00:00:00")
), "Select authorized FIRST_INTERVAL delay"
assert (self.mkt_close <= str_to_ns("16:00:00")) & (
self.mkt_close >= str_to_ns("09:30:00")
), "Select authorized market hours"
assert reward_mode in [
"sparse",
"dense",
], "reward_mode needs to be dense or sparse"
assert (self.timestep_duration <= str_to_ns("06:30:00")) & (
self.timestep_duration >= str_to_ns("00:00:00")
), "Select authorized timestep_duration"
assert (type(self.starting_cash) == int) & (
self.starting_cash >= 0
), "Select positive integer value for starting_cash"
assert (type(self.order_fixed_size) == int) & (
self.order_fixed_size >= 0
), "Select positive integer value for order_fixed_size"
        assert (type(self.state_history_length) == int) & (
            self.state_history_length >= 0
        ), "Select positive integer value for state_history_length"
        assert (type(self.market_data_buffer_length) == int) & (
            self.market_data_buffer_length >= 0
        ), "Select positive integer value for market_data_buffer_length"
        assert (
            (type(self.done_ratio) == float)
            & (self.done_ratio >= 0)
            & (self.done_ratio < 1)
        ), "Select float value for done_ratio between 0 and 1"
        assert debug_mode in [
            True,
            False,
        ], "debug_mode needs to be True or False"
background_config_args = {"end_time": self.mkt_close}
background_config_args.update(background_config_extra_kvargs)
super().__init__(
background_config_pair=(
self.background_config.build_config,
background_config_args,
),
wakeup_interval_generator=ConstantTimeGenerator(
step_duration=self.timestep_duration
),
starting_cash=self.starting_cash,
state_buffer_length=self.state_history_length,
market_data_buffer_length=self.market_data_buffer_length,
first_interval=self.first_interval,
)
# Action Space
# MKT buy order_fixed_size | Hold | MKT sell order_fixed_size
self.num_actions: int = 3
self.action_space: gym.Space = gym.spaces.Discrete(self.num_actions)
# State Space
# [Holdings, Imbalance, Spread, DirectionFeature] + padded_returns
self.num_state_features: int = 4 + self.state_history_length - 1
# construct state space "box"
self.state_highs: np.ndarray = np.array(
[
np.finfo(np.float32).max, # Holdings
1.0, # Imbalance
np.finfo(np.float32).max, # Spread
np.finfo(np.float32).max, # DirectionFeature
]
+ (self.state_history_length - 1)
* [np.finfo(np.float32).max], # padded_returns
dtype=np.float32,
).reshape(self.num_state_features, 1)
self.state_lows: np.ndarray = np.array(
[
np.finfo(np.float32).min, # Holdings
0.0, # Imbalance
np.finfo(np.float32).min, # Spread
np.finfo(np.float32).min, # DirectionFeature
]
+ (self.state_history_length - 1)
* [np.finfo(np.float32).min], # padded_returns
dtype=np.float32,
).reshape(self.num_state_features, 1)
self.observation_space: gym.Space = gym.spaces.Box(
self.state_lows,
self.state_highs,
shape=(self.num_state_features, 1),
dtype=np.float32,
)
# instantiate previous_marked_to_market as starting_cash
self.previous_marked_to_market = self.starting_cash
def _map_action_space_to_ABIDES_SIMULATOR_SPACE(
self, action: int
) -> List[Dict[str, Any]]:
"""
utility function that maps open ai action definition (integers) to environnement API action definition (list of dictionaries)
The action space ranges [0, 1, 2] where:
- `0` MKT buy order_fixed_size
- `1` Hold ( i.e. do nothing )
- '2' MKT sell order_fixed_size
Arguments:
- action: integer representation of the different actions
Returns:
- action_list: list of the corresponding series of action mapped into abides env apis
"""
if action == 0:
return [{"type": "MKT", "direction": "BUY", "size": self.order_fixed_size}]
elif action == 1:
return []
elif action == 2:
return [{"type": "MKT", "direction": "SELL", "size": self.order_fixed_size}]
else:
raise ValueError(
f"Action {action} is not part of the actions supported by the function."
)
@raw_state_to_state_pre_process
def raw_state_to_state(self, raw_state: Dict[str, Any]) -> np.ndarray:
"""
method that transforms a raw state into a state representation
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- state: state representation defining the MDP for the daily investor v0 environnement
"""
# 0) Preliminary
bids = raw_state["parsed_mkt_data"]["bids"]
asks = raw_state["parsed_mkt_data"]["asks"]
last_transactions = raw_state["parsed_mkt_data"]["last_transaction"]
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) Imbalance
imbalances = [
markets_agent_utils.get_imbalance(b, a, depth=3)
for (b, a) in zip(bids, asks)
]
# 3) Returns
mid_prices = [
markets_agent_utils.get_mid_price(b, a, lt)
for (b, a, lt) in zip(bids, asks, last_transactions)
]
returns = np.diff(mid_prices)
padded_returns = np.zeros(self.state_history_length - 1)
        if len(returns) > 0:
            padded_returns[-len(returns) :] = returns
# 4) Spread
best_bids = [
bids[0][0] if len(bids) > 0 else mid
for (bids, mid) in zip(bids, mid_prices)
]
best_asks = [
asks[0][0] if len(asks) > 0 else mid
for (asks, mid) in zip(asks, mid_prices)
]
spreads = np.array(best_asks) - np.array(best_bids)
# 5) direction feature
direction_features = np.array(mid_prices) - np.array(last_transactions)
# 6) Compute State (Holdings, Imbalance, Spread, DirectionFeature + Returns)
computed_state = np.array(
[holdings[-1], imbalances[-1], spreads[-1], direction_features[-1]]
+ padded_returns.tolist(),
dtype=np.float32,
)
return computed_state.reshape(self.num_state_features, 1)
@raw_state_pre_process
def raw_state_to_reward(self, raw_state: Dict[str, Any]) -> float:
"""
method that transforms a raw state into the reward obtained during the step
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: immediate reward computed at each step for the daily investor v0 environnement
"""
if self.reward_mode == "dense":
# Sparse Reward here
# Agents get reward at the end of the episode
# reward is computed for the last step for each episode
# can update with additional reward at end of episode depending on scenario
# here add additional +- 10% if end because of bounds being reached
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) Available Cash
cash = raw_state["internal_data"]["cash"]
# 3) Last Known Market Transaction Price
last_transaction = raw_state["parsed_mkt_data"]["last_transaction"]
# 4) compute the marked to market
marked_to_market = cash + holdings * last_transaction
# 5) Reward
reward = marked_to_market - self.previous_marked_to_market
# 6) Order Size Normalization of Reward
reward = reward / self.order_fixed_size
# 7) Time Normalization of Reward
num_ns_day = (16 - 9.5) * 60 * 60 * 1e9
step_length = self.timestep_duration
num_steps_per_episode = num_ns_day / step_length
reward = reward / num_steps_per_episode
# 8) update previous mm
self.previous_marked_to_market = marked_to_market
return reward
elif self.reward_mode == "sparse":
return 0
@raw_state_pre_process
def raw_state_to_update_reward(self, raw_state: Dict[str, Any]) -> float:
"""
method that transforms a raw state into the final step reward update (if needed)
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: update reward computed at the end of the episode for the daily investor v0 environnement
"""
if self.reward_mode == "dense":
return 0
elif self.reward_mode == "sparse":
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) Available Cash
cash = raw_state["internal_data"]["cash"]
# 3) Last Known Market Transaction Price
last_transaction = raw_state["parsed_mkt_data"]["last_transaction"]
# 4) compute the marked to market
marked_to_market = cash + holdings * last_transaction
reward = marked_to_market - self.starting_cash
# 5) Order Size Normalization of Reward
reward = reward / self.order_fixed_size
# 6) Time Normalization of Reward
num_ns_day = (16 - 9.5) * 60 * 60 * 1e9
step_length = self.timestep_duration
num_steps_per_episode = num_ns_day / step_length
reward = reward / num_steps_per_episode
return reward
@raw_state_pre_process
def raw_state_to_done(self, raw_state: Dict[str, Any]) -> bool:
"""
method that transforms a raw state into the flag if an episode is done
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- done: flag that describes if the episode is terminated or not for the daily investor v0 environnement
"""
# episode can stop because market closes or because some condition is met
        # here the chosen condition is that the trader has lost too much money
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) Available Cash
cash = raw_state["internal_data"]["cash"]
# 3) Last Known Market Transaction Price
last_transaction = raw_state["parsed_mkt_data"]["last_transaction"]
# 4) compute the marked to market
marked_to_market = cash + holdings * last_transaction
# 5) comparison
done = marked_to_market <= self.down_done_condition
return done
@raw_state_pre_process
def raw_state_to_info(self, raw_state: Dict[str, Any]) -> Dict[str, Any]:
"""
method that transforms a raw state into an info dictionnary
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: info dictionnary computed at each step for the daily investor v0 environnement
"""
# Agent cannot use this info for taking decision
# only for debugging
# 1) Last Known Market Transaction Price
last_transaction = raw_state["parsed_mkt_data"]["last_transaction"]
# 2) Last Known best bid
bids = raw_state["parsed_mkt_data"]["bids"]
best_bid = bids[0][0] if len(bids) > 0 else last_transaction
# 3) Last Known best ask
asks = raw_state["parsed_mkt_data"]["asks"]
best_ask = asks[0][0] if len(asks) > 0 else last_transaction
# 4) Available Cash
cash = raw_state["internal_data"]["cash"]
# 5) Current Time
current_time = raw_state["internal_data"]["current_time"]
# 6) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 7) Spread
spread = best_ask - best_bid
# 8) OrderBook features
orderbook = {
"asks": {"price": {}, "volume": {}},
"bids": {"price": {}, "volume": {}},
}
for book, book_name in [(bids, "bids"), (asks, "asks")]:
for level in [0, 1, 2]:
price, volume = markets_agent_utils.get_val(bids, level)
orderbook[book_name]["price"][level] = np.array([price]).reshape(-1)
orderbook[book_name]["volume"][level] = np.array([volume]).reshape(-1)
# 9) order_status
order_status = raw_state["internal_data"]["order_status"]
# 10) mkt_open
mkt_open = raw_state["internal_data"]["mkt_open"]
# 11) mkt_close
mkt_close = raw_state["internal_data"]["mkt_close"]
# 12) last vals
last_bid = markets_agent_utils.get_last_val(bids, last_transaction)
last_ask = markets_agent_utils.get_last_val(asks, last_transaction)
# 13) spreads
wide_spread = last_ask - last_bid
ask_spread = last_ask - best_ask
bid_spread = best_bid - last_bid
        # 14) compute the marked to market
marked_to_market = cash + holdings * last_transaction
        if self.debug_mode:
return {
"last_transaction": last_transaction,
"best_bid": best_bid,
"best_ask": best_ask,
"spread": spread,
"bids": bids,
"asks": asks,
"cash": cash,
"current_time": current_time,
"holdings": holdings,
"orderbook": orderbook,
"order_status": order_status,
"mkt_open": mkt_open,
"mkt_close": mkt_close,
"last_bid": last_bid,
"last_ask": last_ask,
"wide_spread": wide_spread,
"ask_spread": ask_spread,
"bid_spread": bid_spread,
"marked_to_market": marked_to_market,
}
else:
return {}
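# Hedged usage sketch (not part of the module; values are illustrative):
# a random-policy rollout under the sparse reward formulation. The episode
# ends at market close or once marked-to-market drops below
# done_ratio * starting_cash.
if __name__ == "__main__":
    env = SubGymMarketsDailyInvestorEnv_v0(
        background_config="rmsc04",
        timestep_duration="120s",
        reward_mode="sparse",
    )
    state = env.reset()
    done = False
    while not done:
        state, reward, done, info = env.step(env.action_space.sample())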
| 18,901 | 37.340771 | 154 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/envs/markets_execution_custom_metrics.py
|
from collections import defaultdict
from typing import Dict
import numpy as np
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env import BaseEnv
from ray.rllib.evaluation import MultiAgentEpisode, RolloutWorker
from ray.rllib.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
class MyCallbacks(DefaultCallbacks):
"""
Class that defines callbacks for the execution environment
"""
def on_episode_start(
self,
*,
worker: RolloutWorker,
base_env: BaseEnv,
policies: Dict[str, Policy],
episode: MultiAgentEpisode,
env_index: int,
**kwargs,
):
"""Callback run on the rollout worker before each episode starts.
Args:
worker (RolloutWorker): Reference to the current rollout worker.
base_env (BaseEnv): BaseEnv running the episode. The underlying
env object can be gotten by calling base_env.get_unwrapped().
policies (dict): Mapping of policy id to policy objects. In single
agent mode there will only be a single "default" policy.
episode (MultiAgentEpisode): Episode object which contains episode
state. You can use the `episode.user_data` dict to store
temporary data, and `episode.custom_metrics` to store custom
metrics for the episode.
env_index (EnvID): Obsoleted: The ID of the environment, which the
episode belongs to.
kwargs: Forward compatibility placeholder.
"""
# Make sure this episode has just been started (only initial obs
# logged so far).
assert episode.length == 0, (
"ERROR: `on_episode_start()` callback should be called right "
"after env reset!"
)
        episode.user_data = defaultdict(list)
def on_episode_step(
self,
*,
worker: RolloutWorker,
base_env: BaseEnv,
episode: MultiAgentEpisode,
env_index: int,
**kwargs,
):
"""Runs on each episode step.
Args:
worker (RolloutWorker): Reference to the current rollout worker.
base_env (BaseEnv): BaseEnv running the episode. The underlying
env object can be gotten by calling base_env.get_unwrapped().
policies (Optional[Dict[PolicyID, Policy]]): Mapping of policy id
to policy objects. In single agent mode there will only be a
single "default_policy".
episode (MultiAgentEpisode): Episode object which contains episode
state. You can use the `episode.user_data` dict to store
temporary data, and `episode.custom_metrics` to store custom
metrics for the episode.
env_index (EnvID): Obsoleted: The ID of the environment, which the
episode belongs to.
kwargs: Forward compatibility placeholder.
"""
# Make sure this episode is ongoing.
assert episode.length > 0, (
"ERROR: `on_episode_step()` callback should not be called right "
"after env reset!"
)
agent0_info = episode._agent_to_last_info["agent0"]
for k, v in agent0_info.items():
episode.user_data[k].append(v)
def on_episode_end(
self,
*,
worker: RolloutWorker,
base_env: BaseEnv,
policies: Dict[str, Policy],
episode: MultiAgentEpisode,
env_index: int,
**kwargs,
):
"""Runs when an episode is done.
Args:
worker (RolloutWorker): Reference to the current rollout worker.
base_env (BaseEnv): BaseEnv running the episode. The underlying
env object can be gotten by calling base_env.get_unwrapped().
policies (Dict[PolicyID, Policy]): Mapping of policy id to policy
objects. In single agent mode there will only be a single
"default_policy".
episode (MultiAgentEpisode): Episode object which contains episode
state. You can use the `episode.user_data` dict to store
temporary data, and `episode.custom_metrics` to store custom
metrics for the episode.
env_index (EnvID): Obsoleted: The ID of the environment, which the
episode belongs to.
kwargs: Forward compatibility placeholder.
"""
# this corresponds to feature we are interested by the last value - whole episode
for metrics in [
"slippage_reward",
"late_penalty_reward",
"executed_quantity",
"remaining_quantity",
]:
episode.custom_metrics[metrics] = np.sum(episode.user_data[metrics])
        # whole-episode ("last") values: `i = None` suffixes these metrics with
        # "_None", distinguishing them from the per-milestone metrics below
        i = None
        milestone_index = -1
action_counter = episode.user_data["action_counter"][milestone_index]
tot_actions = 0
for key, val in action_counter.items():
tot_actions += val
for key, val in action_counter.items():
episode.custom_metrics[f"pct_action_counter_{key}_{i}"] = val / tot_actions
metrics = [
"holdings_pct",
"time_pct",
"diff_pct",
"imbalance_all",
"imbalance_5",
"price_impact",
"spread",
"direction_feature",
]
for metric in metrics:
episode.custom_metrics[f"{metric}_{i}"] = episode.user_data[metric][
milestone_index
]
# milestone steps
num_max_steps_per_episode = episode.user_data["num_max_steps_per_episode"][-1]
num_milestone = 4
len_milestone = num_max_steps_per_episode / num_milestone
for i in range(num_milestone + 1):
milestone_index = int(i * len_milestone)
if milestone_index >= len(episode.user_data["action_counter"]):
break
action_counter = episode.user_data["action_counter"][milestone_index]
tot_actions = 0
for key, val in action_counter.items():
tot_actions += val
for key, val in action_counter.items():
episode.custom_metrics[f"pct_action_counter_{key}_{i}"] = (
val / tot_actions
)
for metric in metrics:
episode.custom_metrics[f"{metric}_{i}"] = episode.user_data[metric][
milestone_index
]
# TODO: add the episode.hist_data
def on_sample_end(self, *, worker: RolloutWorker, samples: SampleBatch, **kwargs):
"""Called at the end of RolloutWorker.sample().
Args:
worker (RolloutWorker): Reference to the current rollout worker.
samples (SampleBatch): Batch to be returned. You can mutate this
object to modify the samples generated.
kwargs: Forward compatibility placeholder.
"""
pass
def on_train_result(self, *, trainer, result: dict, **kwargs):
"""Called at the end of Trainable.train().
Args:
trainer (Trainer): Current trainer instance.
result (dict): Dict of results returned from trainer.train() call.
You can mutate this object to add additional metrics.
kwargs: Forward compatibility placeholder.
"""
pass
def on_learn_on_batch(
self, *, policy: Policy, train_batch: SampleBatch, result: dict, **kwargs
) -> None:
"""Called at the beginning of Policy.learn_on_batch().
Note: This is called before 0-padding via
`pad_batch_to_sequences_of_same_size`.
Args:
policy (Policy): Reference to the current Policy object.
train_batch (SampleBatch): SampleBatch to be trained on. You can
mutate this object to modify the samples generated.
result (dict): A results dict to add custom metrics to.
kwargs: Forward compatibility placeholder.
"""
pass
def on_postprocess_trajectory(
self,
*,
worker: RolloutWorker,
episode: MultiAgentEpisode,
agent_id: str,
policy_id: str,
policies: Dict[str, Policy],
postprocessed_batch: SampleBatch,
original_batches: Dict[str, SampleBatch],
**kwargs,
):
"""Called immediately after a policy's postprocess_fn is called.
You can use this callback to do additional postprocessing for a policy,
including looking at the trajectory data of other agents in multi-agent
settings.
Args:
worker (RolloutWorker): Reference to the current rollout worker.
episode (MultiAgentEpisode): Episode object.
agent_id (str): Id of the current agent.
policy_id (str): Id of the current policy for the agent.
policies (dict): Mapping of policy id to policy objects. In single
agent mode there will only be a single "default_policy".
postprocessed_batch (SampleBatch): The postprocessed sample batch
for this agent. You can mutate this object to apply your own
trajectory postprocessing.
original_batches (dict): Mapping of agents to their unpostprocessed
trajectory data. You should not mutate this object.
kwargs: Forward compatibility placeholder.
"""
if "num_batches" not in episode.custom_metrics:
episode.custom_metrics["num_batches"] = 0
episode.custom_metrics["num_batches"] += 1
| 10,203 | 39.332016 | 90 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/envs/__init__.py
|
from .markets_daily_investor_environment_v0 import SubGymMarketsDailyInvestorEnv_v0
from .markets_execution_environment_v0 import SubGymMarketsExecutionEnv_v0
| 159 | 52.333333 | 83 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/generators.py
|
from abc import abstractmethod, ABC
import numpy as np
from abides_core.generators import BaseGenerator
################## ORDER SIZE MODEL ###############################
class OrderSizeGenerator(BaseGenerator[int], ABC):
pass
class ConstantOrderSizeGenerator(OrderSizeGenerator):
def __init__(self, order_size: int) -> None:
self.order_size: int = order_size
def next(self) -> int:
return self.order_size
def mean(self) -> int:
return self.order_size
class UniformOrderSizeGenerator(OrderSizeGenerator):
def __init__(
self,
order_size_min: int,
order_size_max: int,
random_generator: np.random.RandomState,
) -> None:
self.order_size_min: int = order_size_min
self.order_size_max: int = order_size_max + 1
self.random_generator: np.random.RandomState = random_generator
def next(self) -> int:
return self.random_generator.randint(self.order_size_min, self.order_size_max)
    def mean(self) -> float:
        # order_size_max already stores the exclusive upper bound (max + 1)
        return (self.order_size_min + self.order_size_max - 1) / 2
################## ORDER DEPTH MODEL ###############################
class OrderDepthGenerator(BaseGenerator[int], ABC):
pass
class ConstantDepthGenerator(OrderDepthGenerator):
def __init__(self, order_depth: int) -> None:
self.order_depth: int = order_depth
def next(self) -> int:
return self.order_depth
def mean(self) -> int:
return self.order_depth
class UniformDepthGenerator(OrderDepthGenerator):
def __init__(
self,
order_depth_min: int,
order_depth_max: int,
random_generator: np.random.RandomState,
) -> None:
self.random_generator: np.random.RandomState = random_generator
self.order_depth_min: int = order_depth_min
self.order_depth_max: int = order_depth_max + 1
def next(self) -> int:
return self.random_generator.randint(self.order_depth_min, self.order_depth_max)
    def mean(self) -> float:
        # order_depth_max already stores the exclusive upper bound (max + 1)
        return (self.order_depth_min + self.order_depth_max - 1) / 2
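# Usage sketch (illustrative values): a seeded uniform size generator
# drawing integers in [10, 50] inclusive (the constructor stores max + 1
# as an exclusive upper bound).
if __name__ == "__main__":
    rng = np.random.RandomState(seed=42)
    size_gen = UniformOrderSizeGenerator(10, 50, rng)
    print([size_gen.next() for _ in range(5)], size_gen.mean())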
| 2,100 | 27.780822 | 88 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/orders.py
|
import sys
from abc import ABC, abstractmethod
from copy import deepcopy
from enum import Enum
from typing import Any, Dict, Optional
from abides_core import NanosecondTime
from abides_core.utils import fmt_ts
from .utils import dollarize
class Side(Enum):
BID = "BID"
ASK = "ASK"
def is_bid(self) -> bool:
return self == Side.BID
def is_ask(self) -> bool:
return self == Side.ASK
class Order(ABC):
"""A basic Order type used by an Exchange to conduct trades or maintain an order book.
This should not be confused with order Messages agents send to request an Order.
Specific order types will inherit from this (like LimitOrder).
"""
_order_id_counter: int = 0
@abstractmethod
def __init__(
self,
agent_id: int,
time_placed: NanosecondTime,
symbol: str,
quantity: int,
side: Side,
order_id: Optional[int] = None,
tag: Any = None,
) -> None:
"""
Arguments:
agent_id: The ID of the agent that created this order.
time_placed: Time at which the order was created by the agent.
symbol: Equity symbol for the order.
quantity: Number of equity units affected by the order.
side: Indicates if an order is on the BID or ASK side of the market.
order_id: Either self generated or assigned. Should only be self
generated by the OrderBook class.
tag: A free-form user-defined field that can contain any information
relevant to the entity placing the order. Recommend keeping it
alphanumeric rather than shoving in objects, as it will be there
taking memory for the lifetime of the order and in all logging
mechanisms. Intent: for strategy agents to set tags to help keep
track of the intent of particular orders, to simplify their code.
"""
self.agent_id: int = agent_id
self.time_placed: NanosecondTime = time_placed
self.symbol: str = symbol
self.quantity: int = quantity
self.side: Side = side
if order_id is None:
order_id = Order._order_id_counter
Order._order_id_counter += 1
self.order_id: int = order_id
# Create placeholder fields that don't get filled in until certain events happen.
self.fill_price: Optional[int] = None
self.tag: Optional[Any] = tag
def to_dict(self) -> Dict[str, Any]:
as_dict = deepcopy(self).__dict__
as_dict["time_placed"] = fmt_ts(self.time_placed)
return as_dict
def __eq__(self, other):
return type(other) is type(self) and self.__dict__ == other.__dict__
def __deepcopy__(self, memodict={}):
raise NotImplementedError
class LimitOrder(Order):
"""
LimitOrder class that inherits from Order class and adds a limit price and a
hidden order flag.
These are the Orders that typically go in an Exchange's OrderBook.
"""
def __init__(
self,
agent_id: int,
time_placed: NanosecondTime,
symbol: str,
quantity: int,
side: Side,
limit_price: int,
is_hidden: bool = False,
is_price_to_comply: bool = False,
insert_by_id: bool = False,
is_post_only=False,
order_id: Optional[int] = None,
tag: Optional[Any] = None,
) -> None:
super().__init__(
agent_id, time_placed, symbol, quantity, side, order_id, tag=tag
)
# The limit price is the minimum price the agent will accept (for a sell order) or
# the maximum price the agent will pay (for a buy order).
self.limit_price: int = limit_price
self.is_hidden: bool = is_hidden
self.is_price_to_comply: bool = is_price_to_comply
self.insert_by_id: bool = insert_by_id
self.is_post_only: bool = is_post_only
def __str__(self) -> str:
filled = ""
if self.fill_price:
filled = " (filled @ {})".format(dollarize(self.fill_price))
# Until we make explicit market orders, we make a few assumptions that EXTREME prices on limit
# orders are trying to represent a market order. This only affects printing - they still hit
# the order book like limit orders, which is wrong.
return "(Agent {} @ {}{}) : {} {} {} @ {}{}".format(
self.agent_id,
fmt_ts(self.time_placed),
f" [{self.tag}]" if self.tag is not None else "",
self.side.value,
self.quantity,
self.symbol,
dollarize(self.limit_price)
if abs(self.limit_price) < sys.maxsize
else "MKT",
filled,
)
def __repr__(self) -> str:
return self.__str__()
def __deepcopy__(self, memodict={}) -> "LimitOrder":
tag = None if self.tag is None else deepcopy(self.tag)
order = LimitOrder(
self.agent_id,
self.time_placed,
self.symbol,
self.quantity,
self.side,
self.limit_price,
self.is_hidden,
self.is_price_to_comply,
self.insert_by_id,
order_id=self.order_id,
is_post_only=self.is_post_only,
tag=tag,
)
order.fill_price = self.fill_price
return order
class MarketOrder(Order):
"""MarketOrder class, inherits from Order class."""
def __init__(
self,
agent_id: int,
time_placed: NanosecondTime,
symbol: str,
quantity: int,
side: Side,
order_id: Optional[int] = None,
tag: Optional[Any] = None,
) -> None:
super().__init__(
agent_id, time_placed, symbol, quantity, side, order_id=order_id, tag=tag
)
def __str__(self) -> str:
return "(Agent {} @ {}) : MKT Order {} {} {}".format(
self.agent_id,
fmt_ts(self.time_placed),
self.side.value,
self.quantity,
self.symbol,
)
def __repr__(self) -> str:
return self.__str__()
def __deepcopy__(self, memodict={}) -> "MarketOrder":
tag = None if self.tag is None else deepcopy(self.tag)
order = MarketOrder(
self.agent_id,
self.time_placed,
self.symbol,
self.quantity,
self.side,
order_id=self.order_id,
tag=tag,
)
order.fill_price = self.fill_price
return order
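# Usage sketch (illustrative values; prices are integer cents, as elsewhere
# in abides_markets): constructing and printing a limit order.
if __name__ == "__main__":
    order = LimitOrder(
        agent_id=0,
        time_placed=0,
        symbol="ABM",
        quantity=100,
        side=Side.BID,
        limit_price=10_000,
    )
    print(order)  # human-readable form via __str__ / dollarize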
| 6,652 | 29.240909 | 102 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/order_book.py
|
import logging
import sys
import warnings
from copy import deepcopy
from typing import Any, Dict, List, Optional, Set, Tuple
import numpy as np
import pandas as pd
from abides_core import Agent, NanosecondTime
from abides_core.utils import str_to_ns, ns_date
from .messages.orderbook import (
OrderAcceptedMsg,
OrderExecutedMsg,
OrderCancelledMsg,
OrderPartialCancelledMsg,
OrderModifiedMsg,
OrderReplacedMsg,
)
from .orders import LimitOrder, MarketOrder, Order, Side
from .price_level import PriceLevel
logger = logging.getLogger(__name__)
class OrderBook:
"""Basic class for an order book for one symbol, in the style of the major US Stock Exchanges.
An OrderBook requires an owning agent object, which it will use to send messages
outbound via the simulator Kernel (notifications of order creation, rejection,
cancellation, execution, etc).
Attributes:
owner: The agent this order book belongs to.
symbol: The symbol of the stock or security that is traded on this order book.
bids: List of bid price levels (index zero is best bid), stored as a PriceLevel object.
asks: List of ask price levels (index zero is best ask), stored as a PriceLevel object.
last_trade: The price that the last trade was made at.
book_log: Log of the full order book depth (price and volume) each time it changes.
book_log2: TODO
quotes_seen: TODO
history: A truncated history of previous trades.
last_update_ts: The last timestamp the order book was updated.
buy_transactions: An ordered list of all previous buy transaction timestamps and quantities.
sell_transactions: An ordered list of all previous sell transaction timestamps and quantities.
"""
def __init__(self, owner: Agent, symbol: str) -> None:
"""Creates a new OrderBook class instance for a single symbol.
Arguments:
owner: The agent this order book belongs to, usually an `ExchangeAgent`.
symbol: The symbol of the stock or security that is traded on this order book.
"""
self.owner: Agent = owner
self.symbol: str = symbol
self.bids: List[PriceLevel] = []
self.asks: List[PriceLevel] = []
self.last_trade: Optional[int] = None
# Create an empty list of dictionaries to log the full order book depth (price and volume) each time it changes.
self.book_log2: List[Dict[str, Any]] = []
self.quotes_seen: Set[int] = set()
# Create an order history for the exchange to report to certain agent types.
self.history: List[Dict[str, Any]] = []
self.last_update_ts: Optional[NanosecondTime] = self.owner.mkt_open
self.buy_transactions: List[Tuple[NanosecondTime, int]] = []
self.sell_transactions: List[Tuple[NanosecondTime, int]] = []
def handle_limit_order(self, order: LimitOrder, quiet: bool = False) -> None:
"""Matches a limit order or adds it to the order book.
Handles partial matches piecewise,
consuming all possible shares at the best price before moving on, without regard to
order size "fit" or minimizing number of transactions. Sends one notification per
match.
Arguments:
order: The limit order to process.
            quiet: If True, messages will not be sent to agents and entries will not be added to
                history. Used when this function is a part of a more complex order.
"""
if order.symbol != self.symbol:
warnings.warn(
f"{order.symbol} order discarded. Does not match OrderBook symbol: {self.symbol}"
)
return
if (order.quantity <= 0) or (int(order.quantity) != order.quantity):
warnings.warn(
f"{order.symbol} order discarded. Quantity ({order.quantity}) must be a positive integer."
)
return
if (order.limit_price < 0) or (int(order.limit_price) != order.limit_price):
warnings.warn(
f"{order.symbol} order discarded. Limit price ({order.limit_price}) must be a positive integer."
)
return
executed: List[Tuple[int, int]] = []
while True:
matched_order = self.execute_order(order)
if matched_order is not None:
# Accumulate the volume and average share price of the currently executing inbound trade.
assert matched_order.fill_price is not None
executed.append((matched_order.quantity, matched_order.fill_price))
if order.quantity <= 0:
break
else:
# No matching order was found, so the new order enters the order book. Notify the agent.
self.enter_order(deepcopy(order), quiet=quiet)
logger.debug("ACCEPTED: new order {}", order)
logger.debug(
"SENT: notifications of order acceptance to agent {} for order {}",
order.agent_id,
order.order_id,
)
if not quiet:
self.owner.send_message(order.agent_id, OrderAcceptedMsg(order))
break
# Now that we are done executing or accepting this order, log the new best bid and ask.
if self.bids:
self.owner.logEvent(
"BEST_BID",
"{},{},{}".format(
self.symbol, self.bids[0].price, self.bids[0].total_quantity
),
)
if self.asks:
self.owner.logEvent(
"BEST_ASK",
"{},{},{}".format(
self.symbol, self.asks[0].price, self.asks[0].total_quantity
),
)
# Also log the last trade (total share quantity, average share price).
if len(executed) > 0:
trade_qty = 0
trade_price = 0
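            # trade_price accumulates total notional value (price * quantity), so
            # the division below yields the quantity-weighted average fill price.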
for q, p in executed:
logger.debug("Executed: {} @ {}", q, p)
trade_qty += q
trade_price += p * q
avg_price = int(round(trade_price / trade_qty))
logger.debug(f"Avg: {trade_qty} @ ${avg_price:0.4f}")
self.owner.logEvent("LAST_TRADE", f"{trade_qty},${avg_price:0.4f}")
self.last_trade = avg_price
def handle_market_order(self, order: MarketOrder) -> None:
"""Takes a market order and attempts to fill at the current best market price.
Arguments:
order: The market order to process.
"""
if order.symbol != self.symbol:
warnings.warn(
f"{order.symbol} order discarded. Does not match OrderBook symbol: {self.symbol}"
)
return
if (order.quantity <= 0) or (int(order.quantity) != order.quantity):
warnings.warn(
f"{order.symbol} order discarded. Quantity ({order.quantity}) must be a positive integer."
)
return
order = deepcopy(order)
while order.quantity > 0:
if self.execute_order(order) is None:
break
def execute_order(self, order: Order) -> Optional[Order]:
"""Finds a single best match for this order, without regard for quantity.
Returns the matched order or None if no match found. DOES remove,
or decrement quantity from, the matched order from the order book
(i.e. executes at least a partial trade, if possible).
Arguments:
order: The order to execute.
"""
# Track which (if any) existing order was matched with the current order.
book = self.asks if order.side.is_bid() else self.bids
# First, examine the correct side of the order book for a match.
if len(book) == 0:
# No orders on this side.
return None
elif isinstance(order, LimitOrder) and not book[0].order_is_match(order):
# There were orders on the right side, but the prices do not overlap.
# Or: bid could not match with best ask, or vice versa.
# Or: bid offer is below the lowest asking price, or vice versa.
return None
elif order.tag in ["MR_preprocess_ADD", "MR_preprocess_REPLACE"]:
            # If an order enters here it means it was going to execute at entry,
            # but instead it was caught by the MR_preprocess post-only handling.
self.owner.logEvent(order.tag + "_POST_ONLY", {"order_id": order.order_id})
return None
else:
# There are orders on the right side, and the new order's price does fall
# somewhere within them. We can/will only match against the oldest order
# among those with the best price. (i.e. best price, then FIFO)
# The matched order might be only partially filled. (i.e. new order is smaller)
is_ptc_exec = False
if order.quantity >= book[0].peek()[0].quantity:
# Consume entire matched order.
matched_order, matched_order_metadata = book[0].pop()
# If the order is a part of a price to comply pair, also remove the other
# half of the order from the book.
if matched_order.is_price_to_comply:
is_ptc_exec = True
                    if not matched_order_metadata["ptc_hidden"]:
raise Exception(
"Should not be executing on the visible half of a price to comply order!"
)
assert book[1].remove_order(matched_order.order_id) is not None
if book[1].is_empty:
del book[1]
# If the matched price now has no orders, remove it completely.
if book[0].is_empty:
del book[0]
else:
# Consume only part of matched order.
book_order, book_order_metadata = book[0].peek()
matched_order = deepcopy(book_order)
matched_order.quantity = order.quantity
book_order.quantity -= matched_order.quantity
# If the order is a part of a price to comply pair, also adjust the
# quantity of the other half of the pair.
if book_order.is_price_to_comply:
is_ptc_exec = True
                    if not book_order_metadata["ptc_hidden"]:
raise Exception(
"Should not be executing on the visible half of a price to comply order!"
)
book_order_metadata[
"ptc_other_half"
].quantity -= matched_order.quantity
# When two limit orders are matched, they execute at the price that
# was being "advertised" in the order book.
matched_order.fill_price = matched_order.limit_price
if order.side.is_bid():
self.buy_transactions.append(
(self.owner.current_time, matched_order.quantity)
)
else:
self.sell_transactions.append(
(self.owner.current_time, matched_order.quantity)
)
self.history.append(
dict(
time=self.owner.current_time,
type="EXEC",
order_id=matched_order.order_id,
agent_id=matched_order.agent_id,
oppos_order_id=order.order_id,
oppos_agent_id=order.agent_id,
side="SELL"
if order.side.is_bid()
else "BUY", # by def exec if from point of view of passive order being exec
quantity=matched_order.quantity,
price=matched_order.limit_price if is_ptc_exec else None,
)
)
filled_order = deepcopy(order)
filled_order.quantity = matched_order.quantity
filled_order.fill_price = matched_order.fill_price
order.quantity -= filled_order.quantity
logger.debug(
"MATCHED: new order {} vs old order {}", filled_order, matched_order
)
logger.debug(
"SENT: notifications of order execution to agents {} and {} for orders {} and {}",
filled_order.agent_id,
matched_order.agent_id,
filled_order.order_id,
matched_order.order_id,
)
self.owner.send_message(
matched_order.agent_id, OrderExecutedMsg(matched_order)
)
self.owner.send_message(order.agent_id, OrderExecutedMsg(filled_order))
            if self.owner.book_logging:
# append current OB state to book_log2
self.append_book_log2()
# Return (only the executed portion of) the matched order.
return matched_order
def enter_order(
self,
order: LimitOrder,
metadata: Optional[Dict] = None,
        quiet: bool = False,
) -> None:
"""Enters a limit order into the OrderBook in the appropriate location.
This does not test for matching/executing orders -- this function
should only be called after a failed match/execution attempt.
        Arguments:
            order: The limit order to enter into the order book.
            metadata: Optional dict of metadata values to associate with the order.
            quiet: If True, messages will not be sent to agents and entries will not be
                added to history. Used when this function is a part of a more complex order.
"""
if order.is_price_to_comply and (
(metadata is None) or (metadata == {}) or ("ptc_hidden" not in metadata)
):
hidden_order = deepcopy(order)
visible_order = deepcopy(order)
hidden_order.is_hidden = True
            # Adjust the price of the hidden half to one tick more aggressive
            # (one tick closer to the centre of the market) than the displayed half.
            hidden_order.limit_price += 1 if order.side.is_bid() else -1
hidden_order_metadata = dict(
ptc_hidden=True,
ptc_other_half=visible_order,
)
visible_order_metadata = dict(
ptc_hidden=False,
ptc_other_half=hidden_order,
)
self.enter_order(hidden_order, hidden_order_metadata, quiet=True)
self.enter_order(visible_order, visible_order_metadata, quiet=quiet)
return
book = self.bids if order.side.is_bid() else self.asks
if len(book) == 0:
# There were no orders on this side of the book.
book.append(PriceLevel([(order, metadata or {})]))
elif book[-1].order_has_worse_price(order):
# There were orders on this side, but this order is worse than all of them.
# (New lowest bid or highest ask.)
book.append(PriceLevel([(order, metadata or {})]))
else:
# There are orders on this side. Insert this order in the correct position in the list.
# Note that o is a LIST of all orders (oldest at index 0) at this same price.
for i, price_level in enumerate(book):
if price_level.order_has_better_price(order):
book.insert(i, PriceLevel([(order, metadata or {})]))
break
elif price_level.order_has_equal_price(order):
book[i].add_order(order, metadata or {})
break
        if not quiet:
self.history.append(
dict(
time=self.owner.current_time,
type="LIMIT",
order_id=order.order_id,
agent_id=order.agent_id,
side=order.side.value,
quantity=order.quantity,
price=order.limit_price,
)
)
        if self.owner.book_logging and not quiet:
# append current OB state to book_log2
self.append_book_log2()
def cancel_order(
self,
order: LimitOrder,
        tag: Optional[str] = None,
cancellation_metadata: Optional[Dict] = None,
quiet: bool = False,
) -> bool:
"""Attempts to cancel (the remaining, unexecuted portion of) a trade in the order book.
        By definition, this must be a limit order. If the order cannot be found in the
        order book (probably because it was already fully executed), no message is
        currently sent back to the agent; this could be changed to some kind of failed
        cancellation message. Otherwise, the agent receives ORDER_CANCELLED with the
        order as the message body, with the cancelled quantity correctly represented as
        the number of shares that had not already been executed.
Arguments:
order: The limit order to cancel from the order book.
            quiet: If True, messages will not be sent to agents and entries will not be
                added to history. Used when this function is a part of a more complex order.
Returns:
A bool indicating if the order cancellation was successful.
"""
book = self.bids if order.side.is_bid() else self.asks
# If there are no orders on this side of the book, there is nothing to do.
if not book:
return False
# There are orders on this side. Find the price level of the order to cancel,
# then find the exact order and cancel it.
for i, price_level in enumerate(book):
if not price_level.order_has_equal_price(order):
continue
cancelled_order_result = price_level.remove_order(order.order_id)
if cancelled_order_result is not None:
cancelled_order, metadata = cancelled_order_result
# If the cancelled price now has no orders, remove it completely.
if price_level.is_empty:
del book[i]
logger.debug("CANCELLED: order {}", order)
logger.debug(
"SENT: notifications of order cancellation to agent {} for order {}",
cancelled_order.agent_id,
cancelled_order.order_id,
)
if cancelled_order.is_price_to_comply:
self.cancel_order(metadata["ptc_other_half"], quiet=True)
if not quiet:
self.history.append(
dict(
time=self.owner.current_time,
type="CANCEL",
order_id=cancelled_order.order_id,
tag=tag,
metadata=cancellation_metadata
if tag == "auctionFill"
else None,
)
)
self.owner.send_message(
order.agent_id, OrderCancelledMsg(cancelled_order)
)
# We found the order and cancelled it, so stop looking.
self.last_update_ts = self.owner.current_time
                if self.owner.book_logging and not quiet:
                    # Append the current order book state to book_log2.
self.append_book_log2()
return True
return False
def modify_order(self, order: LimitOrder, new_order: LimitOrder) -> None:
"""Modifies the quantity of an existing limit order in the order book.
Arguments:
order: The existing order in the order book.
new_order: The new order to replace the old order with.
"""
if order.order_id != new_order.order_id:
return
book = self.bids if order.side.is_bid() else self.asks
for price_level in book:
if not price_level.order_has_equal_price(order):
continue
if price_level.update_order_quantity(order.order_id, new_order.quantity):
self.history.append(
dict(
time=self.owner.current_time,
type="MODIFY",
order_id=order.order_id,
new_side=order.side.value,
new_quantity=new_order.quantity,
)
)
logger.debug("MODIFIED: order {}", order)
logger.debug(
"SENT: notifications of order modification to agent {} for order {}",
new_order.agent_id,
new_order.order_id,
)
self.owner.send_message(order.agent_id, OrderModifiedMsg(new_order))
self.last_update_ts = self.owner.current_time
                if self.owner.book_logging:
# append current OB state to book_log2
self.append_book_log2()
def partial_cancel_order(
self,
order: LimitOrder,
quantity: int,
        tag: Optional[str] = None,
cancellation_metadata: Optional[Dict] = None,
) -> None:
"""cancel a part of the quantity of an existing limit order in the order book.
Arguments:
order: The existing order in the order book.
new_order: The new order to replace the old order with.
"""
book = self.bids if order.side.is_bid() else self.asks
new_order = deepcopy(order)
new_order.quantity -= quantity
for price_level in book:
if not price_level.order_has_equal_price(order):
continue
if price_level.update_order_quantity(order.order_id, new_order.quantity):
self.history.append(
dict(
time=self.owner.current_time,
type="CANCEL_PARTIAL",
order_id=order.order_id,
quantity=quantity,
tag=tag,
metadata=cancellation_metadata
if tag == "auctionFill"
else None,
)
)
logger.debug("CANCEL_PARTIAL: order {}", order)
logger.debug(
"SENT: notifications of order partial cancellation to agent {} for order {}",
new_order.agent_id,
quantity,
)
self.owner.send_message(
order.agent_id, OrderPartialCancelledMsg(new_order)
)
self.last_update_ts = self.owner.current_time
                if self.owner.book_logging:
                    # Append the current order book state to book_log2.
self.append_book_log2()
def replace_order(
self,
agent_id: int,
old_order: LimitOrder,
new_order: LimitOrder,
) -> None:
"""Removes an order from the book and replaces it with a new one in one step.
This is equivalent to calling cancel_order followed by handle_limit_order.
If the old order cannot be cancelled, the new order is not inserted.
Arguments:
agent_id: The ID of the agent making this request - this must be the ID of
the agent who initially created the order.
old_order: The existing order in the order book to be cancelled.
new_order: The new order to be inserted into the order book.
"""
        if self.cancel_order(old_order, quiet=True):
self.history.append(
dict(
time=self.owner.current_time,
type="REPLACE",
old_order_id=old_order.order_id,
new_order_id=new_order.order_id,
quantity=new_order.quantity,
price=new_order.limit_price,
)
)
self.handle_limit_order(new_order, quiet=True)
            logger.debug(
                "SENT: notifications of order replacement to agent {} for old order {}, new order {}",
                agent_id,
                old_order.order_id,
                new_order.order_id,
            )
self.owner.send_message(agent_id, OrderReplacedMsg(old_order, new_order))
            if self.owner.book_logging:
# append current OB state to book_log2
self.append_book_log2()
def append_book_log2(self):
row = {
"QuoteTime": self.owner.current_time,
"bids": np.array(self.get_l2_bid_data(depth=self.owner.book_log_depth)),
"asks": np.array(self.get_l2_ask_data(depth=self.owner.book_log_depth)),
}
# if (row["bids"][0][0]>=row["asks"][0][0]): print("WARNING: THIS IS A REAL PROBLEM: an order book contains bids and asks at the same quote price!")
self.book_log2.append(row)
def get_l1_bid_data(self) -> Optional[Tuple[int, int]]:
"""Returns the current best bid price and of the book and the volume at this price."""
if len(self.bids) == 0:
return None
index = 0
while not self.bids[index].total_quantity > 0:
index += 1
return self.bids[0].price, self.bids[0].total_quantity
def get_l1_ask_data(self) -> Optional[Tuple[int, int]]:
"""Returns the current best ask price of the book and the volume at this price."""
if len(self.asks) == 0:
return None
index = 0
while not self.asks[index].total_quantity > 0:
index += 1
return self.asks[index].price, self.asks[index].total_quantity
def get_l2_bid_data(self, depth: int = sys.maxsize) -> List[Tuple[int, int]]:
"""Returns the price and total quantity of all limit orders on the bid side.
Arguments:
depth: If given, will only return data for the first N levels of the order book side.
Returns:
A list of tuples where the first element of the tuple is the price and the second
element of the tuple is the total volume at that price.
The list is given in order of price, with the centre of the book first.
"""
return list(
filter(
lambda x: x[1] > 0,
[
(price_level.price, price_level.total_quantity)
for price_level in self.bids[:depth]
],
)
)
def get_l2_ask_data(self, depth: int = sys.maxsize) -> List[Tuple[int, int]]:
"""Returns the price and total quantity of all limit orders on the ask side.
Arguments:
depth: If given, will only return data for the first N levels of the order book side.
Returns:
A list of tuples where the first element of the tuple is the price and the second
element of the tuple is the total volume at that price.
The list is given in order of price, with the centre of the book first.
"""
return list(
filter(
lambda x: x[1] > 0,
[
(price_level.price, price_level.total_quantity)
for price_level in self.asks[:depth]
],
)
)
def get_l3_bid_data(self, depth: int = sys.maxsize) -> List[Tuple[int, List[int]]]:
"""Returns the price and quantity of all limit orders on the bid side.
Arguments:
depth: If given, will only return data for the first N levels of the order book side.
Returns:
A list of tuples where the first element of the tuple is the price and the second
element of the tuple is the list of order quantities at that price.
The list of order quantities is given in order of priority and the overall list
is given in order of price, with the centre of the book first.
"""
return [
(
price_level.price,
[order.quantity for order, _ in price_level.visible_orders],
)
for price_level in self.bids[:depth]
]
def get_l3_ask_data(self, depth: int = sys.maxsize) -> List[Tuple[int, List[int]]]:
"""Returns the price and quantity of all limit orders on the ask side.
Arguments:
depth: If given, will only return data for the first N levels of the order book side.
Returns:
A list of tuples where the first element of the tuple is the price and the second
element of the tuple is the list of order quantities at that price.
The list of order quantities is given in order of priority and the overall list
is given in order of price, with the centre of the book first.
"""
return [
(
price_level.price,
[order.quantity for order, _ in price_level.visible_orders],
)
for price_level in self.asks[:depth]
]
def get_transacted_volume(self, lookback_period: str = "10min") -> Tuple[int, int]:
"""Method retrieves the total transacted volume for a symbol over a lookback
period finishing at the current simulation time.
Arguments:
lookback_period: The period in time from the current time to calculate the
transacted volume for.
"""
window_start = self.owner.current_time - str_to_ns(lookback_period)
buy_transacted_volume = 0
sell_transacted_volume = 0
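        # Both transaction lists are in time order, so walk them newest-first and
        # stop at the first trade that falls outside the lookback window.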
for time, volume in reversed(self.buy_transactions):
if time < window_start:
break
buy_transacted_volume += volume
for time, volume in reversed(self.sell_transactions):
if time < window_start:
break
sell_transacted_volume += volume
return (buy_transacted_volume, sell_transacted_volume)
def get_imbalance(self) -> Tuple[float, Optional[Side]]:
"""Returns a measure of book side total volume imbalance.
Returns:
A tuple containing the volume imbalance value and the side the order
book is in imbalance to.
Examples:
- Both book sides have the exact same volume --> (0.0, None)
- 2x bid volume vs. ask volume --> (0.5, Side.BID)
- 2x ask volume vs. bid volume --> (0.5, Side.ASK)
- Ask has no volume --> (1.0, Side.BID)
- Bid has no volume --> (1.0, Side.ASK)
"""
bid_vol = sum(price_level.total_quantity for price_level in self.bids)
ask_vol = sum(price_level.total_quantity for price_level in self.asks)
if bid_vol == ask_vol:
return (0, None)
elif bid_vol == 0:
return (1.0, Side.ASK)
elif ask_vol == 0:
return (1.0, Side.BID)
elif bid_vol < ask_vol:
return (1 - bid_vol / ask_vol, Side.ASK)
else:
return (1 - ask_vol / bid_vol, Side.BID)
def get_L1_snapshots(self):
best_bids = []
best_asks = []
def safe_first(x):
return x[0] if len(x) > 0 else np.array([None, None])
for d in self.book_log2:
best_bids.append([d["QuoteTime"]] + safe_first(d["bids"]).tolist())
best_asks.append([d["QuoteTime"]] + safe_first(d["asks"]).tolist())
best_bids = np.array(best_bids)
best_asks = np.array(best_asks)
return {"best_bids": best_bids, "best_asks": best_asks}
    # Takes a bids matrix [[p_i, q_i]] and appends successively lower prices with
    # zero-quantity levels to pad it to nlevels rows.
def bids_padding(self, book, nlevels):
n = book.shape[0]
if n == 0:
return np.zeros((nlevels, 2), dtype=int)
if n >= nlevels:
return book[:nlevels, :]
else:
lowestprice = book[-1, 0] if len(book.shape) == 2 else book[0]
npad = nlevels - n
pad = np.transpose(
np.array(
[
-1 + np.arange(lowestprice, lowestprice - npad, -1, dtype=int),
np.zeros(npad, dtype=int),
]
)
)
if len(pad.shape) == 1:
pad = pad.reshape(1, 2)
return np.concatenate([book, pad])
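    # Worked example (hypothetical numbers): bids_padding(np.array([[100, 5], [99, 2]]), 4)
    # keeps the two real levels and appends the zero-quantity levels (98, 0) and
    # (97, 0), so every snapshot has exactly nlevels rows.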
    # Takes an asks matrix [[p_i, q_i]] and appends successively higher prices with
    # zero-quantity levels to pad it to nlevels rows.
def asks_padding(self, book, nlevels):
n = book.shape[0]
if n == 0:
return np.zeros((nlevels, 2), dtype=int)
if n >= nlevels:
return book[:nlevels, :]
else:
highestprice = book[-1, 0] if len(book.shape) == 2 else book[0]
npad = nlevels - n
pad = np.transpose(
np.array(
[
1 + np.arange(highestprice, highestprice + npad, 1, dtype=int),
np.zeros(npad, dtype=int),
]
)
)
if len(pad.shape) == 1:
pad = pad.reshape(1, 2)
return np.concatenate([book, pad])
def get_L2_snapshots(self, nlevels):
times, bids, asks = [], [], []
for x in self.book_log2:
times.append(x["QuoteTime"])
bids.append(self.bids_padding(x["bids"], nlevels))
asks.append(self.asks_padding(x["asks"], nlevels))
bids = np.array(bids)
asks = np.array(asks)
times = np.array(times)
return {"times": times, "bids": bids, "asks": asks}
def get_l3_itch(self):
history_l3 = pd.DataFrame(self.history)
history_l3.loc[history_l3.tag == "auctionFill", "type"] = "EXEC"
history_l3.loc[history_l3.tag == "auctionFill", "quantity"] = history_l3.loc[
history_l3.tag == "auctionFill", "metadata"
].apply(lambda x: x["quantity"])
history_l3.loc[history_l3.tag == "auctionFill", "price"] = history_l3.loc[
history_l3.tag == "auctionFill", "metadata"
].apply(lambda x: x["price"])
history_l3["printable"] = np.nan
history_l3["stock"] = np.nan
if not "REPLACE" in history_l3.type.unique():
history_l3["new_order_id"] = np.nan
history_l3["old_order_id"] = np.nan
history_l3.loc[history_l3.type == "REPLACE", "order_id"] = history_l3.loc[
history_l3.type == "REPLACE", "old_order_id"
]
history_l3.loc[history_l3.type == "EXEC", "side"] = np.nan
history_l3["type"] = history_l3["type"].replace(
{
"LIMIT": "ADD",
"CANCEL_PARTIAL": "CANCEL",
"CANCEL": "DELETE",
"EXEC": "EXECUTE",
# "MODIFY":"CANCEL"### not 100% sure, there might be actual order modifications
}
)
history_l3["side"] = history_l3["side"].replace({"ASK": "S", "BID": "B"})
history_l3["time"] = history_l3["time"] - ns_date(history_l3["time"])
history_l3["price"] = history_l3["price"] * 100
history_l3 = history_l3[
[
"time",
"stock",
"type",
"order_id",
"side",
"quantity",
"price",
"new_order_id",
"printable",
]
]
history_l3 = history_l3.rename(
columns={
"time": "timestamp",
"order_id": "reference",
"new_order_id": "new_reference",
"quantity": "shares",
}
)
return history_l3
def pretty_print(self, silent: bool = True) -> Optional[str]:
"""Print a nicely-formatted view of the current order book.
Arguments:
silent:
"""
# Start at the highest ask price and move down. Then switch to the highest bid price and move down.
# Show the total volume at each price. If silent is True, return the accumulated string and print nothing.
assert self.last_trade is not None
book = "{} order book as of {}\n".format(self.symbol, self.owner.current_time)
book += "Last trades: simulated {:d}, historical {:d}\n".format(
self.last_trade,
self.owner.oracle.observe_price(
self.symbol,
self.owner.current_time,
sigma_n=0,
random_state=self.owner.random_state,
),
)
book += "{:10s}{:10s}{:10s}\n".format("BID", "PRICE", "ASK")
book += "{:10s}{:10s}{:10s}\n".format("---", "-----", "---")
for quote, volume in self.get_l2_ask_data()[-1::-1]:
book += "{:10s}{:10s}{:10s}\n".format(
"", "{:d}".format(quote), "{:d}".format(volume)
)
for quote, volume in self.get_l2_bid_data():
book += "{:10s}{:10s}{:10s}\n".format(
"{:d}".format(volume), "{:d}".format(quote), ""
)
if silent:
return book
else:
print(book)
return None
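# --- Illustrative sketch (not part of the original module) --------------------
# A minimal, self-contained illustration of the price-time priority rule that
# handle_limit_order/execute_order implement above: match at the best price
# first, FIFO within a price level, consuming partial quantities as needed.
# All names below are hypothetical simplifications, not ABIDES APIs.
def _match_sketch(book, side, price, qty):
    """Match qty shares of an incoming order against a toy book.
    book is a list of (price, qty) tuples sorted best-first; side is "BID"
    for an incoming buy (matching resting asks) or "ASK" for an incoming
    sell (matching resting bids). Returns (fills, remaining_qty).
    """
    fills = []
    while qty > 0 and book:
        best_price, best_qty = book[0]
        crosses = price >= best_price if side == "BID" else price <= best_price
        if not crosses:
            break  # No overlap with the best resting price: stop matching.
        take = min(qty, best_qty)
        fills.append((best_price, take))  # Execute at the advertised price.
        qty -= take
        if take == best_qty:
            book.pop(0)  # Price level fully consumed.
        else:
            book[0] = (best_price, best_qty - take)
    return fills, qty
# Example: a buy for 150 @ 101 against asks [(100, 100), (101, 80)] sweeps the
# 100 level, partially fills at 101, and leaves nothing unmatched:
# _match_sketch([(100, 100), (101, 80)], "BID", 101, 150)
# -> ([(100, 100), (101, 50)], 0)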
| 38,202 | 37.903259 | 156 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/__init__.py
| 0 | 0 | 0 |
py
|
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/price_level.py
|
from typing import Dict, List, Optional, Tuple
from .orders import LimitOrder, Side
class PriceLevel:
"""
A class that represents a single price level containing multiple orders for one
side of an order book. The option to have hidden orders is supported. This class
    abstracts the complexity of handling both visible and hidden orders away from
the parent order book.
Visible orders are consumed first, followed by any hidden orders.
Attributes:
        visible_orders: A list of visible orders, where the order with index=0 is first
            in the queue and will be executed first.
        hidden_orders: A list of hidden orders, where the order with index=0 is first
            in the queue and will be executed first.
price: The price this PriceLevel represents.
side: The side of the market this PriceLevel represents.
"""
def __init__(self, orders: List[Tuple[LimitOrder, Dict]]) -> None:
"""
Arguments:
orders: A list of orders, containing both visible and hidden orders that
will be correctly allocated on initialisation. At least one order must
be given.
"""
if len(orders) == 0:
raise ValueError(
"At least one LimitOrder must be given when initialising a PriceLevel."
)
self.visible_orders: List[Tuple[LimitOrder, Dict]] = []
self.hidden_orders: List[Tuple[LimitOrder, Dict]] = []
self.price: int = orders[0][0].limit_price
self.side: Side = orders[0][0].side
for order, metadata in orders:
self.add_order(order, metadata)
def add_order(self, order: LimitOrder, metadata: Optional[Dict] = None) -> None:
"""
Adds an order to the correct queue in the price level.
Orders are added to the back of their respective queue.
Arguments:
order: The `LimitOrder` to add, can be visible or hidden.
metadata: Optional dict of metadata values to associate with the order.
"""
if order.is_hidden:
self.hidden_orders.append((order, metadata or {}))
elif order.insert_by_id:
insert_index = 0
for (order2, _) in self.visible_orders:
if order2.order_id > order.order_id:
break
insert_index += 1
self.visible_orders.insert(insert_index, (order, metadata or {}))
else:
self.visible_orders.append((order, metadata or {}))
def update_order_quantity(self, order_id: int, new_quantity: int) -> bool:
"""
Updates the quantity of an order.
The new_quantity must be greater than 0. To remove an order from the price
level use the `remove_order` method instead.
If the new quantity is less than or equal to the current quantity the order's
position in its respective queue will be maintained.
If the new quantity is more than the current quantity the order will be moved
to the back of its respective queue.
Arguments:
order_id: The ID of the order to update.
            new_quantity: The new quantity to update with.
        Returns:
            True if the update was successful, False if a matching order with the
            given ID could not be found or if the new quantity given is 0.
"""
if new_quantity == 0:
return False
for i, (order, metadata) in enumerate(self.visible_orders):
if order.order_id == order_id:
if new_quantity <= order.quantity:
order.quantity = new_quantity
else:
self.visible_orders.pop(i)
order.quantity = new_quantity
self.visible_orders.append((order, metadata))
return True
for i, (order, metadata) in enumerate(self.hidden_orders):
if order.order_id == order_id:
if new_quantity <= order.quantity:
order.quantity = new_quantity
else:
self.hidden_orders.pop(i)
order.quantity = new_quantity
self.hidden_orders.append((order, metadata))
return True
return False
def remove_order(self, order_id: int) -> Optional[Tuple[LimitOrder, Dict]]:
"""
Attempts to remove an order from the price level.
Arguments:
order_id: The ID of the order to remove.
Returns:
The order object if the order was found and removed, else None.
"""
for i, (book_order, _) in enumerate(self.visible_orders):
if book_order.order_id == order_id:
return self.visible_orders.pop(i)
for i, (book_order, _) in enumerate(self.hidden_orders):
if book_order.order_id == order_id:
return self.hidden_orders.pop(i)
return None
def peek(self) -> Tuple[LimitOrder, Dict]:
"""
        Returns the highest priority order in the price level. Visible orders are returned
        first, followed by hidden orders if no visible orders exist.
Raises a ValueError exception if the price level has no orders.
"""
if len(self.visible_orders) > 0:
return self.visible_orders[0]
elif len(self.hidden_orders) > 0:
return self.hidden_orders[0]
else:
raise ValueError(
"Can't peek at LimitOrder in PriceLevel as it contains no orders"
)
def pop(self) -> Tuple[LimitOrder, Dict]:
"""
        Removes the highest priority order in the price level and returns it. Visible
        orders are returned first, followed by hidden orders if no visible orders exist.
Raises a ValueError exception if the price level has no orders.
"""
if len(self.visible_orders) > 0:
return self.visible_orders.pop(0)
elif len(self.hidden_orders) > 0:
return self.hidden_orders.pop(0)
else:
raise ValueError(
"Can't pop LimitOrder from PriceLevel as it contains no orders"
)
def order_is_match(self, order: LimitOrder) -> bool:
"""
Checks if an order on the opposite side of the book is a match with this price
level.
The given order must be a `LimitOrder`.
Arguments:
order: The order to compare.
Returns:
True if the order is a match.
"""
if order.side == self.side:
raise ValueError("Attempted to compare order on wrong side of book")
if (
order.side.is_bid()
and (order.limit_price >= self.price)
and (not (order.is_post_only and self.total_quantity == 0))
):
return True
if (
order.side.is_ask()
and (order.limit_price <= self.price)
and (not (order.is_post_only and self.total_quantity == 0))
):
return True
return False
def order_has_better_price(self, order: LimitOrder) -> bool:
"""
Checks if an order on this side of the book has a better price than this price
level.
Arguments:
order: The order to compare.
Returns:
True if the given order has a better price.
"""
if order.side != self.side:
raise ValueError("Attempted to compare order on wrong side of book")
if order.side.is_bid() and (order.limit_price > self.price):
return True
if order.side.is_ask() and (order.limit_price < self.price):
return True
return False
def order_has_worse_price(self, order: LimitOrder) -> bool:
"""
Checks if an order on this side of the book has a worse price than this price
level.
Arguments:
order: The order to compare.
Returns:
True if the given order has a worse price.
"""
if order.side != self.side:
raise ValueError("Attempted to compare order on wrong side of book")
if order.side.is_bid() and (order.limit_price < self.price):
return True
if order.side.is_ask() and (order.limit_price > self.price):
return True
return False
def order_has_equal_price(self, order: LimitOrder) -> bool:
"""
Checks if an order on this side of the book has an equal price to this price
level.
Arguments:
order: The order to compare.
Returns:
True if the given order has an equal price.
"""
if order.side != self.side:
raise ValueError("Attempted to compare order on wrong side of book")
return order.limit_price == self.price
@property
def total_quantity(self) -> int:
"""
Returns the total visible order quantity of this price level.
"""
return sum(order.quantity for order, _ in self.visible_orders)
@property
def is_empty(self) -> bool:
"""
Returns True if this price level has no orders.
"""
return len(self.visible_orders) == 0 and len(self.hidden_orders) == 0
def __eq__(self, other: object) -> bool:
if not isinstance(other, PriceLevel):
raise NotImplementedError
return (
self.visible_orders == other.visible_orders
and self.hidden_orders == other.hidden_orders
)
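# --- Illustrative sketch (not part of the original module) --------------------
# PriceLevel only needs a handful of attributes from its orders, so a tiny
# hypothetical stand-in is enough to exercise the queue semantics documented
# above: shrinking an order keeps its queue position, growing it moves it to
# the back of the queue.
if __name__ == "__main__":
    from dataclasses import dataclass as _dataclass
    @_dataclass
    class _FakeOrder:  # hypothetical stub, not the real LimitOrder
        order_id: int
        quantity: int
        limit_price: int = 100
        side: object = None
        is_hidden: bool = False
        insert_by_id: bool = False
    level = PriceLevel([(_FakeOrder(1, 10), {}), (_FakeOrder(2, 20), {})])
    level.update_order_quantity(1, 5)   # Shrink: order 1 keeps first position.
    assert level.peek()[0].order_id == 1
    level.update_order_quantity(1, 50)  # Grow: order 1 moves to the back.
    assert level.peek()[0].order_id == 2
    assert level.total_quantity == 70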
| 9,655 | 33.241135 | 97 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/messages/orderbook.py
|
from abc import ABC
from dataclasses import dataclass
from abides_core import Message
from ..orders import LimitOrder, Order
@dataclass
class OrderBookMsg(Message, ABC):
pass
@dataclass
class OrderAcceptedMsg(OrderBookMsg):
order: LimitOrder
@dataclass
class OrderExecutedMsg(OrderBookMsg):
order: Order
@dataclass
class OrderCancelledMsg(OrderBookMsg):
order: LimitOrder
@dataclass
class OrderPartialCancelledMsg(OrderBookMsg):
new_order: LimitOrder
@dataclass
class OrderModifiedMsg(OrderBookMsg):
new_order: LimitOrder
@dataclass
class OrderReplacedMsg(OrderBookMsg):
old_order: LimitOrder
new_order: LimitOrder
| 663 | 14.44186 | 45 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/messages/marketdata.py
|
import sys
from abc import ABC
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Tuple
from abides_core import Message, NanosecondTime
from ..orders import Side
@dataclass
class MarketDataSubReqMsg(Message, ABC):
"""
Base class for creating or cancelling market data subscriptions with an
``ExchangeAgent``.
Attributes:
symbol: The symbol of the security to request a data subscription for.
        cancel: If False attempts to create a new subscription, if True attempts to
            cancel an existing subscription.
"""
symbol: str
cancel: bool = False
@dataclass
class MarketDataFreqBasedSubReqMsg(MarketDataSubReqMsg, ABC):
"""
Base class for creating or cancelling market data subscriptions with an
``ExchangeAgent``.
Attributes:
symbol: The symbol of the security to request a data subscription for.
        cancel: If False attempts to create a new subscription, if True attempts to
            cancel an existing subscription.
freq: The frequency in nanoseconds^-1 at which to receive market updates.
"""
# Inherited Fields:
# symbol: str
# cancel: bool = False
freq: int = 1
@dataclass
class MarketDataEventBasedSubReqMsg(MarketDataSubReqMsg, ABC):
"""
Base class for creating or cancelling market data subscriptions with an
``ExchangeAgent``.
Attributes:
symbol: The symbol of the security to request a data subscription for.
        cancel: If False attempts to create a new subscription, if True attempts to
            cancel an existing subscription.
"""
# Inherited Fields:
# symbol: str
# cancel: bool = False
@dataclass
class L1SubReqMsg(MarketDataFreqBasedSubReqMsg):
"""
This message requests the creation or cancellation of a subscription to L1 order
book data from an ``ExchangeAgent``.
Attributes:
symbol: The symbol of the security to request a data subscription for.
        cancel: If False attempts to create a new subscription, if True attempts to
            cancel an existing subscription.
freq: The frequency in nanoseconds^-1 at which to receive market updates.
"""
# Inherited Fields:
# symbol: str
# cancel: bool = False
# freq: int = 1
pass
@dataclass
class L2SubReqMsg(MarketDataFreqBasedSubReqMsg):
"""
This message requests the creation or cancellation of a subscription to L2 order
book data from an ``ExchangeAgent``.
Attributes:
symbol: The symbol of the security to request a data subscription for.
        cancel: If False attempts to create a new subscription, if True attempts to
            cancel an existing subscription.
freq: The frequency in nanoseconds^-1 at which to receive market updates.
depth: The maximum number of price levels on both sides of the order book to
return data for. Defaults to the entire book.
"""
# Inherited Fields:
# symbol: str
# cancel: bool = False
# freq: int = 1
depth: int = sys.maxsize
@dataclass
class L3SubReqMsg(MarketDataFreqBasedSubReqMsg):
"""
This message requests the creation or cancellation of a subscription to L3 order
book data from an ``ExchangeAgent``.
Attributes:
symbol: The symbol of the security to request a data subscription for.
        cancel: If False attempts to create a new subscription, if True attempts to
            cancel an existing subscription.
freq: The frequency in nanoseconds^-1 at which to receive market updates.
depth: The maximum number of price levels on both sides of the order book to
return data for. Defaults to the entire book.
"""
# Inherited Fields:
# symbol: str
# cancel: bool = False
# freq: int = 1
depth: int = sys.maxsize
@dataclass
class TransactedVolSubReqMsg(MarketDataFreqBasedSubReqMsg):
"""
This message requests the creation or cancellation of a subscription to transacted
volume order book data from an ``ExchangeAgent``.
Attributes:
symbol: The symbol of the security to request a data subscription for.
        cancel: If False attempts to create a new subscription, if True attempts to
            cancel an existing subscription.
freq: The frequency in nanoseconds^-1 at which to receive market updates.
lookback: The period in time backwards from the present to sum the transacted
volume for.
"""
# Inherited Fields:
# symbol: str
# cancel: bool = False
# freq: int = 1
lookback: str = "1min"
@dataclass
class BookImbalanceSubReqMsg(MarketDataEventBasedSubReqMsg):
"""
This message requests the creation or cancellation of a subscription to book
imbalance events.
Attributes:
symbol: The symbol of the security to request a data subscription for.
        cancel: If False attempts to create a new subscription, if True attempts to
            cancel an existing subscription.
        min_imbalance: The minimum book imbalance needed to trigger this subscription.
            0.0 is no imbalance.
            1.0 is full imbalance (i.e. a liquidity drop on one side).
"""
# Inherited Fields:
# symbol: str
# cancel: bool = False
min_imbalance: float = 1.0
@dataclass
class MarketDataMsg(Message, ABC):
"""
Base class for returning market data subscription results from an ``ExchangeAgent``.
The ``last_transaction`` and ``exchange_ts`` fields are not directly related to the
subscription data but are included for bookkeeping purposes.
Attributes:
symbol: The symbol of the security this data is for.
last_transaction: The time of the last transaction that happened on the exchange.
exchange_ts: The time that the message was sent from the exchange.
"""
symbol: str
last_transaction: int
exchange_ts: NanosecondTime
@dataclass
class MarketDataEventMsg(MarketDataMsg, ABC):
"""
Base class for returning market data subscription results from an ``ExchangeAgent``.
The ``last_transaction`` and ``exchange_ts`` fields are not directly related to the
subscription data but are included for bookkeeping purposes.
Attributes:
symbol: The symbol of the security this data is for.
last_transaction: The time of the last transaction that happened on the exchange.
exchange_ts: The time that the message was sent from the exchange.
stage: The stage of this event (start or finish).
"""
class Stage(Enum):
START = "START"
FINISH = "FINISH"
stage: Stage
@dataclass
class L1DataMsg(MarketDataMsg):
"""
This message returns L1 order book data as part of an L1 data subscription.
Attributes:
symbol: The symbol of the security this data is for.
last_transaction: The time of the last transaction that happened on the exchange.
exchange_ts: The time that the message was sent from the exchange.
bid: The best bid price and the available volume at that price.
ask: The best ask price and the available volume at that price.
"""
# Inherited Fields:
# symbol: str
# last_transaction: int
# exchange_ts: NanosecondTime
bid: Tuple[int, int]
ask: Tuple[int, int]
@dataclass
class L2DataMsg(MarketDataMsg):
"""
This message returns L2 order book data as part of an L2 data subscription.
Attributes:
symbol: The symbol of the security this data is for.
last_transaction: The time of the last transaction that happened on the exchange.
exchange_ts: The time that the message was sent from the exchange.
bids: A list of tuples containing the price and available volume at each bid
price level.
asks: A list of tuples containing the price and available volume at each ask
price level.
"""
# Inherited Fields:
# symbol: str
# last_transaction: int
# exchange_ts: NanosecondTime
bids: List[Tuple[int, int]]
asks: List[Tuple[int, int]]
# TODO: include requested depth
@dataclass
class L3DataMsg(MarketDataMsg):
"""
This message returns L3 order book data as part of an L3 data subscription.
Attributes:
symbol: The symbol of the security this data is for.
last_transaction: The time of the last transaction that happened on the exchange.
exchange_ts: The time that the message was sent from the exchange.
bids: A list of tuples containing the price and a list of order sizes at each
bid price level.
asks: A list of tuples containing the price and a list of order sizes at each
ask price level.
"""
# Inherited Fields:
# symbol: str
# last_transaction: int
# exchange_ts: NanosecondTime
bids: List[Tuple[int, List[int]]]
asks: List[Tuple[int, List[int]]]
# TODO: include requested depth
@dataclass
class TransactedVolDataMsg(MarketDataMsg):
"""
This message returns order book transacted volume data as part of an transacted
volume data subscription.
Attributes:
symbol: The symbol of the security this data is for.
last_transaction: The time of the last transaction that happened on the exchange.
exchange_ts: The time that the message was sent from the exchange.
bid_volume: The total transacted volume of bid orders for the given lookback period.
ask_volume: The total transacted volume of ask orders for the given lookback period.
"""
# Inherited Fields:
# symbol: str
# last_transaction: int
# exchange_ts: NanosecondTime
bid_volume: int
ask_volume: int
# TODO: include lookback period
@dataclass
class BookImbalanceDataMsg(MarketDataEventMsg):
"""
Sent when the book imbalance reaches a certain threshold dictated in the
subscription request message.
Attributes:
symbol: The symbol of the security this data is for.
last_transaction: The time of the last transaction that happened on the exchange.
exchange_ts: The time that the message was sent from the exchange.
stage: The stage of this event (start or finish).
imbalance: Proportional size of the imbalance.
side: Side of the book that the imbalance is towards.
"""
# Inherited Fields:
# symbol: str
# last_transaction: int
# exchange_ts: pd.Timestamp
# stage: MarketDataEventMsg.Stage
imbalance: float
side: Side
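# --- Illustrative sketch (not part of the original module) --------------------
# Subscriptions are created and cancelled with the same message type; the
# cancel flag selects the action. A hypothetical agent might build its
# requests like this (the ExchangeAgent send/receive wiring is assumed and
# not shown here):
if __name__ == "__main__":
    subscribe = L2SubReqMsg(symbol="ABM", freq=1_000_000_000, depth=10)
    unsubscribe = L2SubReqMsg(symbol="ABM", cancel=True, freq=1_000_000_000, depth=10)
    imbalance_watch = BookImbalanceSubReqMsg(symbol="ABM", min_imbalance=0.8)
    print(subscribe, unsubscribe, imbalance_watch, sep="\n")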
| 10,549 | 30.969697 | 92 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/messages/__init__.py
| 0 | 0 | 0 |
py
|
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/messages/market.py
|
from dataclasses import dataclass
from typing import Dict, Optional
from abides_core import Message, NanosecondTime
@dataclass
class MarketClosedMsg(Message):
"""
This message is sent from an ``ExchangeAgent`` to a ``TradingAgent`` when a ``TradingAgent`` has
made a request that cannot be completed because the market the ``ExchangeAgent`` trades
is closed.
"""
pass
@dataclass
class MarketHoursRequestMsg(Message):
"""
This message can be sent to an ``ExchangeAgent`` to query the opening hours of the market
it trades. A ``MarketHoursMsg`` is sent in response.
"""
pass
@dataclass
class MarketHoursMsg(Message):
"""
This message is sent by an ``ExchangeAgent`` in response to a ``MarketHoursRequestMsg``
message sent from a ``TradingAgent``.
Attributes:
mkt_open: The time that the market traded by the ``ExchangeAgent`` opens.
mkt_close: The time that the market traded by the ``ExchangeAgent`` closes.
"""
mkt_open: NanosecondTime
mkt_close: NanosecondTime
@dataclass
class MarketClosePriceRequestMsg(Message):
"""
This message can be sent to an ``ExchangeAgent`` to request that the close price of
the market is sent when the exchange closes. This is used to accurately calculate
the agent's final mark-to-market value.
"""
@dataclass
class MarketClosePriceMsg(Message):
"""
This message is sent by an ``ExchangeAgent`` when the exchange closes to all agents
    that have requested this message. The value is used to accurately calculate the
agent's final mark-to-market value.
Attributes:
close_prices: A mapping of symbols to closing prices.
"""
close_prices: Dict[str, Optional[int]]
| 1,752 | 26.390625 | 100 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/messages/query.py
|
from abc import ABC
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
from abides_core import Message
@dataclass
class QueryMsg(Message, ABC):
symbol: str
@dataclass
class QueryResponseMsg(Message, ABC):
symbol: str
mkt_closed: bool
@dataclass
class QueryLastTradeMsg(QueryMsg):
# Inherited Fields:
# symbol: str
pass
@dataclass
class QueryLastTradeResponseMsg(QueryResponseMsg):
# Inherited Fields:
# symbol: str
# mkt_closed: bool
last_trade: Optional[int]
@dataclass
class QuerySpreadMsg(QueryMsg):
# Inherited Fields:
# symbol: str
depth: int
@dataclass
class QuerySpreadResponseMsg(QueryResponseMsg):
# Inherited Fields:
# symbol: str
# mkt_closed: bool
depth: int
bids: List[Tuple[int, int]]
asks: List[Tuple[int, int]]
last_trade: Optional[int]
@dataclass
class QueryOrderStreamMsg(QueryMsg):
# Inherited Fields:
# symbol: str
length: int
@dataclass
class QueryOrderStreamResponseMsg(QueryResponseMsg):
# Inherited Fields:
# symbol: str
# mkt_closed: bool
length: int
orders: List[Dict[str, Any]]
@dataclass
class QueryTransactedVolMsg(QueryMsg):
# Inherited Fields:
# symbol: str
lookback_period: str
@dataclass
class QueryTransactedVolResponseMsg(QueryResponseMsg):
# Inherited Fields:
# symbol: str
# mkt_closed: bool
bid_volume: int
ask_volume: int
| 1,461 | 16.829268 | 54 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/messages/order.py
|
from abc import ABC
from dataclasses import dataclass
from abides_core import Message
from ..orders import LimitOrder, MarketOrder
@dataclass
class OrderMsg(Message, ABC):
pass
@dataclass
class LimitOrderMsg(OrderMsg):
order: LimitOrder
@dataclass
class MarketOrderMsg(OrderMsg):
order: MarketOrder
@dataclass
class CancelOrderMsg(OrderMsg):
order: LimitOrder
tag: str
metadata: dict
@dataclass
class PartialCancelOrderMsg(OrderMsg):
order: LimitOrder
quantity: int
tag: str
metadata: dict
@dataclass
class ModifyOrderMsg(OrderMsg):
old_order: LimitOrder
new_order: LimitOrder
@dataclass
class ReplaceOrderMsg(OrderMsg):
agent_id: int
old_order: LimitOrder
new_order: LimitOrder
| 755 | 14.12 | 44 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/models/order_size_model.py
|
import json
import numpy as np
from pomegranate import GeneralMixtureModel
order_size = {
"class": "GeneralMixtureModel",
"distributions": [
{
"class": "Distribution",
"name": "LogNormalDistribution",
"parameters": [2.9, 1.2],
"frozen": False,
},
{
"class": "Distribution",
"name": "NormalDistribution",
"parameters": [100.0, 0.15],
"frozen": True,
},
{
"class": "Distribution",
"name": "NormalDistribution",
"parameters": [200.0, 0.15],
"frozen": True,
},
{
"class": "Distribution",
"name": "NormalDistribution",
"parameters": [300.0, 0.15],
"frozen": True,
},
{
"class": "Distribution",
"name": "NormalDistribution",
"parameters": [400.0, 0.15],
"frozen": True,
},
{
"class": "Distribution",
"name": "NormalDistribution",
"parameters": [500.0, 0.15],
"frozen": True,
},
{
"class": "Distribution",
"name": "NormalDistribution",
"parameters": [600.0, 0.15],
"frozen": True,
},
{
"class": "Distribution",
"name": "NormalDistribution",
"parameters": [700.0, 0.15],
"frozen": True,
},
{
"class": "Distribution",
"name": "NormalDistribution",
"parameters": [800.0, 0.15],
"frozen": True,
},
{
"class": "Distribution",
"name": "NormalDistribution",
"parameters": [900.0, 0.15],
"frozen": True,
},
{
"class": "Distribution",
"name": "NormalDistribution",
"parameters": [1000.0, 0.15],
"frozen": True,
},
],
"weights": [
0.2,
0.7,
0.06,
0.004,
0.0329,
0.001,
0.0006,
0.0004,
0.0005,
0.0003,
0.0003,
],
}
class OrderSizeModel:
def __init__(self) -> None:
self.model = GeneralMixtureModel.from_json(json.dumps(order_size))
def sample(self, random_state: np.random.RandomState) -> float:
return round(self.model.sample(random_state=random_state))
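# --- Illustrative sketch (not part of the original module) --------------------
# The mixture above places ~70% of its weight on a tight spike at 100 shares,
# smaller spikes on the other round lots, and a LogNormal body (weight 0.2)
# for odd lots. A seeded draw might look like this:
if __name__ == "__main__":
    model = OrderSizeModel()
    rs = np.random.RandomState(42)
    print([model.sample(rs) for _ in range(8)])  # mostly 100s, occasional odd lots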
| 2,479 | 24.050505 | 74 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/models/__init__.py
|
from .order_size_model import OrderSizeModel
| 45 | 22 | 44 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/oracles/oracle.py
|
from abides_core import NanosecondTime
class Oracle:
def get_daily_open_price(
self, symbol: str, mkt_open: NanosecondTime, cents: bool = True
) -> int:
raise NotImplementedError
| 205 | 21.888889 | 71 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/oracles/mean_reverting_oracle.py
|
import datetime as dt
import logging
from math import sqrt
from typing import Any, Dict, Optional
import numpy as np
import pandas as pd
from abides_core import NanosecondTime
from .oracle import Oracle
logger = logging.getLogger(__name__)
class MeanRevertingOracle(Oracle):
"""The MeanRevertingOracle requires three parameters: a mean fundamental value,
a mean reversion coefficient, and a shock variance. It constructs and retains
a fundamental value time series for each requested symbol, and provides noisy
observations of those values upon agent request. The expectation is that
agents using such an oracle will know the mean-reverting equation and all
relevant parameters, but will not know the random shocks applied to the
sequence at each time step.
Historical dates are effectively meaningless to this oracle. It is driven by
the numpy random number seed contained within the experimental config file.
This oracle uses the nanoseconds portion of the current simulation time as
discrete "time steps". A suggestion: to keep wallclock runtime reasonable,
have the agents operate for only ~1000 nanoseconds, but interpret nanoseconds
as seconds or minutes."""
def __init__(
self,
mkt_open: NanosecondTime,
mkt_close: NanosecondTime,
symbols: Dict[str, Dict[str, Any]],
) -> None:
# Symbols must be a dictionary of dictionaries with outer keys as symbol names and
# inner keys: r_bar, kappa, sigma_s.
self.mkt_open: NanosecondTime = mkt_open
self.mkt_close: NanosecondTime = mkt_close
self.symbols: Dict[str, Dict[str, Any]] = symbols
        # The dictionary r holds the fundamental value series for each symbol.
self.r: Dict[str, pd.Series] = {}
then = dt.datetime.now()
for symbol in symbols:
s = symbols[symbol]
logger.debug(
"MeanRevertingOracle computing fundamental value series for {}", symbol
)
self.r[symbol] = self.generate_fundamental_value_series(symbol=symbol, **s)
now = dt.datetime.now()
logger.debug("MeanRevertingOracle initialized for symbols {}", symbols)
logger.debug("MeanRevertingOracle initialization took {}", now - then)
def generate_fundamental_value_series(
self, symbol: str, r_bar: int, kappa: float, sigma_s: float
) -> pd.Series:
"""Generates the fundamental value series for a single stock symbol.
Arguments:
            symbol: The symbol to calculate the fundamental value series for.
r_bar: The mean fundamental value.
kappa: The mean reversion coefficient.
sigma_s: The shock variance. (Note: NOT STANDARD DEVIATION)
Because the oracle uses the global np.random PRNG to create the
fundamental value series, it is important to create the oracle BEFORE
the agents. In this way the addition of a new agent will not affect the
sequence created. (Observations using the oracle will use an agent's
PRNG and thus not cause a problem.)
"""
# Turn variance into std.
sigma_s = sqrt(sigma_s)
# Create the time series into which values will be projected and initialize the first value.
date_range = pd.date_range(
self.mkt_open, self.mkt_close, closed="left", freq="N"
)
s = pd.Series(index=date_range)
r = np.zeros(len(s.index))
r[0] = r_bar
# Predetermine the random shocks for all time steps (at once, for computation speed).
shock = np.random.normal(scale=sigma_s, size=(r.shape[0]))
# Compute the mean reverting fundamental value series.
for t in range(1, r.shape[0]):
r[t] = max(0, (kappa * r_bar) + ((1 - kappa) * r[t - 1]) + shock[t])
# Replace the series values with the fundamental value series. Round and convert to
# integer cents.
s[:] = np.round(r)
return s.astype(int)
def get_daily_open_price(
self, symbol: str, mkt_open: NanosecondTime, cents: bool = True
) -> int:
"""Return the daily open price for the symbol given.
In the case of the MeanRevertingOracle, this will simply be the first
fundamental value, which is also the fundamental mean. We will use the
mkt_open time as given, however, even if it disagrees with this.
"""
# If we did not already know mkt_open, we should remember it.
if (mkt_open is not None) and (self.mkt_open is None):
self.mkt_open = mkt_open
        logger.debug(
            "Oracle: client requested {} at market open: {}", symbol, self.mkt_open
        )
open_price = self.r[symbol].loc[self.mkt_open]
logger.debug("Oracle: market open price was was {}", open_price)
return open_price
def observe_price(
self,
symbol: str,
current_time: NanosecondTime,
random_state: np.random.RandomState,
sigma_n: int = 1000,
) -> int:
"""Return a noisy observation of the current fundamental value.
While the fundamental value for a given equity at a given time step does
not change, multiple agents observing that value will receive different
observations.
Only the Exchange or other privileged agents should use noisy=False.
sigma_n is experimental observation variance. NOTE: NOT STANDARD DEVIATION.
Each agent must pass its RandomState object to ``observe_price``. This
ensures that each agent will receive the same answers across multiple
same-seed simulations even if a new agent has been added to the experiment.
"""
# If the request is made after market close, return the close price.
if current_time >= self.mkt_close:
r_t = self.r[symbol].loc[self.mkt_close - 1]
else:
r_t = self.r[symbol].loc[current_time]
# Generate a noisy observation of fundamental value at the current time.
if sigma_n == 0:
obs = r_t
else:
obs = int(round(random_state.normal(loc=r_t, scale=sqrt(sigma_n))))
logger.debug("Oracle: current fundamental value is {} at {}", r_t, current_time)
logger.debug("Oracle: giving client value observation {}", obs)
# Reminder: all simulator prices are specified in integer cents.
return obs
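# --- Illustrative sketch (not part of the original module) --------------------
# The recurrence in generate_fundamental_value_series is
#     r[t] = max(0, kappa * r_bar + (1 - kappa) * r[t - 1] + shock[t]),
# so each step pulls the value a fraction kappa of the way back toward r_bar.
# A tiny standalone run (hypothetical parameter values) makes the pull visible:
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    r_bar, kappa, sigma_s = 100_000, 0.05, 1_000_000
    r = 110_000.0  # Start perturbed 10% above the mean.
    for t in range(5):
        r = max(0, kappa * r_bar + (1 - kappa) * r + rng.normal(scale=sqrt(sigma_s)))
        print(t, round(r))  # Drifts back toward 100_000, plus noise.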
| 6,502 | 37.94012 | 100 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/oracles/__init__.py
|
# from .data_oracle import DataOracle
# from .external_file_oracle import ExternalFileOracle
from .mean_reverting_oracle import MeanRevertingOracle
from .oracle import Oracle
from .sparse_mean_reverting_oracle import SparseMeanRevertingOracle
| 243 | 39.666667 | 67 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/oracles/sparse_mean_reverting_oracle.py
|
import datetime as dt
import logging
from math import exp, sqrt
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
from abides_core import NanosecondTime
from .mean_reverting_oracle import MeanRevertingOracle
logger = logging.getLogger(__name__)
class SparseMeanRevertingOracle(MeanRevertingOracle):
"""The SparseMeanRevertingOracle produces a fundamental value time series for
each requested symbol, and provides noisy observations of the fundamental
value upon agent request. This "sparse discrete" fundamental uses a
combination of two processes to produce relatively realistic synthetic
"values": a continuous mean-reverting Ornstein-Uhlenbeck process plus
periodic "megashocks" which arrive following a Poisson process and have
magnitude drawn from a bimodal normal distribution (overall mean zero,
but with modes well away from zero). This is necessary because OU itself
is a single noisy return to the mean (from a perturbed initial state)
that does not then depart the mean except in terms of minor "noise".
Historical dates are effectively meaningless to this oracle. It is driven by
the numpy random number seed contained within the experimental config file.
This oracle uses the nanoseconds portion of the current simulation time as
discrete "time steps".
This version of the MeanRevertingOracle expects agent activity to be spread
across a large amount of time, with relatively sparse activity. That is,
agents each acting at realistic "retail" intervals, on the order of seconds
or minutes, spread out across the day.
"""
def __init__(
self,
mkt_open: NanosecondTime,
mkt_close: NanosecondTime,
symbols: Dict[str, Dict[str, Any]],
) -> None:
# Symbols must be a dictionary of dictionaries with outer keys as symbol names and
# inner keys: r_bar, kappa, sigma_s.
self.mkt_open: NanosecondTime = mkt_open
self.mkt_close: NanosecondTime = mkt_close
self.symbols: Dict[str, Dict[str, Any]] = symbols
self.f_log: Dict[str, List[Dict[str, Any]]] = {}
# The dictionary r holds the most recent fundamental values for each symbol.
        self.r: Dict[str, Tuple[NanosecondTime, int]] = {}
# The dictionary megashocks holds the time series of megashocks for each symbol.
# The last one will always be in the future (relative to the current simulation time).
#
# Without these, the OU process just makes a noisy return to the mean and then stays there
# with relatively minor noise. Here we want them to follow a Poisson process, so we sample
# from an exponential distribution for the separation intervals.
self.megashocks: Dict[str, List[Dict[str, Any]]] = {}
then = dt.datetime.now()
# Note that each value in the self.r dictionary is a 2-tuple of the timestamp at
# which the series was computed and the true fundamental value at that time.
for symbol in symbols:
s = symbols[symbol]
logger.debug(
"SparseMeanRevertingOracle computing initial fundamental value for {}".format(
symbol
)
)
self.r[symbol] = (mkt_open, s["r_bar"])
self.f_log[symbol] = [
{"FundamentalTime": mkt_open, "FundamentalValue": s["r_bar"]}
]
# Compute the time and value of the first megashock. Note that while the values are
# mean-zero, they are intentionally bimodal (i.e. we always want to push the stock
# some, but we will tend to cancel out via pushes in opposite directions).
            ms_time_delta = np.random.exponential(scale=1.0 / s["megashock_lambda_a"])
            mst = self.mkt_open + int(ms_time_delta)
msv = s["random_state"].normal(
loc=s["megashock_mean"], scale=sqrt(s["megashock_var"])
)
msv = msv if s["random_state"].randint(2) == 0 else -msv
self.megashocks[symbol] = [{"MegashockTime": mst, "MegashockValue": msv}]
now = dt.datetime.now()
logger.debug(
"SparseMeanRevertingOracle initialized for symbols {}".format(symbols)
)
logger.debug(
"SparseMeanRevertingOracle initialization took {}".format(now - then)
)
def compute_fundamental_at_timestamp(
self, ts: NanosecondTime, v_adj, symbol: str, pt: NanosecondTime, pv
) -> int:
"""
Arguments:
ts: A requested timestamp to which we should advance the fundamental.
v_adj: A value adjustment to apply after advancing time (must pass zero if none).
symbol: A symbol for which to advance time.
pt: A previous timestamp.
pv: A previous fundamental.
        The last two parameters should relate to the most recent time this
        method was invoked. As a side effect, this method updates the log of
        computed fundamental values.

        Returns:
            The new value.
        """
s = self.symbols[symbol]
# This oracle uses the Ornstein-Uhlenbeck Process. It is quite close to being a
# continuous version of the discrete mean reverting process used in the regular
# (dense) MeanRevertingOracle.
# Compute the time delta from the previous time to the requested time.
d = ts - pt
# Extract the parameters for the OU process update.
mu = s["r_bar"]
gamma = s["kappa"]
theta = s["fund_vol"] # the volatility value for the generated time-series.
# The OU process is able to skip any amount of time and sample the next desired value
# from the appropriate distribution of possible values.
v = s["random_state"].normal(
loc=mu + (pv - mu) * (exp(-gamma * d)),
scale=sqrt(((theta**2) / (2 * gamma)) * (1 - exp(-2 * gamma * d))),
)
# Apply the value adjustment that was passed in.
v += v_adj
# The process is not permitted to become negative.
v = max(0, v)
# For our purposes, the value must be rounded and converted to integer cents.
v = int(round(v))
# Cache the new time and value as the "previous" fundamental values.
self.r[symbol] = (ts, v)
# Append the change to the permanent log of fundamental values for this symbol.
self.f_log[symbol].append({"FundamentalTime": ts, "FundamentalValue": v})
# Return the new value for the requested timestamp.
return v
def advance_fundamental_value_series(
self, current_time: NanosecondTime, symbol: str
) -> int:
"""This method advances the fundamental value series for a single stock symbol,
using the OU process. It may proceed in several steps due to our periodic
application of "megashocks" to push the stock price around, simulating
exogenous forces."""
# Generation of the fundamental value series uses a separate random state object
# per symbol, which is part of the dictionary we maintain for each symbol.
# Agent observations using the oracle will use an agent's random state object.
s = self.symbols[symbol]
# This is the previous fundamental time and value.
pt, pv = self.r[symbol]
# If time hasn't changed since the last advance, just use the current value.
if current_time <= pt:
return pv
# Otherwise, we have some work to do, advancing time and computing the fundamental.
# We may not jump straight to the requested time, because we periodically apply
# megashocks to push the series around (not always away from the mean) and we need
# to compute OU at each of those times, so the aftereffects of the megashocks
# properly affect the remaining OU interval.
mst = self.megashocks[symbol][-1]["MegashockTime"]
msv = self.megashocks[symbol][-1]["MegashockValue"]
while mst < current_time:
# A megashock is scheduled to occur before the new time to which we are advancing. Handle it.
# Advance time from the previous time to the time of the megashock using the OU process and
# then applying the next megashock value.
v = self.compute_fundamental_at_timestamp(mst, msv, symbol, pt, pv)
# Update our "previous" values for the next computation.
pt, pv = mst, v
# Since we just surpassed the last megashock time, compute the next one, which we might or
# might not immediately consume. This works just like the first time (in __init__()).
mst = pt + int(np.random.exponential(scale=1.0 / s["megashock_lambda_a"]))
msv = s["random_state"].normal(
loc=s["megashock_mean"], scale=sqrt(s["megashock_var"])
)
msv = msv if s["random_state"].randint(2) == 0 else -msv
self.megashocks[symbol].append(
{"MegashockTime": mst, "MegashockValue": msv}
)
# The loop will continue until there are no more megashocks before the time requested
# by the calling method.
# Once there are no more megashocks to apply (i.e. the next megashock is in the future, after
# current_time), then finally advance using the OU process to the requested time.
v = self.compute_fundamental_at_timestamp(current_time, 0, symbol, pt, pv)
return v
def get_daily_open_price(
self, symbol: str, mkt_open: NanosecondTime, cents: bool = True
) -> int:
"""Return the daily open price for the symbol given.
In the case of the MeanRevertingOracle, this will simply be the first
fundamental value, which is also the fundamental mean. We will use the
mkt_open time as given, however, even if it disagrees with this.
"""
# The sparse oracle doesn't maintain full fundamental value history, but rather
# advances on demand keeping only the most recent price, except for the opening
# price. Thus we cannot honor a mkt_open that isn't what we already expected.
logger.debug(
"Oracle: client requested {} at market open: {}".format(
symbol, self.mkt_open
)
)
open_price = self.symbols[symbol]["r_bar"]
logger.debug("Oracle: market open price was was {}".format(open_price))
return open_price
def observe_price(
self,
symbol: str,
current_time: NanosecondTime,
random_state: np.random.RandomState,
sigma_n: int = 1000,
) -> int:
"""Return a noisy observation of the current fundamental value.
While the fundamental value for a given equity at a given time step does
not change, multiple agents observing that value will receive different
observations.
Only the Exchange or other privileged agents should use sigma_n==0.
sigma_n is experimental observation variance. NOTE: NOT STANDARD DEVIATION.
Each agent must pass its RandomState object to observe_price. This ensures that
each agent will receive the same answers across multiple same-seed simulations
even if a new agent has been added to the experiment.
"""
# If the request is made after market close, return the close price.
if current_time >= self.mkt_close:
r_t = self.advance_fundamental_value_series(self.mkt_close - 1, symbol)
else:
r_t = self.advance_fundamental_value_series(current_time, symbol)
# Generate a noisy observation of fundamental value at the current time.
if sigma_n == 0:
obs = r_t
else:
obs = int(round(random_state.normal(loc=r_t, scale=sqrt(sigma_n))))
logger.debug(
"Oracle: current fundamental value is {} at {}".format(r_t, current_time)
)
logger.debug("Oracle: giving client value observation {}".format(obs))
# Reminder: all simulator prices are specified in integer cents.
return obs
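
# ---------------------------------------------------------------------------
# Editor's sketch (not part of the ABIDES API; run via
# ``python -m abides_markets.oracles.sparse_mean_reverting_oracle`` so the
# relative import above resolves): a self-contained re-derivation of the two
# mechanisms used by this oracle -- the closed-form OU transition from
# compute_fundamental_at_timestamp() and the Poisson-arriving, sign-flipped
# megashocks. All parameter values below are illustrative only; the megashock
# rate is deliberately high so shocks appear in a ten-second demo.
if __name__ == "__main__":
    rs = np.random.RandomState(42)
    r_bar, kappa, fund_vol = 100_000, 1.67e-12, 1e-4  # cents, 1/ns, volatility
    lam, ms_mean, ms_var = 1e-9, 1_000, 50_000  # megashock arrival/size params

    def ou_step(prev_v: float, d: int) -> float:
        # Sample the OU value after a gap of d nanoseconds in a single draw.
        mean = r_bar + (prev_v - r_bar) * exp(-kappa * d)
        var = (fund_vol**2 / (2 * kappa)) * (1 - exp(-2 * kappa * d))
        return rs.normal(loc=mean, scale=sqrt(var))

    t, v = 0, float(r_bar)
    next_ms = int(rs.exponential(scale=1.0 / lam))
    for _ in range(10):
        t_next = t + 1_000_000_000  # advance one simulated second
        while next_ms < t_next:  # apply any scheduled megashocks first
            v = ou_step(v, next_ms - t)
            shock = rs.normal(loc=ms_mean, scale=sqrt(ms_var))
            v += shock if rs.randint(2) == 0 else -shock  # bimodal, mean zero
            t = next_ms
            next_ms = t + int(rs.exponential(scale=1.0 / lam))
        v = max(0.0, ou_step(v, t_next - t))
        t = t_next
        print(f"t={t} ns  fundamental={int(round(v))} cents")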
| 12,258 | 41.418685 | 106 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/noise_agent.py
|
import logging
from typing import Optional
import numpy as np
from abides_core import Message, NanosecondTime
from ..generators import OrderSizeGenerator
from ..messages.query import QuerySpreadResponseMsg
from ..orders import Side
from .trading_agent import TradingAgent
logger = logging.getLogger(__name__)
class NoiseAgent(TradingAgent):
"""
    The noise agent implements a simple strategy: it wakes up once and places a single order.
"""
def __init__(
self,
id: int,
name: Optional[str] = None,
type: Optional[str] = None,
random_state: Optional[np.random.RandomState] = None,
symbol: str = "IBM",
starting_cash: int = 100000,
log_orders: bool = False,
order_size_model: Optional[OrderSizeGenerator] = None,
wakeup_time: Optional[NanosecondTime] = None,
) -> None:
# Base class init.
super().__init__(id, name, type, random_state, starting_cash, log_orders)
self.wakeup_time: NanosecondTime = wakeup_time
self.symbol: str = symbol # symbol to trade
# The agent uses this to track whether it has begun its strategy or is still
# handling pre-market tasks.
self.trading: bool = False
# The agent begins in its "complete" state, not waiting for
# any special event or condition.
self.state: str = "AWAITING_WAKEUP"
# The agent must track its previous wake time, so it knows how many time
# units have passed.
self.prev_wake_time: Optional[NanosecondTime] = None
self.size: Optional[int] = (
self.random_state.randint(20, 50) if order_size_model is None else None
)
self.order_size_model = order_size_model # Probabilistic model for order size
def kernel_starting(self, start_time: NanosecondTime) -> None:
# self.kernel is set in Agent.kernel_initializing()
# self.exchange_id is set in TradingAgent.kernel_starting()
super().kernel_starting(start_time)
self.oracle = self.kernel.oracle
def kernel_stopping(self) -> None:
# Always call parent method to be safe.
super().kernel_stopping()
        # Avoid logging end-of-day valuation for an agent that never woke up
try:
# noise trader surplus is marked to EOD
bid, bid_vol, ask, ask_vol = self.get_known_bid_ask(self.symbol)
except KeyError:
self.logEvent("FINAL_VALUATION", self.starting_cash, True)
else:
# Print end of day valuation.
H = int(round(self.get_holdings(self.symbol), -2) / 100)
if bid and ask:
                rT = int((bid + ask) / 2)
else:
rT = self.last_trade[self.symbol]
# final (real) fundamental value times shares held.
surplus = rT * H
logger.debug("Surplus after holdings: {}", surplus)
# Add ending cash value and subtract starting cash value.
surplus += self.holdings["CASH"] - self.starting_cash
surplus = float(surplus) / self.starting_cash
self.logEvent("FINAL_VALUATION", surplus, True)
            logger.debug(
                "{} final report. Holdings: {}, end cash: {}, start cash: {}, final fundamental: {}, surplus: {}".format(
                    self.name,
                    H,
                    self.holdings["CASH"],
                    self.starting_cash,
                    rT,
                    surplus,
                )
            )
def wakeup(self, current_time: NanosecondTime) -> None:
# Parent class handles discovery of exchange times and market_open wakeup call.
super().wakeup(current_time)
self.state = "INACTIVE"
if not self.mkt_open or not self.mkt_close:
# TradingAgent handles discovery of exchange times.
return
else:
if not self.trading:
self.trading = True
# Time to start trading!
logger.debug("{} is ready to start trading now.", self.name)
# Steady state wakeup behavior starts here.
# If we've been told the market has closed for the day, we will only request
# final price information, then stop.
if self.mkt_closed and (self.symbol in self.daily_close_price):
# Market is closed and we already got the daily close price.
return
if self.wakeup_time > current_time:
self.set_wakeup(self.wakeup_time)
return
if self.mkt_closed and self.symbol not in self.daily_close_price:
self.get_current_spread(self.symbol)
self.state = "AWAITING_SPREAD"
return
if type(self) == NoiseAgent:
self.get_current_spread(self.symbol)
self.state = "AWAITING_SPREAD"
else:
self.state = "ACTIVE"
def placeOrder(self) -> None:
        # Place an order in a random direction, crossing the spread at the
        # opposite quote (a marketable limit order).
buy_indicator = self.random_state.randint(0, 1 + 1)
bid, bid_vol, ask, ask_vol = self.get_known_bid_ask(self.symbol)
if self.order_size_model is not None:
self.size = self.order_size_model.sample(random_state=self.random_state)
if self.size > 0:
if buy_indicator == 1 and ask:
self.place_limit_order(self.symbol, self.size, Side.BID, ask)
elif not buy_indicator and bid:
self.place_limit_order(self.symbol, self.size, Side.ASK, bid)
def receive_message(
self, current_time: NanosecondTime, sender_id: int, message: Message
) -> None:
# Parent class schedules market open wakeup call once market open/close times are known.
super().receive_message(current_time, sender_id, message)
# We have been awakened by something other than our scheduled wakeup.
# If our internal state indicates we were waiting for a particular event,
# check if we can transition to a new state.
if self.state == "AWAITING_SPREAD":
# We were waiting to receive the current spread/book. Since we don't currently
# track timestamps on retained information, we rely on actually seeing a
# QUERY_SPREAD response message.
if isinstance(message, QuerySpreadResponseMsg):
# This is what we were waiting for.
# But if the market is now closed, don't advance to placing orders.
if self.mkt_closed:
return
# We now have the information needed to place a limit order with the eta
# strategic threshold parameter.
self.placeOrder()
self.state = "AWAITING_WAKEUP"
# Internal state and logic specific to this agent subclass.
def get_wake_frequency(self) -> NanosecondTime:
return self.random_state.randint(low=0, high=100)
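
# ---------------------------------------------------------------------------
# Editor's sketch (illustrative numbers, not part of the ABIDES API): the
# end-of-day "FINAL_VALUATION" surplus computed in kernel_stopping() above is
# fundamental * holdings (in round lots of 100 shares) plus the change in
# cash, normalized by starting cash.
if __name__ == "__main__":
    starting_cash, ending_cash = 10_000_000, 9_500_000  # integer cents
    shares_held, r_T = 250, 100_100  # shares held; fundamental value in cents
    H = int(round(shares_held, -2) / 100)  # 250 -> 200 -> 2 round lots
    surplus = (r_T * H + ending_cash - starting_cash) / starting_cash
    print(f"H={H} round lots, normalized surplus={surplus:.4f}")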
| 6,925 | 34.88601 | 115 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/trading_agent.py
|
import logging
import sys
import warnings
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy as np
from abides_core import Message, NanosecondTime
from abides_core.utils import fmt_ts
from ..messages.market import (
MarketClosePriceRequestMsg,
MarketClosePriceMsg,
MarketClosedMsg,
MarketHoursRequestMsg,
MarketHoursMsg,
)
from ..messages.marketdata import MarketDataSubReqMsg, MarketDataMsg, L2DataMsg
from ..messages.order import (
LimitOrderMsg,
MarketOrderMsg,
PartialCancelOrderMsg,
CancelOrderMsg,
ModifyOrderMsg,
ReplaceOrderMsg,
)
from ..messages.orderbook import (
OrderAcceptedMsg,
OrderExecutedMsg,
OrderCancelledMsg,
OrderPartialCancelledMsg,
OrderModifiedMsg,
OrderReplacedMsg,
)
from ..messages.query import (
QueryLastTradeMsg,
QueryLastTradeResponseMsg,
QuerySpreadMsg,
QuerySpreadResponseMsg,
QueryOrderStreamMsg,
QueryOrderStreamResponseMsg,
QueryTransactedVolMsg,
QueryTransactedVolResponseMsg,
)
from ..orders import Order, LimitOrder, MarketOrder, Side
from .financial_agent import FinancialAgent
from .exchange_agent import ExchangeAgent
logger = logging.getLogger(__name__)
class TradingAgent(FinancialAgent):
"""
The TradingAgent class (via FinancialAgent, via Agent) is intended as the
base class for all trading agents (i.e. not things like exchanges) in a
market simulation.
It handles a lot of messaging (inbound and outbound) and state maintenance
automatically, so subclasses can focus just on implementing a strategy without
too much bookkeeping.
"""
def __init__(
self,
id: int,
name: Optional[str] = None,
type: Optional[str] = None,
random_state: Optional[np.random.RandomState] = None,
starting_cash: int = 100000,
log_orders: bool = False,
) -> None:
# Base class init.
super().__init__(id, name, type, random_state)
# We don't yet know when the exchange opens or closes.
self.mkt_open: Optional[NanosecondTime] = None
self.mkt_close: Optional[NanosecondTime] = None
# Log order activity?
self.log_orders: bool = log_orders
# Log all activity to file?
if log_orders is None:
self.log_orders = False
self.log_to_file = False
# Store starting_cash in case we want to refer to it for performance stats.
# It should NOT be modified. Use the 'CASH' key in self.holdings.
# 'CASH' is always in cents! Note that agents are limited by their starting
# cash, currently without leverage. Taking short positions is permitted,
# but does NOT increase the amount of at-risk capital allowed.
self.starting_cash: int = starting_cash
# TradingAgent has constants to support simulated market orders.
self.MKT_BUY = sys.maxsize
self.MKT_SELL = 0
# The base TradingAgent will track its holdings and outstanding orders.
# Holdings is a dictionary of symbol -> shares. CASH is a special symbol
# worth one cent per share. Orders is a dictionary of active, open orders
# (not cancelled, not fully executed) keyed by order_id.
self.holdings: Dict[str, int] = {"CASH": starting_cash}
self.orders: Dict[int, Order] = {}
# The base TradingAgent also tracks last known prices for every symbol
        # for which it has received a QUERY_LAST_TRADE message. Subclass
# agents may use or ignore this as they wish. Note that the subclass
# agent must request pricing when it wants it. This agent does NOT
# automatically generate such requests, though it has a helper function
# that can be used to make it happen.
self.last_trade: Dict[str, int] = {}
# used in subscription mode to record the timestamp for which the data was current in the ExchangeAgent
self.exchange_ts: Dict[str, NanosecondTime] = {}
# When a last trade price comes in after market close, the trading agent
# automatically records it as the daily close price for a symbol.
self.daily_close_price: Dict[str, int] = {}
self.nav_diff: int = 0
self.basket_size: int = 0
# The agent remembers the last known bids and asks (with variable depth,
# showing only aggregate volume at each price level) when it receives
# a response to QUERY_SPREAD.
self.known_bids: Dict = {}
self.known_asks: Dict = {}
# The agent remembers the order history communicated by the exchange
# when such is requested by an agent (for example, a heuristic belief
# learning agent).
self.stream_history: Dict[str, Any] = {}
# The agent records the total transacted volume in the exchange for a given symbol and lookback period
self.transacted_volume: Dict = {}
# Each agent can choose to log the orders executed
self.executed_orders: List = []
# For special logging at the first moment the simulator kernel begins
# running (which is well after agent init), it is useful to keep a simple
# boolean flag.
self.first_wake: bool = True
# Remember whether we have already passed the exchange close time, as far
# as we know.
self.mkt_closed: bool = False
# Simulation lifecycle messages.
def kernel_starting(self, start_time: NanosecondTime) -> None:
"""
Arguments:
start_time: The time that the simulation started.
"""
assert self.kernel is not None
# self.kernel is set in Agent.kernel_initializing()
self.logEvent("STARTING_CASH", self.starting_cash, True)
# Find an exchange with which we can place orders. It is guaranteed
# to exist by now (if there is one).
self.exchange_id: int = self.kernel.find_agents_by_type(ExchangeAgent)[0]
logger.debug(
f"Agent {self.id} requested agent of type Agent.ExchangeAgent. Given Agent ID: {self.exchange_id}"
)
# Request a wake-up call as in the base Agent.
super().kernel_starting(start_time)
def kernel_stopping(self) -> None:
# Always call parent method to be safe.
super().kernel_stopping()
assert self.kernel is not None
# Print end of day holdings.
self.logEvent(
"FINAL_HOLDINGS", self.fmt_holdings(self.holdings), deepcopy_event=False
)
self.logEvent("FINAL_CASH_POSITION", self.holdings["CASH"], True)
# Mark to market.
cash = self.mark_to_market(self.holdings)
self.logEvent("ENDING_CASH", cash, True)
logger.debug(
"Final holdings for {}: {}. Marked to market: {}".format(
self.name, self.fmt_holdings(self.holdings), cash
)
)
# Record final results for presentation/debugging. This is an ugly way
# to do this, but it is useful for now.
mytype = self.type
gain = cash - self.starting_cash
if mytype in self.kernel.mean_result_by_agent_type:
self.kernel.mean_result_by_agent_type[mytype] += gain
self.kernel.agent_count_by_type[mytype] += 1
else:
self.kernel.mean_result_by_agent_type[mytype] = gain
self.kernel.agent_count_by_type[mytype] = 1
# Simulation participation messages.
def wakeup(self, current_time: NanosecondTime) -> bool:
"""
Arguments:
current_time: The time that this agent was woken up by the kernel.
Returns:
For the sake of subclasses, TradingAgent now returns a boolean
indicating whether the agent is "ready to trade" -- has it received
            the market open and close times, and is the market not already closed.
"""
super().wakeup(current_time)
if self.first_wake:
# Log initial holdings.
self.logEvent("HOLDINGS_UPDATED", self.holdings)
self.first_wake = False
# Tell the exchange we want to be sent the final prices when the market closes.
self.send_message(self.exchange_id, MarketClosePriceRequestMsg())
if self.mkt_open is None:
# Ask our exchange when it opens and closes.
self.send_message(self.exchange_id, MarketHoursRequestMsg())
        return bool(self.mkt_open and self.mkt_close) and not self.mkt_closed
def request_data_subscription(
self, subscription_message: MarketDataSubReqMsg
) -> None:
"""
Used by any Trading Agent subclass to create a subscription to market data from
the Exchange Agent.
Arguments:
subscription_message: An instance of a MarketDataSubReqMessage.
"""
subscription_message.cancel = False
self.send_message(recipient_id=self.exchange_id, message=subscription_message)
def cancel_data_subscription(
self, subscription_message: MarketDataSubReqMsg
) -> None:
"""
Used by any Trading Agent subclass to cancel subscription to market data from
the Exchange Agent.
Arguments:
subscription_message: An instance of a MarketDataSubReqMessage.
"""
subscription_message.cancel = True
self.send_message(recipient_id=self.exchange_id, message=subscription_message)
def receive_message(
self, current_time: NanosecondTime, sender_id: int, message: Message
) -> None:
"""
Arguments:
current_time: The time that this agent received the message.
sender_id: The ID of the agent who sent the message.
message: The message contents.
"""
assert self.kernel is not None
super().receive_message(current_time, sender_id, message)
# Do we know the market hours?
had_mkt_hours = self.mkt_open is not None and self.mkt_close is not None
# Record market open or close times.
if isinstance(message, MarketHoursMsg):
self.mkt_open = message.mkt_open
self.mkt_close = message.mkt_close
logger.debug("Recorded market open: {}".format(fmt_ts(self.mkt_open)))
logger.debug("Recorded market close: {}".format(fmt_ts(self.mkt_close)))
elif isinstance(message, MarketClosePriceMsg):
# Update our local last trade prices with the accurate last trade prices from
# the exchange so we can accurately calculate our mark-to-market values.
for symbol, close_price in message.close_prices.items():
self.last_trade[symbol] = close_price
elif isinstance(message, MarketClosedMsg):
# We've tried to ask the exchange for something after it closed. Remember this
# so we stop asking for things that can't happen.
self.market_closed()
elif isinstance(message, OrderExecutedMsg):
# Call the order_executed method, which subclasses should extend. This parent
# class could implement default "portfolio tracking" or "returns tracking"
# behavior.
self.order_executed(message.order)
elif isinstance(message, OrderAcceptedMsg):
# Call the order_accepted method, which subclasses should extend.
self.order_accepted(message.order)
elif isinstance(message, OrderCancelledMsg):
# Call the order_cancelled method, which subclasses should extend.
self.order_cancelled(message.order)
elif isinstance(message, OrderPartialCancelledMsg):
            # Call the order_partial_cancelled method, which subclasses should extend.
self.order_partial_cancelled(message.new_order)
elif isinstance(message, OrderModifiedMsg):
            # Call the order_modified method, which subclasses should extend.
self.order_modified(message.new_order)
elif isinstance(message, OrderReplacedMsg):
            # Call the order_replaced method, which subclasses should extend.
self.order_replaced(message.old_order, message.new_order)
elif isinstance(message, QueryLastTradeResponseMsg):
# Call the query_last_trade method, which subclasses may extend.
# Also note if the market is closed.
if message.mkt_closed:
self.mkt_closed = True
self.query_last_trade(message.symbol, message.last_trade)
elif isinstance(message, QuerySpreadResponseMsg):
# Call the query_spread method, which subclasses may extend.
# Also note if the market is closed.
if message.mkt_closed:
self.mkt_closed = True
self.query_spread(
message.symbol, message.last_trade, message.bids, message.asks, ""
)
elif isinstance(message, QueryOrderStreamResponseMsg):
# Call the query_order_stream method, which subclasses may extend.
# Also note if the market is closed.
if message.mkt_closed:
self.mkt_closed = True
self.query_order_stream(message.symbol, message.orders)
elif isinstance(message, QueryTransactedVolResponseMsg):
if message.mkt_closed:
self.mkt_closed = True
self.query_transacted_volume(
message.symbol, message.bid_volume, message.ask_volume
)
elif isinstance(message, MarketDataMsg):
self.handle_market_data(message)
# Now do we know the market hours?
have_mkt_hours = self.mkt_open is not None and self.mkt_close is not None
# Once we know the market open and close times, schedule a wakeup call for market open.
# Only do this once, when we first have both items.
if have_mkt_hours and not had_mkt_hours:
# Agents are asked to generate a wake offset from the market open time. We structure
# this as a subclass request so each agent can supply an appropriate offset relative
# to its trading frequency.
ns_offset = self.get_wake_frequency()
self.set_wakeup(self.mkt_open + ns_offset)
def get_last_trade(self, symbol: str) -> None:
"""
Used by any Trading Agent subclass to query the last trade price for a symbol.
This activity is not logged.
Arguments:
symbol: The symbol to query.
"""
self.send_message(self.exchange_id, QueryLastTradeMsg(symbol))
def get_current_spread(self, symbol: str, depth: int = 1) -> None:
"""
Used by any Trading Agent subclass to query the current spread for a symbol.
This activity is not logged.
Arguments:
symbol: The symbol to query.
depth:
"""
self.send_message(self.exchange_id, QuerySpreadMsg(symbol, depth))
def get_order_stream(self, symbol: str, length: int = 1) -> None:
"""
        Used by any Trading Agent subclass to query the recent order stream for a symbol.
Arguments:
symbol: The symbol to query.
length:
"""
self.send_message(self.exchange_id, QueryOrderStreamMsg(symbol, length))
def get_transacted_volume(
self, symbol: str, lookback_period: str = "10min"
) -> None:
"""
Used by any trading agent subclass to query the total transacted volume in a
given lookback period.
Arguments:
symbol: The symbol to query.
lookback_period: The length of time to consider when calculating the volume.
"""
self.send_message(
self.exchange_id, QueryTransactedVolMsg(symbol, lookback_period)
)
def create_limit_order(
self,
symbol: str,
quantity: int,
side: Side,
limit_price: int,
order_id: Optional[int] = None,
is_hidden: bool = False,
is_price_to_comply: bool = False,
insert_by_id: bool = False,
is_post_only: bool = False,
ignore_risk: bool = True,
tag: Any = None,
    ) -> Optional[LimitOrder]:
"""
Used by any Trading Agent subclass to create a limit order.
Arguments:
symbol: A valid symbol.
quantity: Positive share quantity.
side: Side.BID or Side.ASK.
limit_price: Price in cents.
order_id: An optional order id (otherwise global autoincrement is used).
is_hidden:
is_price_to_comply:
insert_by_id:
is_post_only:
ignore_risk: Whether cash or risk limits should be enforced or ignored for
the order.
tag:
"""
order = LimitOrder(
agent_id=self.id,
time_placed=self.current_time,
symbol=symbol,
quantity=quantity,
side=side,
limit_price=limit_price,
is_hidden=is_hidden,
is_price_to_comply=is_price_to_comply,
insert_by_id=insert_by_id,
is_post_only=is_post_only,
order_id=order_id,
tag=tag,
)
if quantity > 0:
# Test if this order can be permitted given our at-risk limits.
new_holdings = self.holdings.copy()
q = order.quantity if order.side.is_bid() else -order.quantity
if order.symbol in new_holdings:
new_holdings[order.symbol] += q
else:
new_holdings[order.symbol] = q
# If at_risk is lower, always allow. Otherwise, new_at_risk must be below starting cash.
if not ignore_risk:
# Compute before and after at-risk capital.
at_risk = self.mark_to_market(self.holdings) - self.holdings["CASH"]
new_at_risk = self.mark_to_market(new_holdings) - new_holdings["CASH"]
if (new_at_risk > at_risk) and (new_at_risk > self.starting_cash):
logger.debug(
"TradingAgent ignored limit order due to at-risk constraints: {}\n{}".format(
order, self.fmt_holdings(self.holdings)
)
)
return
return order
else:
warnings.warn(f"TradingAgent ignored limit order of quantity zero: {order}")
def place_limit_order(
self,
symbol: str,
quantity: int,
side: Side,
limit_price: int,
order_id: Optional[int] = None,
is_hidden: bool = False,
is_price_to_comply: bool = False,
insert_by_id: bool = False,
is_post_only: bool = False,
ignore_risk: bool = True,
tag: Any = None,
) -> None:
"""
Used by any Trading Agent subclass to place a limit order.
Arguments:
symbol: A valid symbol.
quantity: Positive share quantity.
side: Side.BID or Side.ASK.
limit_price: Price in cents.
order_id: An optional order id (otherwise global autoincrement is used).
is_hidden:
is_price_to_comply:
insert_by_id:
is_post_only:
ignore_risk: Whether cash or risk limits should be enforced or ignored for
the order.
tag:
"""
order = self.create_limit_order(
symbol,
quantity,
side,
limit_price,
order_id,
is_hidden,
is_price_to_comply,
insert_by_id,
is_post_only,
ignore_risk,
tag,
)
if order is not None:
self.orders[order.order_id] = deepcopy(order)
self.send_message(self.exchange_id, LimitOrderMsg(order))
if self.log_orders:
self.logEvent("ORDER_SUBMITTED", order.to_dict(), deepcopy_event=False)
def place_market_order(
self,
symbol: str,
quantity: int,
side: Side,
order_id: Optional[int] = None,
ignore_risk: bool = True,
tag: Any = None,
) -> None:
"""
Used by any Trading Agent subclass to place a market order.
        The market order is executed as if it were a sequence of limit orders
        crossing the spread, walking the book until the full quantity is matched.
Arguments:
symbol: Name of the stock traded.
quantity: Order quantity.
side: Side.BID or Side.ASK.
order_id: Order ID for market replay.
ignore_risk: Whether cash or risk limits should be enforced or ignored for
the order.
tag:
"""
order = MarketOrder(
self.id, self.current_time, symbol, quantity, side, order_id, tag
)
if quantity > 0:
# compute new holdings
new_holdings = self.holdings.copy()
q = order.quantity if order.side.is_bid() else -order.quantity
if order.symbol in new_holdings:
new_holdings[order.symbol] += q
else:
new_holdings[order.symbol] = q
if not ignore_risk:
# Compute before and after at-risk capital.
at_risk = self.mark_to_market(self.holdings) - self.holdings["CASH"]
new_at_risk = self.mark_to_market(new_holdings) - new_holdings["CASH"]
if (new_at_risk > at_risk) and (new_at_risk > self.starting_cash):
logger.debug(
"TradingAgent ignored market order due to at-risk constraints: {}\n{}".format(
order, self.fmt_holdings(self.holdings)
)
)
return
self.orders[order.order_id] = deepcopy(order)
self.send_message(self.exchange_id, MarketOrderMsg(order))
if self.log_orders:
self.logEvent("ORDER_SUBMITTED", order.to_dict(), deepcopy_event=False)
else:
            warnings.warn(
                f"TradingAgent ignored market order of quantity zero: {order}"
            )
def place_multiple_orders(
self, orders: List[Union[LimitOrder, MarketOrder]]
) -> None:
"""
Used by any Trading Agent subclass to place multiple orders at the same time.
Arguments:
orders: A list of Orders to place with the exchange as a single batch.
"""
messages = []
for order in orders:
if isinstance(order, LimitOrder):
messages.append(LimitOrderMsg(order))
elif isinstance(order, MarketOrder):
messages.append(MarketOrderMsg(order))
else:
raise Exception("Expected LimitOrder or MarketOrder")
# Copy the intended order for logging, so any changes made to it elsewhere
# don't retroactively alter our "as placed" log of the order. Eventually
# it might be nice to make the whole history of the order into transaction
# objects inside the order (we're halfway there) so there CAN be just a single
# object per order, that never alters its original state, and eliminate all
# these copies.
self.orders[order.order_id] = deepcopy(order)
if self.log_orders:
self.logEvent("ORDER_SUBMITTED", order.to_dict(), deepcopy_event=False)
if len(messages) > 0:
self.send_message_batch(self.exchange_id, messages)
def cancel_order(
self, order: LimitOrder, tag: Optional[str] = None, metadata: dict = {}
) -> None:
"""
Used by derived classes of TradingAgent to cancel a limit order.
The order must currently appear in the agent's open orders list.
Arguments:
order: The limit order to cancel.
tag:
metadata:
"""
if isinstance(order, LimitOrder):
self.send_message(self.exchange_id, CancelOrderMsg(order, tag, metadata))
if self.log_orders:
self.logEvent("CANCEL_SUBMITTED", order.to_dict(), deepcopy_event=False)
else:
warnings.warn(f"Order {order} of type, {type(order)} cannot be cancelled")
def cancel_all_orders(self):
"""
Cancels all current limit orders held by this agent.
"""
for order in self.orders.values():
if isinstance(order, LimitOrder):
self.cancel_order(order)
def partial_cancel_order(
self,
order: LimitOrder,
quantity: int,
tag: Optional[str] = None,
metadata: dict = {},
) -> None:
"""
Used by any Trading Agent subclass to modify any existing limit order.
The order must currently appear in the agent's open orders list.
Arguments:
order: The limit order to partially cancel.
quantity:
tag:
metadata:
"""
self.send_message(
self.exchange_id, PartialCancelOrderMsg(order, quantity, tag, metadata)
)
if self.log_orders:
self.logEvent("CANCEL_PARTIAL_ORDER", order.to_dict(), deepcopy_event=False)
def modify_order(self, order: LimitOrder, new_order: LimitOrder) -> None:
"""
Used by any Trading Agent subclass to modify any existing limit order.
The order must currently appear in the agent's open orders list. Some
additional tests might be useful here to ensure the old and new orders are
the same in some way.
Arguments:
order: The existing limit order.
new_order: The limit order to update the existing order with.
"""
self.send_message(self.exchange_id, ModifyOrderMsg(order, new_order))
if self.log_orders:
self.logEvent("MODIFY_ORDER", order.to_dict(), deepcopy_event=False)
def replace_order(self, order: LimitOrder, new_order: LimitOrder) -> None:
"""
Used by any Trading Agent subclass to replace any existing limit order.
The order must currently appear in the agent's open orders list. Some
additional tests might be useful here to ensure the old and new orders are
the same in some way.
Arguments:
order: The existing limit order.
new_order: The new limit order to replace the existing order with.
"""
self.send_message(self.exchange_id, ReplaceOrderMsg(self.id, order, new_order))
if self.log_orders:
self.logEvent("REPLACE_ORDER", order.to_dict(), deepcopy_event=False)
def order_executed(self, order: Order) -> None:
"""
Handles OrderExecuted messages from an exchange agent.
Subclasses may wish to extend, but should still call parent method for basic
portfolio/returns tracking.
Arguments:
order: The order that has been executed by the exchange.
"""
logger.debug(f"Received notification of execution for: {order}")
if self.log_orders:
self.logEvent("ORDER_EXECUTED", order.to_dict(), deepcopy_event=False)
# At the very least, we must update CASH and holdings at execution time.
qty = order.quantity if order.side.is_bid() else -1 * order.quantity
sym = order.symbol
if sym in self.holdings:
self.holdings[sym] += qty
else:
self.holdings[sym] = qty
if self.holdings[sym] == 0:
del self.holdings[sym]
# As with everything else, CASH holdings are in CENTS.
self.holdings["CASH"] -= qty * order.fill_price
# If this original order is now fully executed, remove it from the open orders list.
# Otherwise, decrement by the quantity filled just now. It is _possible_ that due
# to timing issues, it might not be in the order list (i.e. we issued a cancellation
# but it was executed first, or something).
if order.order_id in self.orders:
o = self.orders[order.order_id]
if order.quantity >= o.quantity:
del self.orders[order.order_id]
else:
o.quantity -= order.quantity
else:
warnings.warn(f"Execution received for order not in orders list: {order}")
logger.debug(f"After order execution, agent open orders: {self.orders}")
self.logEvent("HOLDINGS_UPDATED", self.holdings)
def order_accepted(self, order: LimitOrder) -> None:
"""
Handles OrderAccepted messages from an exchange agent.
Subclasses may wish to extend.
Arguments:
order: The order that has been accepted from the exchange.
"""
logger.debug(f"Received notification of acceptance for: {order}")
if self.log_orders:
self.logEvent("ORDER_ACCEPTED", order.to_dict(), deepcopy_event=False)
# We may later wish to add a status to the open orders so an agent can tell whether
# a given order has been accepted or not (instead of needing to override this method).
def order_cancelled(self, order: LimitOrder) -> None:
"""
Handles OrderCancelled messages from an exchange agent.
Subclasses may wish to extend.
Arguments:
order: The order that has been cancelled by the exchange.
"""
logger.debug(f"Received notification of cancellation for: {order}")
if self.log_orders:
self.logEvent("ORDER_CANCELLED", order.to_dict(), deepcopy_event=False)
# Remove the cancelled order from the open orders list. We may of course wish to have
# additional logic here later, so agents can easily "look for" cancelled orders. Of
# course they can just override this method.
if order.order_id in self.orders:
del self.orders[order.order_id]
else:
warnings.warn(
f"Cancellation received for order not in orders list: {order}"
)
def order_partial_cancelled(self, order: LimitOrder) -> None:
"""
        Handles OrderPartialCancelled messages from an exchange agent.
Subclasses may wish to extend.
Arguments:
order: The order that has been partially cancelled by the exchange.
"""
logger.debug(f"Received notification of partial cancellation for: {order}")
if self.log_orders:
self.logEvent("PARTIAL_CANCELLED", order.to_dict())
# if orders still in the list of agent's order update agent's knowledge of
# current state of the order
if order.order_id in self.orders:
self.orders[order.order_id] = order
else:
warnings.warn(
f"partial cancellation received for order not in orders list: {order}"
)
logger.debug(
f"After order partial cancellation, agent open orders: {self.orders}"
)
self.logEvent("HOLDINGS_UPDATED", self.holdings)
def order_modified(self, order: LimitOrder) -> None:
"""
Handles OrderModified messages from an exchange agent.
Subclasses may wish to extend.
Arguments:
order: The order that has been modified at the exchange.
"""
logger.debug(f"Received notification of modification for: {order}")
if self.log_orders:
self.logEvent("ORDER_MODIFIED", order.to_dict())
# if orders still in the list of agent's order update agent's knowledge of
# current state of the order
if order.order_id in self.orders:
self.orders[order.order_id] = order
else:
warnings.warn("Execution received for order not in orders list: {order}")
logger.debug(f"After order modification, agent open orders: {self.orders}")
self.logEvent("HOLDINGS_UPDATED", self.holdings)
def order_replaced(self, old_order: LimitOrder, new_order: LimitOrder) -> None:
"""
Handles OrderReplaced messages from an exchange agent.
Subclasses may wish to extend.
        Arguments:
            old_order: The order that was replaced at the exchange.
            new_order: The new order that replaced it.
"""
logger.debug(f"Received notification of replacement for: {old_order}")
if self.log_orders:
self.logEvent("ORDER_REPLACED", old_order.to_dict())
# if orders still in the list of agent's order update agent's knowledge of
# current state of the order
if old_order.order_id in self.orders:
del self.orders[old_order.order_id]
else:
warnings.warn(
f"Execution received for order not in orders list: {old_order}"
)
self.orders[new_order.order_id] = new_order
logger.debug(f"After order replacement, agent open orders: {self.orders}")
# After execution, log holdings.
self.logEvent("HOLDINGS_UPDATED", self.holdings)
def market_closed(self) -> None:
"""
Handles MarketClosedMsg messages from an exchange agent.
Subclasses may wish to extend.
"""
logger.debug("Received notification of market closure.")
# Log this activity.
self.logEvent("MKT_CLOSED")
# Remember that this has happened.
self.mkt_closed = True
def query_last_trade(self, symbol: str, price: int) -> None:
"""
Handles QueryLastTradeResponseMsg messages from an exchange agent.
Arguments:
symbol: The symbol that was queried.
price: The price at which the last trade executed at.
"""
self.last_trade[symbol] = price
logger.debug(
"Received last trade price of {} for {}.".format(
self.last_trade[symbol], symbol
)
)
if self.mkt_closed:
# Note this as the final price of the day.
self.daily_close_price[symbol] = self.last_trade[symbol]
logger.debug(
"Received daily close price of {} for {}.".format(
self.last_trade[symbol], symbol
)
)
def query_spread(
self,
symbol: str,
price: int,
bids: List[List[Tuple[int, int]]],
asks: List[List[Tuple[int, int]]],
book: str,
) -> None:
"""
Handles QuerySpreadResponseMsg messages from an exchange agent.
Arguments:
symbol: The symbol that was queried.
price:
bids:
asks:
book:
"""
# The spread message now also includes last price for free.
self.query_last_trade(symbol, price)
self.known_bids[symbol] = bids
self.known_asks[symbol] = asks
if bids:
best_bid, best_bid_qty = (bids[0][0], bids[0][1])
else:
best_bid, best_bid_qty = ("No bids", 0)
if asks:
best_ask, best_ask_qty = (asks[0][0], asks[0][1])
else:
best_ask, best_ask_qty = ("No asks", 0)
logger.debug(
"Received spread of {} @ {} / {} @ {} for {}".format(
best_bid_qty, best_bid, best_ask_qty, best_ask, symbol
)
)
self.logEvent("BID_DEPTH", bids)
self.logEvent("ASK_DEPTH", asks)
self.logEvent(
"IMBALANCE", [sum([x[1] for x in bids]), sum([x[1] for x in asks])]
)
self.book = book
def handle_market_data(self, message: MarketDataMsg) -> None:
"""
Handles Market Data messages for agents using subscription mechanism.
Arguments:
            message: The market data message.
"""
if isinstance(message, L2DataMsg):
symbol = message.symbol
self.known_asks[symbol] = message.asks
self.known_bids[symbol] = message.bids
self.last_trade[symbol] = message.last_transaction
self.exchange_ts[symbol] = message.exchange_ts
def query_order_stream(self, symbol: str, orders) -> None:
"""
Handles QueryOrderStreamResponseMsg messages from an exchange agent.
It is up to the requesting agent to do something with the data, which is a list
of dictionaries keyed by order id. The list index is 0 for orders since the most
recent trade, 1 for orders that led up to the most recent trade, and so on.
Agents are not given index 0 (orders more recent than the last trade).
Arguments:
symbol: The symbol that was queried.
orders:
"""
self.stream_history[symbol] = orders
def query_transacted_volume(
self, symbol: str, bid_volume: int, ask_volume: int
) -> None:
"""
Handles the QueryTransactedVolResponseMsg messages from the exchange agent.
Arguments:
symbol: The symbol that was queried.
            bid_volume: The volume that has transacted on the bid side for the queried period.
            ask_volume: The volume that has transacted on the ask side for the queried period.
"""
self.transacted_volume[symbol] = (bid_volume, ask_volume)
# Utility functions that perform calculations from available knowledge, but implement no
# particular strategy.
def get_known_bid_ask(self, symbol: str, best: bool = True):
"""
Extract the current known bid and asks.
This does NOT request new information.
Arguments:
symbol: The symbol to query.
best:
"""
if best:
bid = self.known_bids[symbol][0][0] if self.known_bids[symbol] else None
ask = self.known_asks[symbol][0][0] if self.known_asks[symbol] else None
bid_vol = self.known_bids[symbol][0][1] if self.known_bids[symbol] else 0
ask_vol = self.known_asks[symbol][0][1] if self.known_asks[symbol] else 0
return bid, bid_vol, ask, ask_vol
else:
bids = self.known_bids[symbol] if self.known_bids[symbol] else None
asks = self.known_asks[symbol] if self.known_asks[symbol] else None
return bids, asks
def get_known_liquidity(self, symbol: str, within: float = 0.00) -> Tuple[int, int]:
"""
Extract the current bid and ask liquidity within a certain proportion of the
inside bid and ask. (i.e. within=0.01 means to report total BID shares
within 1% of the best bid price, and total ASK shares within 1% of the best
ask price)
Arguments:
symbol: The symbol to query.
within:
Returns:
(bid_liquidity, ask_liquidity). Note that this is from the order book
perspective, not the agent perspective. (The agent would be selling into
the bid liquidity, etc.)
"""
bid_liq = self.get_book_liquidity(self.known_bids[symbol], within)
ask_liq = self.get_book_liquidity(self.known_asks[symbol], within)
logger.debug("Bid/ask liq: {}, {}".format(bid_liq, ask_liq))
logger.debug("Known bids: {}".format(self.known_bids[self.symbol]))
logger.debug("Known asks: {}".format(self.known_asks[self.symbol]))
return bid_liq, ask_liq
def get_book_liquidity(self, book: Iterable[Tuple[int, int]], within: float) -> int:
"""
Helper function for the above. Checks one side of the known order book.
Arguments:
book:
within:
"""
liq = 0
for i, (price, shares) in enumerate(book):
if i == 0:
best = price
# Is this price within "within" proportion of the best price?
if abs(best - price) <= int(round(best * within)):
logger.debug(
"Within {} of {}: {} with {} shares".format(
within, best, price, shares
)
)
liq += shares
return liq
def mark_to_market(
self, holdings: Mapping[str, int], use_midpoint: bool = False
) -> int:
"""
Marks holdings to market (including cash).
Arguments:
holdings:
use_midpoint:
"""
cash = holdings["CASH"]
cash += self.basket_size * self.nav_diff
for symbol, shares in holdings.items():
if symbol == "CASH":
continue
if use_midpoint:
bid, ask, midpoint = self.get_known_bid_ask_midpoint(symbol)
if bid is None or ask is None or midpoint is None:
value = self.last_trade[symbol] * shares
else:
value = midpoint * shares
else:
value = self.last_trade[symbol] * shares
cash += value
self.logEvent(
"MARK_TO_MARKET",
"{} {} @ {} == {}".format(
shares, symbol, self.last_trade[symbol], value
),
)
self.logEvent("MARKED_TO_MARKET", cash)
return cash
def get_holdings(self, symbol: str) -> int:
"""
Gets holdings. Returns zero for any symbol not held.
Arguments:
symbol: The symbol to query.
"""
return self.holdings[symbol] if symbol in self.holdings else 0
def get_known_bid_ask_midpoint(
self, symbol: str
) -> Tuple[Optional[int], Optional[int], Optional[int]]:
"""
Get the known best bid, ask, and bid/ask midpoint from cached data. No volume.
Arguments:
symbol: The symbol to query.
"""
bid = self.known_bids[symbol][0][0] if self.known_bids[symbol] else None
ask = self.known_asks[symbol][0][0] if self.known_asks[symbol] else None
midpoint = (
int(round((bid + ask) / 2)) if bid is not None and ask is not None else None
)
return bid, ask, midpoint
def get_average_transaction_price(self) -> float:
"""Calculates the average price paid (weighted by the order size)."""
return round(
sum(
executed_order.quantity * executed_order.fill_price
for executed_order in self.executed_orders
)
/ sum(executed_order.quantity for executed_order in self.executed_orders),
2,
)
def fmt_holdings(self, holdings: Mapping[str, int]) -> str:
"""
Prints holdings.
Standard dictionary->string representation is almost fine, but it is less
confusing to see the CASH holdings in dollars and cents, instead of just integer
cents. We could change to a Holdings object that knows to print CASH "special".
Arguments:
holdings:
"""
h = ""
for k, v in sorted(holdings.items()):
if k == "CASH":
continue
h += "{}: {}, ".format(k, v)
# There must always be a CASH entry.
h += "{}: {}".format("CASH", holdings["CASH"])
h = "{ " + h + " }"
return h
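
# ---------------------------------------------------------------------------
# Editor's sketch (toy values, not part of the ABIDES API): two of the pure
# calculations above, re-derived standalone -- the "at-risk capital" test used
# by create_limit_order()/place_market_order() when ignore_risk=False, and the
# within-proportion book liquidity summed by get_book_liquidity().
if __name__ == "__main__":
    # At-risk capital is the mark-to-market value of non-CASH holdings. A new
    # order is rejected only if it both increases at-risk capital and pushes
    # it beyond starting cash.
    last_trade = {"ABM": 10_000}  # cents per share
    starting_cash = 1_000_000  # cents

    def at_risk(holdings: dict) -> int:
        mtm = holdings["CASH"] + sum(
            last_trade[s] * q for s, q in holdings.items() if s != "CASH"
        )
        return mtm - holdings["CASH"]

    holdings = {"CASH": 1_000_000, "ABM": 50}
    new_holdings = {"CASH": 1_000_000, "ABM": 150}  # after a 100-share buy
    old, new = at_risk(holdings), at_risk(new_holdings)
    allowed = not (new > old and new > starting_cash)
    print(f"at-risk {old} -> {new} cents, order allowed: {allowed}")

    # Book liquidity within 1% of the inside price, as in get_book_liquidity():
    # for a best bid of 10_000 cents, levels within 100 cents are counted.
    bids = [(10_000, 300), (9_950, 200), (9_800, 500)]  # (price, shares)
    within = 0.01
    best = bids[0][0]
    liq = sum(
        shares
        for price, shares in bids
        if abs(best - price) <= int(round(best * within))
    )
    print(f"bid liquidity within {within:.0%} of best: {liq} shares")  # 500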
| 43,676 | 34.138375 | 111 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/value_agent.py
|
import logging
from typing import Optional
import numpy as np
from abides_core import Message, NanosecondTime
from ..messages.query import QuerySpreadResponseMsg
from ..orders import Side
from .trading_agent import TradingAgent
logger = logging.getLogger(__name__)
class ValueAgent(TradingAgent):
def __init__(
self,
id: int,
name: Optional[str] = None,
type: Optional[str] = None,
random_state: Optional[np.random.RandomState] = None,
symbol: str = "IBM",
starting_cash: int = 100_000,
sigma_n: float = 10_000,
r_bar: int = 100_000,
kappa: float = 0.05,
sigma_s: float = 100_000,
order_size_model=None,
lambda_a: float = 0.005,
        log_orders: bool = False,
) -> None:
# Base class init.
super().__init__(id, name, type, random_state, starting_cash, log_orders)
# Store important parameters particular to the ZI agent.
self.symbol: str = symbol # symbol to trade
self.sigma_n: float = sigma_n # observation noise variance
self.r_bar: int = r_bar # true mean fundamental value
self.kappa: float = kappa # mean reversion parameter
self.sigma_s: float = sigma_s # shock variance
self.lambda_a: float = lambda_a # mean arrival rate of ZI agents
# The agent uses this to track whether it has begun its strategy or is still
# handling pre-market tasks.
self.trading: bool = False
# The agent begins in its "complete" state, not waiting for
# any special event or condition.
self.state: str = "AWAITING_WAKEUP"
# The agent maintains two priors: r_t and sigma_t (value and error estimates).
self.r_t: int = r_bar
self.sigma_t: float = 0
# The agent must track its previous wake time, so it knows how many time
# units have passed.
self.prev_wake_time: Optional[NanosecondTime] = None
# Percent of time that the agent will aggress the spread
self.percent_aggr: float = 0.1
self.size: Optional[int] = (
self.random_state.randint(20, 50) if order_size_model is None else None
)
self.order_size_model = order_size_model # Probabilistic model for order size
self.depth_spread: int = 2
def kernel_starting(self, start_time: NanosecondTime) -> None:
# self.kernel is set in Agent.kernel_initializing()
# self.exchange_id is set in TradingAgent.kernel_starting()
super().kernel_starting(start_time)
self.oracle = self.kernel.oracle
def kernel_stopping(self) -> None:
# Always call parent method to be safe.
super().kernel_stopping()
# Print end of day valuation.
H = int(round(self.get_holdings(self.symbol), -2) / 100)
# May request real fundamental value from oracle as part of final cleanup/stats.
# marked to fundamental
rT = self.oracle.observe_price(
self.symbol, self.current_time, sigma_n=0, random_state=self.random_state
)
# final (real) fundamental value times shares held.
surplus = rT * H
logger.debug("Surplus after holdings: {}", surplus)
# Add ending cash value and subtract starting cash value.
surplus += self.holdings["CASH"] - self.starting_cash
surplus = float(surplus) / self.starting_cash
self.logEvent("FINAL_VALUATION", surplus, True)
        logger.debug(
            "{} final report. Holdings: {}, end cash: {}, start cash: {}, final fundamental: {}, surplus: {}".format(
                self.name,
                H,
                self.holdings["CASH"],
                self.starting_cash,
                rT,
                surplus,
            )
        )
def wakeup(self, current_time: NanosecondTime) -> None:
# Parent class handles discovery of exchange times and market_open wakeup call.
super().wakeup(current_time)
self.state = "INACTIVE"
if not self.mkt_open or not self.mkt_close:
# TradingAgent handles discovery of exchange times.
return
else:
if not self.trading:
self.trading = True
# Time to start trading!
logger.debug("{} is ready to start trading now.", self.name)
# Steady state wakeup behavior starts here.
# If we've been told the market has closed for the day, we will only request
# final price information, then stop.
if self.mkt_closed and (self.symbol in self.daily_close_price):
# Market is closed and we already got the daily close price.
return
delta_time = self.random_state.exponential(scale=1.0 / self.lambda_a)
self.set_wakeup(current_time + int(round(delta_time)))
        if self.mkt_closed and (self.symbol not in self.daily_close_price):
self.get_current_spread(self.symbol)
self.state = "AWAITING_SPREAD"
return
self.cancel_all_orders()
if type(self) == ValueAgent:
self.get_current_spread(self.symbol)
self.state = "AWAITING_SPREAD"
else:
self.state = "ACTIVE"
def updateEstimates(self) -> int:
# Called by a background agent that wishes to obtain a new fundamental observation,
# update its internal estimation parameters, and compute a new total valuation for the
# action it is considering.
# The agent obtains a new noisy observation of the current fundamental value
# and uses this to update its internal estimates in a Bayesian manner.
obs_t = self.oracle.observe_price(
self.symbol,
self.current_time,
sigma_n=self.sigma_n,
random_state=self.random_state,
)
logger.debug("{} observed {} at {}", self.name, obs_t, self.current_time)
# Update internal estimates of the current fundamental value and our error of same.
# If this is our first estimate, treat the previous wake time as "market open".
if self.prev_wake_time is None:
self.prev_wake_time = self.mkt_open
# First, obtain an intermediate estimate of the fundamental value by advancing
# time from the previous wake time to the current time, performing mean
# reversion at each time step.
# delta must be integer time steps since last wake
delta = self.current_time - self.prev_wake_time
# Update r estimate for time advancement.
r_tprime = (1 - (1 - self.kappa) ** delta) * self.r_bar
r_tprime += ((1 - self.kappa) ** delta) * self.r_t
# Update sigma estimate for time advancement.
sigma_tprime = ((1 - self.kappa) ** (2 * delta)) * self.sigma_t
sigma_tprime += (
(1 - (1 - self.kappa) ** (2 * delta)) / (1 - (1 - self.kappa) ** 2)
) * self.sigma_s
# Apply the new observation, with "confidence" in the observation inversely proportional
# to the observation noise, and "confidence" in the previous estimate inversely proportional
# to the shock variance.
self.r_t = (self.sigma_n / (self.sigma_n + sigma_tprime)) * r_tprime
self.r_t += (sigma_tprime / (self.sigma_n + sigma_tprime)) * obs_t
        # The posterior variance shrinks from the time-advanced prior variance.
        self.sigma_t = (self.sigma_n * sigma_tprime) / (self.sigma_n + sigma_tprime)
# Now having a best estimate of the fundamental at time t, we can make our best estimate
# of the final fundamental (for time T) as of current time t. Delta is now the number
# of time steps remaining until the simulated exchange closes.
delta = max(0, (self.mkt_close - self.current_time))
# IDEA: instead of letting agent "imagine time forward" to the end of the day,
# impose a maximum forward delta, like ten minutes or so. This could make
# them think more like traders and less like long-term investors. Add
# this line of code (keeping the max() line above) to try it.
# delta = min(delta, 1000000000 * 60 * 10)
r_T = (1 - (1 - self.kappa) ** delta) * self.r_bar
r_T += ((1 - self.kappa) ** delta) * self.r_t
# Our final fundamental estimate should be quantized to whole units of value.
r_T = int(round(r_T))
# Finally (for the final fundamental estimation section) remember the current
# time as the previous wake time.
self.prev_wake_time = self.current_time
        logger.debug(
            "{} estimates r_T = {} as of {}".format(self.name, r_T, self.current_time)
        )
return r_T
def placeOrder(self) -> None:
# estimate final value of the fundamental price
# used for surplus calculation
r_T = self.updateEstimates()
bid, bid_vol, ask, ask_vol = self.get_known_bid_ask(self.symbol)
if bid and ask:
mid = int((ask + bid) / 2)
spread = abs(ask - bid)
if self.random_state.rand() < self.percent_aggr:
adjust_int = 0
else:
adjust_int = self.random_state.randint(
0, min(9223372036854775807 - 1, self.depth_spread * spread)
)
# adjustment to the limit price, allowed to post inside the spread
# or deeper in the book as a passive order to maximize surplus
if r_T < mid:
# fundamental belief that price will go down, place a sell order
buy = False
p = (
bid + adjust_int
) # submit a market order to sell, limit order inside the spread or deeper in the book
elif r_T >= mid:
# fundamental belief that price will go up, buy order
buy = True
p = (
ask - adjust_int
) # submit a market order to buy, a limit order inside the spread or deeper in the book
else:
# initialize randomly
buy = self.random_state.randint(0, 1 + 1)
p = r_T
# Place the order
if self.order_size_model is not None:
self.size = self.order_size_model.sample(random_state=self.random_state)
side = Side.BID if buy == 1 else Side.ASK
if self.size > 0:
self.place_limit_order(self.symbol, self.size, side, p)
def receive_message(
self, current_time: NanosecondTime, sender_id: int, message: Message
) -> None:
# Parent class schedules market open wakeup call once market open/close times are known.
super().receive_message(current_time, sender_id, message)
# We have been awakened by something other than our scheduled wakeup.
# If our internal state indicates we were waiting for a particular event,
# check if we can transition to a new state.
if self.state == "AWAITING_SPREAD":
# We were waiting to receive the current spread/book. Since we don't currently
# track timestamps on retained information, we rely on actually seeing a
# QUERY_SPREAD response message.
if isinstance(message, QuerySpreadResponseMsg):
# This is what we were waiting for.
# But if the market is now closed, don't advance to placing orders.
if self.mkt_closed:
return
# We now have the information needed to place a limit order with the eta
# strategic threshold parameter.
self.placeOrder()
self.state = "AWAITING_WAKEUP"
def get_wake_frequency(self) -> NanosecondTime:
delta_time = self.random_state.exponential(scale=1.0 / self.lambda_a)
return int(round(delta_time))
| 11,971 | 38.124183 | 111 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/exchange_agent.py
|
import datetime as dt
import logging
import warnings
from abc import ABC
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, DefaultDict, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
from abides_core import Kernel, Message, NanosecondTime
from ..messages.market import (
MarketClosedMsg,
MarketHoursMsg,
MarketHoursRequestMsg,
MarketClosePriceRequestMsg,
MarketClosePriceMsg,
)
from ..messages.marketdata import (
BookImbalanceDataMsg,
L1SubReqMsg,
L2SubReqMsg,
L3SubReqMsg,
TransactedVolSubReqMsg,
MarketDataSubReqMsg,
L1DataMsg,
L2DataMsg,
L3DataMsg,
TransactedVolDataMsg,
BookImbalanceSubReqMsg,
MarketDataEventMsg,
)
from ..messages.order import (
LimitOrderMsg,
MarketOrderMsg,
PartialCancelOrderMsg,
CancelOrderMsg,
ModifyOrderMsg,
ReplaceOrderMsg,
OrderMsg,
)
from ..messages.orderbook import OrderAcceptedMsg, OrderExecutedMsg, OrderCancelledMsg
from ..messages.query import (
QueryLastTradeMsg,
QueryLastTradeResponseMsg,
QueryMsg,
QuerySpreadMsg,
QuerySpreadResponseMsg,
QueryOrderStreamMsg,
QueryOrderStreamResponseMsg,
QueryTransactedVolMsg,
QueryTransactedVolResponseMsg,
)
from ..orders import Side
from ..order_book import OrderBook
from .financial_agent import FinancialAgent
logger = logging.getLogger(__name__)
pd.set_option("display.max_rows", 500)
class ExchangeAgent(FinancialAgent):
"""
The ExchangeAgent expects a numeric agent id, printable name, agent type, timestamp
to open and close trading, a list of equity symbols for which it should create order
books, a frequency at which to archive snapshots of its order books, a pipeline
delay (in ns) for order activity, the exchange computation delay (in ns), the levels
of order stream history to maintain per symbol (maintains all orders that led to the
last N trades), whether to log all order activity to the agent log, and a random
state object (already seeded) to use for stochasticity.
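A minimal construction sketch (argument values here are illustrative, not defaults):
exchange = ExchangeAgent(id=0, mkt_open=0, mkt_close=16 * 60 * 60 * 10**9, symbols=["ABM"], name="EXCHANGE")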
"""
@dataclass
class MetricTracker(ABC):
# dropout metrics
total_time_no_liquidity_asks: int = 0
total_time_no_liquidity_bids: int = 0
pct_time_no_liquidity_asks: float = 0
pct_time_no_liquidity_bids: float = 0
# exchanged volume
total_exchanged_volume: int = 0
# last trade
last_trade: Optional[int] = 0
# can be extended
@dataclass
class BaseDataSubscription(ABC):
"""
Base class for all types of data subscription registered with this agent.
"""
agent_id: int
last_update_ts: int
@dataclass
class FrequencyBasedSubscription(BaseDataSubscription, ABC):
"""
Base class for all types of data subscription that are sent from this agent
at a fixed, regular frequency.
"""
freq: int
@dataclass
class L1DataSubscription(FrequencyBasedSubscription):
pass
@dataclass
class L2DataSubscription(FrequencyBasedSubscription):
depth: int
@dataclass
class L3DataSubscription(FrequencyBasedSubscription):
depth: int
@dataclass
class TransactedVolDataSubscription(FrequencyBasedSubscription):
lookback: str
@dataclass
class EventBasedSubscription(BaseDataSubscription, ABC):
"""
Base class for all types of data subscription that are sent from this agent
when triggered by an event or specific circumstance.
"""
event_in_progress: bool
@dataclass
class BookImbalanceDataSubscription(EventBasedSubscription):
# Properties:
min_imbalance: float
# State:
imbalance: Optional[float] = None
side: Optional[Side] = None
def __init__(
self,
id: int,
mkt_open: NanosecondTime,
mkt_close: NanosecondTime,
symbols: List[str],
name: Optional[str] = None,
type: Optional[str] = None,
random_state: Optional[np.random.RandomState] = None,
book_logging: bool = True,
book_log_depth: int = 10,
pipeline_delay: int = 40000,
computation_delay: int = 1,
stream_history: int = 0,
log_orders: bool = False,
use_metric_tracker: bool = True,
) -> None:
super().__init__(id, name, type, random_state)
# symbols
self.symbols = symbols
# Do not request repeated wakeup calls.
self.reschedule: bool = False
# Store this exchange's open and close times.
self.mkt_open: NanosecondTime = mkt_open
self.mkt_close: NanosecondTime = mkt_close
# Right now, only the exchange agent has a parallel processing pipeline delay. This is an additional
# delay added only to order activity (placing orders, etc) and not simple inquiries (market operating
# hours, etc).
self.pipeline_delay: int = pipeline_delay
# Computation delay is applied on every wakeup call or message received.
self.computation_delay: int = computation_delay
# The exchange maintains an order stream of all orders leading to the last L trades
# to support certain agents from the auction literature (GD, HBL, etc).
self.stream_history: int = stream_history
self.book_logging: bool = book_logging
self.book_log_depth: int = book_log_depth
# Log all order activity?
self.log_orders: bool = log_orders
# Create an order book for each symbol.
self.order_books: Dict[str, OrderBook] = {
symbol: OrderBook(self, symbol) for symbol in symbols
}
if use_metric_tracker:
# Create a metric tracker for each symbol.
self.metric_trackers: Dict[str, ExchangeAgent.MetricTracker] = {
symbol: self.MetricTracker() for symbol in symbols
}
# The data_subscriptions dict maps each symbol to the list of subscription
# objects registered for it. Each subscription records the subscribing agent
# ID, the timestamp of the last update sent, and (depending on type) the number
# of levels to receive updates for and the minimum number of ns between messages (freq).
self.data_subscriptions: DefaultDict[
str, List[ExchangeAgent.BaseDataSubscription]
] = defaultdict(list)
# Store a list of agents who have requested market close price information.
# (this is most likely all agents)
self.market_close_price_subscriptions: List[int] = []
def kernel_initializing(self, kernel: "Kernel") -> None:
"""
The exchange agent overrides this to obtain a reference to an oracle.
This is needed to establish a "last trade price" at open (i.e. an opening
price) in case agents query last trade before any simulated trades are made.
This can probably go away once we code the opening cross auction.
Arguments:
kernel: The ABIDES kernel that this agent instance belongs to.
"""
super().kernel_initializing(kernel)
assert self.kernel is not None
self.oracle = self.kernel.oracle
# Obtain opening prices (in integer cents). These are not noisy right now.
for symbol in self.order_books:
try:
self.order_books[symbol].last_trade = self.oracle.get_daily_open_price(
symbol, self.mkt_open
)
logger.debug(
"Opening price for {} is {}".format(
symbol, self.order_books[symbol].last_trade
)
)
except AttributeError as e:
logger.debug(str(e))
# Set a wakeup for the market close so we can send market close price messages.
self.set_wakeup(self.mkt_close)
def kernel_terminating(self) -> None:
"""
The exchange agent overrides this to additionally log the full depth of its
order books for the entire day.
"""
super().kernel_terminating()
# print(self.order_books['ABM'].book_log2)
# Record the day's total transacted volume (note: hard-coded to the 'ABM' book).
bid_volume, ask_volume = self.order_books["ABM"].get_transacted_volume(
self.current_time - self.mkt_open
)
self.total_exchanged_volume = bid_volume + ask_volume
for symbol in self.symbols:
self.analyse_order_book(symbol)
for symbol in self.symbols:
bid_volume, ask_volume = self.order_books[symbol].get_transacted_volume(
self.current_time - self.mkt_open
)
self.metric_trackers[symbol].total_exchanged_volume = (
bid_volume + ask_volume
)
self.metric_trackers[symbol].last_trade = self.order_books[
symbol
].last_trade
if self.log_orders is None:
return
# If the oracle supports writing the fundamental value series for its
# symbols, write them to disk.
if hasattr(self.oracle, "f_log"):
for symbol in self.oracle.f_log:
dfFund = pd.DataFrame(self.oracle.f_log[symbol])
if not dfFund.empty:
dfFund.set_index("FundamentalTime", inplace=True)
self.write_log(dfFund, filename="fundamental_{}".format(symbol))
logger.debug("Fundamental archival complete.")
def wakeup(self, current_time: NanosecondTime):
super().wakeup(current_time)
# If we have reached market close, send market close price messages to all agents
# that requested them.
if current_time >= self.mkt_close:
message = MarketClosePriceMsg(
{symbol: book.last_trade for symbol, book in self.order_books.items()}
)
for agent in self.market_close_price_subscriptions:
self.send_message(agent, message)
def receive_message(
self, current_time: NanosecondTime, sender_id: int, message: Message
) -> None:
"""
Arguments:
current_time:
sender_id:
message:
"""
super().receive_message(current_time, sender_id, message)
# Unless the intent of an experiment is to examine computational issues
# within an Exchange, it will typically have either 1 ns delay (near
# instant but cannot process multiple orders in the same atomic time unit)
# or 0 ns delay (can process any number of orders, always in the atomic
# time unit in which they are received). This is separate from, and
# additional to, any parallel pipeline delay imposed for order book
# activity.
# Note that computation delay MUST be updated before any calls to send_message.
self.set_computation_delay(self.computation_delay)
# Is the exchange closed? (This block only affects post-close, not pre-open.)
if current_time > self.mkt_close:
# Most messages after close will receive a 'MKT_CLOSED' message in
# response. A few things might still be processed, like requests
# for final trade prices or such.
if isinstance(message, OrderMsg):
if isinstance(message, ModifyOrderMsg):
logger.debug(
"{} received {}: OLD: {} NEW: {}".format(
self.name,
message.type(),
message.old_order,
message.new_order,
)
)
else:
logger.debug(
"{} received {}: {}".format(
self.name, message.type(), message.order
)
)
self.send_message(sender_id, MarketClosedMsg())
# Don't do any further processing on these messages!
return
elif isinstance(message, QueryMsg):
# Specifically do allow querying after market close, so agents can get the
# final trade of the day as their "daily close" price for a symbol.
pass
else:
logger.debug(
"{} received {}, discarded: market is closed.".format(
self.name, message.type()
)
)
self.send_message(sender_id, MarketClosedMsg())
# Don't do any further processing on these messages!
return
if isinstance(message, OrderMsg):
# Log order messages only if that option is configured. Log all other messages.
if self.log_orders:
if isinstance(message, (ModifyOrderMsg, ReplaceOrderMsg)):
self.logEvent(
message.type(),
message.new_order.to_dict(),
deepcopy_event=False,
)
else:
self.logEvent(
message.type(), message.order.to_dict(), deepcopy_event=False
)
else:
self.logEvent(message.type(), message)
if isinstance(message, MarketDataSubReqMsg):
# Handle the DATA SUBSCRIPTION request and cancellation messages from the agents.
if message.symbol not in self.order_books:
return
if message.cancel:
logger.debug(
"{} received MarketDataSubscriptionCancellation request from agent {}".format(
self.name, sender_id
)
)
for data_sub in self.data_subscriptions[message.symbol]:
if (
data_sub.agent_id == sender_id
and data_sub.freq == message.freq
and data_sub.depth == message.depth
and data_sub.__class__ == message.__class__
):
self.data_subscriptions[message.symbol].remove(data_sub)
else:
logger.debug(
"{} received MarketDataSubscriptionRequest request from agent {}".format(
self.name, sender_id
)
)
if isinstance(message, L1SubReqMsg):
sub: self.BaseDataSubscription = self.L1DataSubscription(
sender_id, current_time, message.freq
)
elif isinstance(message, L2SubReqMsg):
sub = self.L2DataSubscription(
sender_id, current_time, message.freq, message.depth
)
elif isinstance(message, L3SubReqMsg):
sub = self.L3DataSubscription(
sender_id, current_time, message.freq, message.depth
)
elif isinstance(message, TransactedVolSubReqMsg):
sub = self.TransactedVolDataSubscription(
sender_id, current_time, message.freq, message.lookback
)
elif isinstance(message, BookImbalanceSubReqMsg):
sub = self.BookImbalanceDataSubscription(
sender_id, current_time, False, message.min_imbalance
)
else:
raise Exception
self.data_subscriptions[message.symbol].append(sub)
if isinstance(message, MarketHoursRequestMsg):
logger.debug(
"{} received market hours request from agent {}".format(
self.name, sender_id
)
)
# The exchange is permitted to respond to requests for simple
# immutable data (like "what are your hours?") instantly. This does
# NOT include anything that queries mutable data, like equity quotes
# or trades.
self.set_computation_delay(0)
self.send_message(sender_id, MarketHoursMsg(self.mkt_open, self.mkt_close))
elif isinstance(message, MarketClosePriceRequestMsg):
self.market_close_price_subscriptions.append(sender_id)
elif isinstance(message, QueryLastTradeMsg):
symbol = message.symbol
if symbol not in self.order_books:
warnings.warn(f"Last trade request discarded. Unknown symbol: {symbol}")
else:
logger.debug(
"{} received QUERY_LAST_TRADE ({}) request from agent {}".format(
self.name, symbol, sender_id
)
)
# Return the single last executed trade price (currently not
# volume) for the requested symbol. This will return the average
# share price if multiple executions resulted from a single order.
self.send_message(
sender_id,
QueryLastTradeResponseMsg(
symbol=symbol,
last_trade=self.order_books[symbol].last_trade,
mkt_closed=current_time > self.mkt_close,
),
)
elif isinstance(message, QuerySpreadMsg):
symbol = message.symbol
depth = message.depth
if symbol not in self.order_books:
warnings.warn(
f"Bid-ask spread request discarded. Unknown symbol: {symbol}"
)
else:
logger.debug(
"{} received QUERY_SPREAD ({}:{}) request from agent {}".format(
self.name, symbol, depth, sender_id
)
)
# Return the requested depth on both sides of the order book for
# the requested symbol. Returns price levels and aggregated
# volume at each level (not individual orders).
self.send_message(
sender_id,
QuerySpreadResponseMsg(
symbol=symbol,
depth=depth,
bids=self.order_books[symbol].get_l2_bid_data(depth),
asks=self.order_books[symbol].get_l2_ask_data(depth),
last_trade=self.order_books[symbol].last_trade,
mkt_closed=current_time > self.mkt_close,
),
)
# It is possible to also send the pretty-printed order book to
# the agent for logging, but forcing pretty-printing of a large
# order book is very slow, so we should only do it with good
# reason. We don't currently have a configurable option for it.
# "book": self.order_books[symbol].pretty_print(silent=True) }))
elif isinstance(message, QueryOrderStreamMsg):
symbol = message.symbol
length = message.length
if symbol not in self.order_books:
warnings.warn(
f"Order stream request discarded. Unknown symbol: {symbol}"
)
else:
logger.debug(
"{} received QUERY_ORDER_STREAM ({}:{}) request from agent {}".format(
self.name, symbol, length, sender_id
)
)
# We return indices [1:length] inclusive because the agent will want
# "orders leading up to the last L trades", and the items under
# index 0 are more recent than the last trade.
self.send_message(
sender_id,
QueryOrderStreamResponseMsg(
symbol=symbol,
length=length,
orders=self.order_books[symbol].history[1 : length + 1],
mkt_closed=current_time > self.mkt_close,
),
)
elif isinstance(message, QueryTransactedVolMsg):
symbol = message.symbol
lookback_period = message.lookback_period
if symbol not in self.order_books:
warnings.warn(
f"Order stream request discarded. Unknown symbol: {symbol}"
)
else:
logger.debug(
"{} received QUERY_TRANSACTED_VOLUME ({}:{}) request from agent {}".format(
self.name, symbol, lookback_period, sender_id
)
)
bid_volume, ask_volume = self.order_books[symbol].get_transacted_volume(
lookback_period
)
self.send_message(
sender_id,
QueryTransactedVolResponseMsg(
symbol=symbol,
bid_volume=bid_volume,
ask_volume=ask_volume,
mkt_closed=current_time > self.mkt_close,
),
)
elif isinstance(message, LimitOrderMsg):
logger.debug("{} received LIMIT_ORDER: {}".format(self.name, message.order))
if message.order.symbol not in self.order_books:
warnings.warn(
f"Limit Order discarded. Unknown symbol: {message.order.symbol}"
)
else:
# Hand the order to the order book for processing.
self.order_books[message.order.symbol].handle_limit_order(
deepcopy(message.order)
)
self.publish_order_book_data()
elif isinstance(message, MarketOrderMsg):
logger.debug(
"{} received MARKET_ORDER: {}".format(self.name, message.order)
)
if message.order.symbol not in self.order_books:
warnings.warn(
f"Market Order discarded. Unknown symbol: {message.order.symbol}"
)
else:
# Hand the market order to the order book for processing.
self.order_books[message.order.symbol].handle_market_order(
deepcopy(message.order)
)
self.publish_order_book_data()
elif isinstance(message, CancelOrderMsg):
tag = message.tag
metadata = message.metadata
logger.debug(
"{} received CANCEL_ORDER: {}".format(self.name, message.order)
)
if message.order.symbol not in self.order_books:
warnings.warn(
f"Cancellation request discarded. Unknown symbol: {message.order.symbol}"
)
else:
# Hand the order to the order book for processing.
self.order_books[message.order.symbol].cancel_order(
deepcopy(message.order), tag, metadata
)
self.publish_order_book_data()
elif isinstance(message, PartialCancelOrderMsg):
tag = message.tag
metadata = message.metadata
logger.debug(
"{} received PARTIAL_CANCEL_ORDER: {}, new order: {}".format(
self.name, message.order, message.quantity
)
)
if message.order.symbol not in self.order_books:
warnings.warn(
f"Modification request discarded. Unknown symbol: {message.order.symbol}"
)
else:
self.order_books[message.order.symbol].partial_cancel_order(
deepcopy(message.order), message.quantity, tag, metadata
)
self.publish_order_book_data()
elif isinstance(message, ModifyOrderMsg):
old_order = message.old_order
new_order = message.new_order
logger.debug(
"{} received MODIFY_ORDER: {}, new order: {}".format(
self.name, old_order, new_order
)
)
if old_order.symbol not in self.order_books:
warnings.warn(
f"Modification request discarded. Unknown symbol: {old_order.symbol}"
)
else:
self.order_books[old_order.symbol].modify_order(
deepcopy(old_order), deepcopy(new_order)
)
self.publish_order_book_data()
elif isinstance(message, ReplaceOrderMsg):
agent_id = message.agent_id
order = message.old_order
new_order = message.new_order
logger.debug(
"{} received REPLACE_ORDER: {}, new order: {}".format(
self.name, order, new_order
)
)
if order.symbol not in self.order_books:
warnings.warn(
f"Replacement request discarded. Unknown symbol: {order.symbol}"
)
else:
self.order_books[order.symbol].replace_order(
agent_id, deepcopy(order), deepcopy(new_order)
)
self.publish_order_book_data()
def publish_order_book_data(self) -> None:
"""
The exchange agent sends an order book update to the agents using the
subscription API if one of the following conditions is met:
1) the agent requested ALL order book updates (freq == 0)
2) the order book update timestamp exceeds the timestamp of the last update
sent to that agent by more than the period specified in the freq parameter
"""
for symbol, data_subs in self.data_subscriptions.items():
book = self.order_books[symbol]
for data_sub in data_subs:
if isinstance(data_sub, self.FrequencyBasedSubscription):
messages = self.handle_frequency_based_data_subscription(
symbol, data_sub
)
elif isinstance(data_sub, self.EventBasedSubscription):
messages = self.handle_event_based_data_subscription(
symbol, data_sub
)
else:
raise Exception("Got invalid data subscription object")
for message in messages:
self.send_message(data_sub.agent_id, message)
if len(messages) > 0:
data_sub.last_update_ts = book.last_update_ts
def handle_frequency_based_data_subscription(
self, symbol: str, data_sub: "ExchangeAgent.FrequencyBasedSubscription"
) -> List[Message]:
book = self.order_books[symbol]
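# Throttle: emit a fresh snapshot only if at least `freq` ns have elapsed
# since this subscriber was last sent an update.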
if (book.last_update_ts - data_sub.last_update_ts) < data_sub.freq:
return []
messages = []
if isinstance(data_sub, self.L1DataSubscription):
bid = book.get_l1_bid_data()
ask = book.get_l1_ask_data()
messages.append(
L1DataMsg(symbol, book.last_trade, self.current_time, bid, ask)
)
elif isinstance(data_sub, self.L2DataSubscription):
bids = book.get_l2_bid_data(data_sub.depth)
asks = book.get_l2_ask_data(data_sub.depth)
messages.append(
L2DataMsg(
symbol,
book.last_trade,
self.current_time,
bids,
asks,
)
)
elif isinstance(data_sub, self.L3DataSubscription):
bids = book.get_l3_bid_data(data_sub.depth)
asks = book.get_l3_ask_data(data_sub.depth)
messages.append(
L3DataMsg(
symbol,
book.last_trade,
self.current_time,
bids,
asks,
)
)
elif isinstance(data_sub, self.TransactedVolDataSubscription):
bid_volume, ask_volume = book.get_transacted_volume(data_sub.lookback)
messages.append(
TransactedVolDataMsg(
symbol,
book.last_trade,
self.current_time,
bid_volume,
ask_volume,
)
)
else:
raise Exception("Got invalid data subscription object")
return messages
def handle_event_based_data_subscription(
self, symbol: str, data_sub: "ExchangeAgent.EventBasedSubscription"
) -> List[Message]:
book = self.order_books[symbol]
messages = []
if isinstance(data_sub, self.BookImbalanceDataSubscription):
imbalance, side = book.get_imbalance()
event_in_progress = imbalance > data_sub.min_imbalance
# 4 different combinations of current state vs. new state to consider:
if data_sub.event_in_progress and event_in_progress:
# Event in progress --> Event in progress
if side != data_sub.side:
# If imbalance flips from one side of the market to the other in one step
# Close current event
messages.append(
BookImbalanceDataMsg(
symbol,
book.last_trade,
self.current_time,
MarketDataEventMsg.Stage.FINISH,
data_sub.imbalance,
data_sub.side,
)
)
# Start new event
data_sub.event_in_progress = True
data_sub.side = side
data_sub.imbalance = imbalance
messages.append(
BookImbalanceDataMsg(
symbol,
book.last_trade,
self.current_time,
MarketDataEventMsg.Stage.START,
imbalance,
side,
)
)
elif data_sub.event_in_progress and not event_in_progress:
# Event in progress --> Event not in progress
data_sub.event_in_progress = False
data_sub.side = None
data_sub.imbalance = None
messages.append(
BookImbalanceDataMsg(
symbol,
book.last_trade,
self.current_time,
MarketDataEventMsg.Stage.FINISH,
imbalance,
side,
)
)
elif not data_sub.event_in_progress and event_in_progress:
# Event not in progress --> Event in progress
data_sub.event_in_progress = True
data_sub.side = side
data_sub.imbalance = imbalance
messages.append(
BookImbalanceDataMsg(
symbol,
book.last_trade,
self.current_time,
MarketDataEventMsg.Stage.START,
imbalance,
side,
)
)
elif not data_sub.event_in_progress and not event_in_progress:
# Event not in progress --> Event not in progress
pass
else:
raise Exception("Got invalid data subscription object")
return messages
def logL2style(self, symbol: str) -> Optional[Tuple[List, List]]:
book = self.order_books[symbol]
if not book.book_log2:
return None
tmp = book.book_log2
times = []
booktop = []
for t in tmp:
times.append(t["QuoteTime"])
booktop.append([t["bids"], t["asks"]])
return (times, booktop)
def send_message(self, recipient_id: int, message: Message) -> None:
"""
Arguments:
recipient_id:
message:
"""
# The ExchangeAgent automatically applies appropriate parallel processing pipeline delay
# to those message types which require it.
# TODO: probably organize the order types into categories once there are more, so we can
# take action by category (e.g. ORDER-related messages) instead of enumerating all message
# types to be affected.
if isinstance(message, (OrderAcceptedMsg, OrderCancelledMsg, OrderExecutedMsg)):
# Messages that require order book modification (not simple queries) incur the additional
# parallel processing delay as configured.
super().send_message(recipient_id, message, delay=self.pipeline_delay)
if self.log_orders:
self.logEvent(message.type(), message.order.to_dict())
else:
# Other message types incur only the currently-configured computation delay for this agent.
super().send_message(recipient_id, message)
def analyse_order_book(self, symbol: str):
# will grow with time
book = self.order_books[symbol].book_log2
self.get_time_dropout(book, symbol)
def get_time_dropout(self, book: List[Dict[str, Any]], symbol: str):
if len(book) == 0:
return
df = pd.DataFrame(book)
total_time = df["QuoteTime"].iloc[-1] - df["QuoteTime"].iloc[0]
is_null_bids = False
t_null_bids_first = 0
T_null_bids = 0
is_null_asks = False
t_null_asks_first = 0
T_null_asks = 0
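# Scan the book snapshots in time order, accumulating for each side the total
# time during which it held no resting liquidity (a liquidity dropout).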
for _, row in df.iterrows():
if (len(row["bids"]) == 0) and not is_null_bids:
t_null_bids_first = row["QuoteTime"]
is_null_bids = True
elif (len(row["bids"]) != 0) and is_null_bids:
T_null_bids += row["QuoteTime"] - t_null_bids_first
is_null_bids = False
if (len(row["asks"]) == 0) and not is_null_asks:
t_null_asks_first = row["QuoteTime"]
is_null_asks = True
elif (len(row["asks"]) != 0) and is_null_asks:
T_null_asks += row["QuoteTime"] - t_null_asks_first
is_null_asks = False
self.metric_trackers[symbol] = self.MetricTracker(
total_time_no_liquidity_asks=T_null_asks / 1e9,
total_time_no_liquidity_bids=T_null_bids / 1e9,
pct_time_no_liquidity_asks=100 * T_null_asks / total_time,
pct_time_no_liquidity_bids=100 * T_null_bids / total_time,
)
| 35,904 | 36.994709 | 109 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/utils.py
|
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from ..price_level import PriceLevel
################## STATE MANIPULATION ###############################
def list_dict_flip(ld: List[Dict[str, Any]]) -> Dict[str, List[Any]]:
"""
Utility that flips a list of dictionaries into a dictionary of lists
Arguments:
- ld: list of dictionaries
Returns:
- flipped: dictionary of lists
Example:
- ld = [{"a":1, "b":2}, {"a":3, "b":4}]
- flipped = {'a': [1, 3], 'b': [2, 4]}
"""
flipped = dict((k, []) for (k, v) in ld[0].items())
for rs in ld:
for k in flipped.keys():
flipped[k].append(rs[k])
return flipped
def identity_decorator(func):
"""
Identity decorator: takes a function and returns that same function
Arguments:
- func: function
Returns:
- wrapper_identity_decorator: function
"""
def wrapper_identity_decorator(*args, **kvargs):
return func(*args, **kvargs)
return wrapper_identity_decorator
def ignore_mkt_data_buffer_decorator(func):
"""
Decorator for function that takes as input self and raw_state.
Applies the given function while ignoring the buffering in the market data.
Only last element of the market data buffer is kept
Arguments:
- func: function
Returns:
- wrapper_mkt_data_buffer_decorator: function
"""
def wrapper_mkt_data_buffer_decorator(self, raw_state):
raw_state_copy = deepcopy(raw_state)
for i in range(len(raw_state)):
raw_state[i]["parsed_mkt_data"] = raw_state_copy[i]["parsed_mkt_data"][-1]
raw_state[i]["parsed_volume_data"] = raw_state_copy[i][
"parsed_volume_data"
][-1]
raw_state2 = list_dict_flip(raw_state)
flipped = dict((k, list_dict_flip(v)) for (k, v) in raw_state2.items())
return func(self, flipped)
return wrapper_mkt_data_buffer_decorator
def ignore_buffers_decorator(func):
"""
Decorator for function that takes as input self and raw_state.
Applies the given function while ignoring the buffering in both the market data and the general raw state.
Only last elements are kept.
Arguments:
- func: function
Returns:
- wrapper_ignore_buffers_decorator: function
"""
def wrapper_ignore_buffers_decorator(self, raw_state):
raw_state = raw_state[-1]
if len(raw_state["parsed_mkt_data"]) == 0:
pass
else:
raw_state["parsed_mkt_data"] = raw_state["parsed_mkt_data"][-1]
if raw_state["parsed_volume_data"]:
raw_state["parsed_volume_data"] = raw_state["parsed_volume_data"][-1]
return func(self, raw_state)
return wrapper_ignore_buffers_decorator
################# ORDERBOOK PRIMITIVES ######################
def get_mid_price(
bids: List[PriceLevel], asks: List[PriceLevel], last_transaction: int
) -> int:
"""
Utility that computes the mid price from the snapshot of bid and ask side
Arguments:
- bids: list of list snapshot of bid side
- asks: list of list snapshot of ask side
- last_transaction: last transaction in the market, used for corner cases when one side of the OB is empty
Returns:
- mid_price value
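Example:
- bids = [(100, 5)], asks = [(102, 3)] -> 101.0
- bids = [], asks = [] -> last_transaction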
"""
if len(bids) == 0 and len(asks) == 0:
return last_transaction
elif len(bids) == 0:
return asks[0][0]
elif len(asks) == 0:
return bids[0][0]
else:
return (bids[0][0] + asks[0][0]) / 2
def get_val(book: List[PriceLevel], level: int) -> Tuple[int, int]:
"""
utility to compute the price and volume at the level-th level of the order book
Arguments:
- book: side of the order book (bid or ask)
- level: level of interest in the OB side (index starts at 0 for best bid/ask)
Returns:
- tuple (price, volume) at the level-th level
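Example:
- book = [(100, 5), (99, 10)], level = 1 -> (99, 10)
- book = [] or level out of range -> (0, 0)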
"""
if book == []:
return 0, 0
else:
try:
price = book[level][0]
volume = book[level][1]
return price, volume
except IndexError:
return 0, 0
def get_last_val(book: List[PriceLevel], mid_price: int) -> int:
"""
utility to compute the price of the deepest placed order in the side of the order book
Arguments:
- book: side of the order book (bid or ask)
- mid_price: current mid price used for corner cases
Returns:
- price of the deepest order, or mid_price if the side is empty
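Example:
- book = [(100, 5), (99, 10)] -> 99
- book = [] -> mid_price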
"""
if book == []:
return mid_price
else:
return book[-1][0]
def get_volume(book: List[PriceLevel], depth: Optional[int] = None) -> int:
"""
utility to compute the volume placed between the top of the book (depth 0) and the depth
Arguments:
- book: side of the order book (bid or ask)
- depth: depth used to compute sum of the volume
Returns:
- volume placed
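Example:
- book = [(100, 5), (99, 10), (98, 1)], depth = 2 -> 15
- same book, depth = None -> 16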
"""
if depth is None:
return sum([v[1] for v in book])
else:
return sum([v[1] for v in book[:depth]])
def get_imbalance(
bids: List[PriceLevel],
asks: List[PriceLevel],
direction: str = "BUY",
depth: Optional[int] = None,
) -> float:
"""
utility to compute the order book imbalance between the top of the book and the depth-th level
Arguments:
- bids: list of list snapshot of bid side
- asks: list of list snapshot of ask side
- direction: side used to compute the numerator in the division
- depth: depth used to compute sum of the volume
Returns:
- imbalance
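Example:
- bids = [(100, 30)], asks = [(102, 10)], direction = "BUY" -> 0.75
- bids = [], asks = [] -> 0.5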
"""
# None corresponds to the whole book depth
if (bids == []) and (asks == []):
return 0.5
elif bids == []:
if direction == "BUY":
return 0
else:
return 1
elif asks == []:
if direction == "BUY":
return 1
else:
return 0
else:
if depth is None:
bid_vol = sum([v[1] for v in bids])
ask_vol = sum([v[1] for v in asks])
else:
bid_vol = sum([v[1] for v in bids[:depth]])
ask_vol = sum([v[1] for v in asks[:depth]])
if direction == "BUY":
return bid_vol / (bid_vol + ask_vol)
else:
return ask_vol / (bid_vol + ask_vol)
| 6,370 | 28.771028 | 113 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/__init__.py
|
from .examples.momentum_agent import MomentumAgent
from .market_makers.adaptive_market_maker_agent import AdaptiveMarketMakerAgent
from .exchange_agent import ExchangeAgent
from .financial_agent import FinancialAgent
from .noise_agent import NoiseAgent
from .trading_agent import TradingAgent
from .value_agent import ValueAgent
| 331 | 32.2 | 79 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/financial_agent.py
|
from typing import List, Optional, Union
import numpy as np
from abides_core import Agent
from ..utils import dollarize
class FinancialAgent(Agent):
"""
The FinancialAgent class contains attributes and methods that should be available to
all agent types (traders, exchanges, etc) in a financial market simulation.
To be honest, it mainly exists because the base Agent class should not have any
finance-specific aspects and it doesn't make sense for ExchangeAgent to inherit from
TradingAgent. Hopefully we'll find more common ground for traders and exchanges to
make this more useful later on.
"""
def __init__(
self,
id: int,
name: Optional[str] = None,
type: Optional[str] = None,
random_state: Optional[np.random.RandomState] = None,
) -> None:
# Base class init.
super().__init__(id, name, type, random_state)
def dollarize(self, cents: Union[List[int], int]) -> Union[List[str], str]:
"""
Used by any subclass to dollarize an int-cents price for printing.
"""
return dollarize(cents)
| 1,129 | 30.388889 | 88 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/examples/momentum_agent.py
|
from typing import List, Optional
import numpy as np
from abides_core import Message, NanosecondTime
from abides_core.utils import str_to_ns
from ...messages.marketdata import MarketDataMsg, L2SubReqMsg
from ...messages.query import QuerySpreadResponseMsg
from ...orders import Side
from ..trading_agent import TradingAgent
class MomentumAgent(TradingAgent):
"""
Simple trading agent that compares the average of the past 20 mid-price observations with the average of
the past 50 and places a buy limit order if the 20-observation average >= the 50-observation average,
or a sell limit order otherwise
"""
def __init__(
self,
id: int,
symbol,
starting_cash,
name: Optional[str] = None,
type: Optional[str] = None,
random_state: Optional[np.random.RandomState] = None,
min_size=20,
max_size=50,
wake_up_freq: NanosecondTime = str_to_ns("60s"),
poisson_arrival=True,
order_size_model=None,
subscribe=False,
log_orders=False,
) -> None:
super().__init__(id, name, type, random_state, starting_cash, log_orders)
self.symbol = symbol
self.min_size = min_size # Minimum order size
self.max_size = max_size # Maximum order size
self.size = (
self.random_state.randint(self.min_size, self.max_size)
if order_size_model is None
else None
)
self.order_size_model = order_size_model # Probabilistic model for order size
self.wake_up_freq = wake_up_freq
self.poisson_arrival = poisson_arrival # Whether to arrive as a Poisson process
if self.poisson_arrival:
self.arrival_rate = self.wake_up_freq
self.subscribe = subscribe # Flag to determine whether to subscribe to data or use polling mechanism
self.subscription_requested = False
self.mid_list: List[float] = []
self.avg_20_list: List[float] = []
self.avg_50_list: List[float] = []
self.log_orders = log_orders
self.state = "AWAITING_WAKEUP"
def kernel_starting(self, start_time: NanosecondTime) -> None:
super().kernel_starting(start_time)
def wakeup(self, current_time: NanosecondTime) -> None:
"""Agent wakeup is determined by self.wake_up_freq"""
can_trade = super().wakeup(current_time)
if self.subscribe and not self.subscription_requested:
super().request_data_subscription(
L2SubReqMsg(
symbol=self.symbol,
freq=int(10e9),
depth=1,
)
)
self.subscription_requested = True
self.state = "AWAITING_MARKET_DATA"
elif can_trade and not self.subscribe:
self.get_current_spread(self.symbol)
self.state = "AWAITING_SPREAD"
def receive_message(
self, current_time: NanosecondTime, sender_id: int, message: Message
) -> None:
"""Momentum agent actions are determined after obtaining the best bid and ask in the LOB"""
super().receive_message(current_time, sender_id, message)
if (
not self.subscribe
and self.state == "AWAITING_SPREAD"
and isinstance(message, QuerySpreadResponseMsg)
):
bid, _, ask, _ = self.get_known_bid_ask(self.symbol)
self.place_orders(bid, ask)
self.set_wakeup(current_time + self.get_wake_frequency())
self.state = "AWAITING_WAKEUP"
elif (
self.subscribe
and self.state == "AWAITING_MARKET_DATA"
and isinstance(message, MarketDataMsg)
):
bids, asks = self.known_bids[self.symbol], self.known_asks[self.symbol]
if bids and asks:
self.place_orders(bids[0][0], asks[0][0])
self.state = "AWAITING_MARKET_DATA"
def place_orders(self, bid: int, ask: int) -> None:
"""Momentum Agent actions logic"""
if bid and ask:
self.mid_list.append((bid + ask) / 2)
if len(self.mid_list) > 20:
self.avg_20_list.append(
MomentumAgent.ma(self.mid_list, n=20)[-1].round(2)
)
if len(self.mid_list) > 50:
self.avg_50_list.append(
MomentumAgent.ma(self.mid_list, n=50)[-1].round(2)
)
if len(self.avg_20_list) > 0 and len(self.avg_50_list) > 0:
if self.order_size_model is not None:
self.size = self.order_size_model.sample(
random_state=self.random_state
)
if self.size > 0:
if self.avg_20_list[-1] >= self.avg_50_list[-1]:
self.place_limit_order(
self.symbol,
quantity=self.size,
side=Side.BID,
limit_price=ask,
)
else:
self.place_limit_order(
self.symbol,
quantity=self.size,
side=Side.ASK,
limit_price=bid,
)
def get_wake_frequency(self) -> NanosecondTime:
if not self.poisson_arrival:
return self.wake_up_freq
else:
delta_time = self.random_state.exponential(scale=self.arrival_rate)
return int(round(delta_time))
@staticmethod
def ma(a, n=20):
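# Simple moving average via the cumulative-sum trick: after the subtraction
# below, ret[i] holds the sum of the n values ending at index i.
# e.g. ma([1, 2, 3, 4], n=2) -> array([1.5, 2.5, 3.5])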
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
| 5,795 | 37.384106 | 116 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/examples/__init__.py
|
from .momentum_agent import MomentumAgent
| 42 | 20.5 | 41 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/market_makers/adaptive_market_maker_agent.py
|
import logging
from math import floor, ceil
from typing import Dict, List, Optional, Tuple
import numpy as np
from abides_core import Message, NanosecondTime
from ...utils import sigmoid
from ...messages.marketdata import (
MarketDataMsg,
L2SubReqMsg,
BookImbalanceDataMsg,
BookImbalanceSubReqMsg,
MarketDataEventMsg,
)
from ...messages.query import QuerySpreadResponseMsg, QueryTransactedVolResponseMsg
from ...orders import Side
from ..trading_agent import TradingAgent
ANCHOR_TOP_STR = "top"
ANCHOR_BOTTOM_STR = "bottom"
ANCHOR_MIDDLE_STR = "middle"
ADAPTIVE_SPREAD_STR = "adaptive"
INITIAL_SPREAD_VALUE = 50
logger = logging.getLogger(__name__)
class AdaptiveMarketMakerAgent(TradingAgent):
"""This class implements a modification of the Chakraborty-Kearns `ladder` market-making strategy, wherein the
the size of order placed at each level is set as a fraction of measured transacted volume in the previous time
period.
Can skew orders to size of current inventory using beta parameter, whence beta == 0 represents inventory being
ignored and beta == infinity represents all liquidity placed on one side of book.
ADAPTIVE SPREAD: the market maker's spread can be set either as a fixed or value or can be adaptive,
"""
def __init__(
self,
id: int,
symbol: str,
starting_cash: int,
name: Optional[str] = None,
type: Optional[str] = None,
random_state: Optional[np.random.RandomState] = None,
pov: float = 0.05,
min_order_size: int = 20,
window_size: float = 5,
anchor: str = ANCHOR_MIDDLE_STR,
num_ticks: int = 20,
level_spacing: float = 0.5,
wake_up_freq: NanosecondTime = 1_000_000_000, # 1 second
poisson_arrival: bool = True,
subscribe: bool = False,
subscribe_freq: float = 10e9,
subscribe_num_levels: int = 1,
cancel_limit_delay: int = 50,
skew_beta=0,
price_skew_param=None,
spread_alpha: float = 0.85,
backstop_quantity: int = 0,
log_orders: bool = False,
min_imbalance=0.9,
) -> None:
super().__init__(id, name, type, random_state, starting_cash, log_orders)
self.is_adaptive: bool = False
self.symbol: str = symbol # Symbol traded
self.pov: float = (
pov # fraction of transacted volume placed at each price level
)
self.min_order_size: int = (
min_order_size # minimum size order to place at each level, if pov <= min
)
self.anchor: str = self.validate_anchor(
anchor
) # anchor either top of window or bottom of window to mid-price
self.window_size: float = self.validate_window_size(
window_size
) # Size in ticks (cents) of how wide the window around mid price is. If equal to
# string 'adaptive' then ladder starts at best bid and ask
self.num_ticks: int = num_ticks # number of ticks on each side of window in which to place liquidity
self.level_spacing: float = (
level_spacing # level spacing as a fraction of the spread
)
self.wake_up_freq: NanosecondTime = wake_up_freq # Frequency of agent wake up
self.poisson_arrival: bool = (
poisson_arrival # Whether to arrive as a Poisson process
)
if self.poisson_arrival:
self.arrival_rate = self.wake_up_freq
self.subscribe: bool = subscribe # Flag to determine whether to subscribe to data or use polling mechanism
self.subscribe_freq: float = subscribe_freq # Frequency in nanoseconds^-1 at which to receive market updates
# in subscribe mode
self.min_imbalance = min_imbalance
self.subscribe_num_levels: int = (
subscribe_num_levels # Number of orderbook levels in subscription mode
)
self.cancel_limit_delay: int = cancel_limit_delay # delay in nanoseconds between order cancellations and new limit order placements
self.skew_beta = (
skew_beta # parameter for determining order placement imbalance
)
self.price_skew_param = (
price_skew_param # parameter determining how much to skew price level.
)
self.spread_alpha: float = spread_alpha # parameter for exponentially weighted moving average of spread. 1 corresponds to ignoring old values, 0 corresponds to no updates
self.backstop_quantity: int = backstop_quantity # how many orders to place at outside order level, to prevent liquidity dropouts. If None then place same as at other levels.
self.log_orders: bool = log_orders
self.has_subscribed = False
## Internal variables
self.subscription_requested: bool = False
self.state: Dict[str, bool] = self.initialise_state()
self.buy_order_size: int = self.min_order_size
self.sell_order_size: int = self.min_order_size
self.last_mid: Optional[int] = None # last observed mid price
self.last_spread: float = (
INITIAL_SPREAD_VALUE # last observed spread moving average
)
self.tick_size: Optional[int] = (
None if self.is_adaptive else ceil(self.last_spread * self.level_spacing)
)
self.LIQUIDITY_DROPOUT_WARNING: str = (
f"Liquidity dropout for agent {self.name}."
)
self.two_side: bool = (
self.price_skew_param is not None
) # switch to control self.get_transacted_volume
# method
def initialise_state(self) -> Dict[str, bool]:
"""Returns variables that keep track of whether spread and transacted volume have been observed."""
if self.subscribe:
return {"AWAITING_MARKET_DATA": True, "AWAITING_TRANSACTED_VOLUME": True}
else:
return {"AWAITING_SPREAD": True, "AWAITING_TRANSACTED_VOLUME": True}
def validate_anchor(self, anchor: str) -> str:
"""Checks that input parameter anchor takes allowed value, raises ``ValueError`` if not.
Arguments:
anchor:
Returns:
The anchor if validated.
"""
if anchor not in [ANCHOR_TOP_STR, ANCHOR_BOTTOM_STR, ANCHOR_MIDDLE_STR]:
raise ValueError(
f"Variable anchor must take the value `{ANCHOR_BOTTOM_STR}`, `{ANCHOR_MIDDLE_STR}` or "
f"`{ANCHOR_TOP_STR}`"
)
else:
return anchor
def validate_window_size(self, window_size: float) -> Optional[int]:
"""Checks that input parameter window_size takes allowed value, raises ``ValueError`` if not.
Arguments:
window_size:
Returns:
The window_size if validated
"""
try: # fixed window size specified
return int(window_size)
except (ValueError, TypeError):
if window_size.lower() == "adaptive":
self.is_adaptive = True
self.anchor = ANCHOR_MIDDLE_STR
return None
else:
raise ValueError(
f"Variable window_size must be of type int or string {ADAPTIVE_SPREAD_STR}."
)
def kernel_starting(self, start_time: NanosecondTime) -> None:
super().kernel_starting(start_time)
def wakeup(self, current_time: NanosecondTime):
"""Agent wakeup is determined by self.wake_up_freq."""
can_trade = super().wakeup(current_time)
if not self.has_subscribed:
super().request_data_subscription(
BookImbalanceSubReqMsg(
symbol=self.symbol,
min_imbalance=self.min_imbalance,
)
)
self.last_time_book_order = current_time
self.has_subscribed = True
if self.subscribe and not self.subscription_requested:
super().request_data_subscription(
L2SubReqMsg(
symbol=self.symbol,
freq=self.subscribe_freq,
depth=self.subscribe_num_levels,
)
)
self.subscription_requested = True
self.get_transacted_volume(self.symbol, lookback_period=self.subscribe_freq)
self.state = self.initialise_state()
elif can_trade and not self.subscribe:
self.cancel_all_orders()
self.delay(self.cancel_limit_delay)
self.get_current_spread(self.symbol, depth=self.subscribe_num_levels)
self.get_transacted_volume(self.symbol, lookback_period=self.wake_up_freq)
self.initialise_state()
def receive_message(
self, current_time: NanosecondTime, sender_id: int, message: Message
) -> None:
"""Processes message from exchange.
Main function is to update orders in orderbook relative to mid-price.
Arguments:
current_time: Simulation current time.
message: Message received by self from ExchangeAgent.
"""
super().receive_message(current_time, sender_id, message)
mid = None
if self.last_mid is not None:
mid = self.last_mid
if self.last_spread is not None and self.is_adaptive:
self._adaptive_update_window_and_tick_size()
if (
isinstance(message, QueryTransactedVolResponseMsg)
and self.state["AWAITING_TRANSACTED_VOLUME"] is True
):
self.update_order_size()
self.state["AWAITING_TRANSACTED_VOLUME"] = False
if isinstance(message, BookImbalanceDataMsg):
if message.stage == MarketDataEventMsg.Stage.START:
try:
self.place_orders(mid)
self.last_time_book_order = current_time
except Exception:
# mid is still None before the first spread observation;
# skip reacting to the imbalance event in that case
pass
if not self.subscribe:
if (
isinstance(message, QuerySpreadResponseMsg)
and self.state["AWAITING_SPREAD"] is True
):
bid, _, ask, _ = self.get_known_bid_ask(self.symbol)
if bid and ask:
mid = int((ask + bid) / 2)
self.last_mid = mid
if self.is_adaptive:
spread = int(ask - bid)
self._adaptive_update_spread(spread)
self.state["AWAITING_SPREAD"] = False
else:
logger.debug("SPREAD MISSING at time {}", current_time)
self.state[
"AWAITING_SPREAD"
] = False # use last mid price and spread
if (
self.state["AWAITING_SPREAD"] is False
and self.state["AWAITING_TRANSACTED_VOLUME"] is False
and mid is not None
):
self.place_orders(mid)
self.state = self.initialise_state()
self.set_wakeup(current_time + self.get_wake_frequency())
else: # subscription mode
if (
isinstance(message, MarketDataMsg)
and self.state["AWAITING_MARKET_DATA"] is True
):
bid = (
self.known_bids[self.symbol][0][0]
if self.known_bids[self.symbol]
else None
)
ask = (
self.known_asks[self.symbol][0][0]
if self.known_asks[self.symbol]
else None
)
if bid and ask:
mid = int((ask + bid) / 2)
self.last_mid = mid
if self.is_adaptive:
spread = int(ask - bid)
self._adaptive_update_spread(spread)
self.state["AWAITING_MARKET_DATA"] = False
else:
logger.debug("SPREAD MISSING at time {}", current_time)
self.state["AWAITING_MARKET_DATA"] = False
if (
self.state["MARKET_DATA"] is False
and self.state["AWAITING_TRANSACTED_VOLUME"] is False
):
self.place_orders(mid)
self.state = self.initialise_state()
def _adaptive_update_spread(self, spread) -> None:
"""Update internal spread estimate with exponentially weighted moving average.
Arguments:
spread
"""
spread_ewma = (
self.spread_alpha * spread + (1 - self.spread_alpha) * self.last_spread
)
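# e.g. with spread_alpha = 0.85, last_spread = 50 and an observed spread of 30,
# the new estimate is 0.85 * 30 + 0.15 * 50 = 33.0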
self.window_size = spread_ewma
self.last_spread = spread_ewma
def _adaptive_update_window_and_tick_size(self) -> None:
"""Update window size and tick size relative to internal spread estimate."""
self.window_size = self.last_spread
self.tick_size = round(self.level_spacing * self.window_size)
if self.tick_size == 0:
self.tick_size = 1
def update_order_size(self) -> None:
"""Updates size of order to be placed."""
buy_transacted_volume = self.transacted_volume[self.symbol][0]
sell_transacted_volume = self.transacted_volume[self.symbol][1]
total_transacted_volume = buy_transacted_volume + sell_transacted_volume
qty = round(self.pov * total_transacted_volume)
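# e.g. with pov = 0.05 and 1_000 shares transacted over the lookback window,
# qty = 50 shares per price level (floored at min_order_size below)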
if self.skew_beta == 0: # ignore inventory
self.buy_order_size = (
qty if qty >= self.min_order_size else self.min_order_size
)
self.sell_order_size = (
qty if qty >= self.min_order_size else self.min_order_size
)
else:
holdings = self.get_holdings(self.symbol)
proportion_sell = sigmoid(holdings, self.skew_beta)
sell_size = ceil(proportion_sell * qty)
buy_size = floor((1 - proportion_sell) * qty)
self.buy_order_size = (
buy_size if buy_size >= self.min_order_size else self.min_order_size
)
self.sell_order_size = (
sell_size if sell_size >= self.min_order_size else self.min_order_size
)
def compute_orders_to_place(self, mid: int) -> Tuple[List[int], List[int]]:
"""Given a mid price, computes the orders that need to be removed from
orderbook, and adds these orders to bid and ask deques.
Arguments:
mid: Mid price.
"""
if self.price_skew_param is None:
mid_point = mid
else:
buy_transacted_volume = self.transacted_volume[self.symbol][0]
sell_transacted_volume = self.transacted_volume[self.symbol][1]
if (buy_transacted_volume == 0) and (sell_transacted_volume == 0):
mid_point = mid
else:
# trade imbalance, +1 means all transactions are buy, -1 means all transactions are sell
trade_imbalance = (
2
* buy_transacted_volume
/ (buy_transacted_volume + sell_transacted_volume)
) - 1
mid_point = int(mid + (trade_imbalance * self.price_skew_param))
if self.anchor == ANCHOR_MIDDLE_STR:
highest_bid = int(mid_point) - floor(0.5 * self.window_size)
lowest_ask = int(mid_point) + ceil(0.5 * self.window_size)
elif self.anchor == ANCHOR_BOTTOM_STR:
highest_bid = int(mid_point - 1)
lowest_ask = int(mid_point + self.window_size)
elif self.anchor == ANCHOR_TOP_STR:
highest_bid = int(mid_point - self.window_size)
lowest_ask = int(mid_point + 1)
lowest_bid = highest_bid - ((self.num_ticks - 1) * self.tick_size)
highest_ask = lowest_ask + ((self.num_ticks - 1) * self.tick_size)
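# Worked example (illustrative values): mid_point = 10_000, anchor "middle",
# window_size = 10, num_ticks = 3, tick_size = 2 gives bids [9991, 9993, 9995]
# and asks [10005, 10007, 10009]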
bids_to_place = [
price
for price in range(lowest_bid, highest_bid + self.tick_size, self.tick_size)
]
asks_to_place = [
price
for price in range(lowest_ask, highest_ask + self.tick_size, self.tick_size)
]
return bids_to_place, asks_to_place
def place_orders(self, mid: int) -> None:
"""Given a mid-price, compute new orders that need to be placed, then
send the orders to the Exchange.
Arguments:
mid: Mid price.
"""
bid_orders, ask_orders = self.compute_orders_to_place(mid)
orders = []
if self.backstop_quantity != 0:
bid_price = bid_orders[0]
logger.debug(
"{}: Placing BUY limit order of size {} @ price {}".format(
self.name, self.backstop_quantity, bid_price
)
)
orders.append(
self.create_limit_order(
self.symbol, self.backstop_quantity, Side.BID, bid_price
)
)
bid_orders = bid_orders[1:]
ask_price = ask_orders[-1]
logger.debug(
"{}: Placing SELL limit order of size {} @ price {}".format(
self.name, self.backstop_quantity, ask_price
)
)
orders.append(
self.create_limit_order(
self.symbol, self.backstop_quantity, Side.ASK, ask_price
)
)
ask_orders = ask_orders[:-1]
for bid_price in bid_orders:
logger.debug(
"{}: Placing BUY limit order of size {} @ price {}".format(
self.name, self.buy_order_size, bid_price
)
)
orders.append(
self.create_limit_order(
self.symbol, self.buy_order_size, Side.BID, bid_price
)
)
for ask_price in ask_orders:
logger.debug(
"{}: Placing SELL limit order of size {} @ price {}".format(
self.name, self.sell_order_size, ask_price
)
)
orders.append(
self.create_limit_order(
self.symbol, self.sell_order_size, Side.ASK, ask_price
)
)
self.place_multiple_orders(orders)
def get_wake_frequency(self) -> NanosecondTime:
if not self.poisson_arrival:
return self.wake_up_freq
else:
delta_time = self.random_state.exponential(scale=self.arrival_rate)
return int(round(delta_time))
| 18,715 | 36.357285 | 182 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/market_makers/__init__.py
|
from .adaptive_market_maker_agent import AdaptiveMarketMakerAgent
| 66 | 32.5 | 65 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/background_v2/core_background_agent.py
|
from collections import deque
from copy import deepcopy
from typing import Any, Deque, Dict, List, Optional, Tuple
import numpy as np
from abides_core import Message, NanosecondTime
from abides_core.generators import ConstantTimeGenerator, InterArrivalTimeGenerator
from abides_core.utils import str_to_ns
from abides_markets.agents.trading_agent import TradingAgent
from abides_markets.messages.marketdata import (
L2DataMsg,
L2SubReqMsg,
MarketDataMsg,
TransactedVolDataMsg,
TransactedVolSubReqMsg,
)
from abides_markets.orders import Order, Side
class CoreBackgroundAgent(TradingAgent):
def __init__(
self,
id: int,
symbol: str,
starting_cash: int,
subscribe_freq: int = int(1e8),
lookback_period: Optional[int] = None, # for volume subscription
subscribe: bool = True,
subscribe_num_levels: Optional[int] = None,
wakeup_interval_generator: InterArrivalTimeGenerator = ConstantTimeGenerator(
step_duration=str_to_ns("1min")
),
order_size_generator=None, # TODO: not sure about this one
state_buffer_length: int = 2,
market_data_buffer_length: int = 5,
first_interval: Optional[NanosecondTime] = None,
log_orders: bool = False,
name: Optional[str] = None,
type: Optional[str] = None,
random_state: Optional[np.random.RandomState] = None,
) -> None:
super().__init__(
id,
starting_cash=starting_cash,
log_orders=log_orders,
name=name,
type=type,
random_state=random_state,
)
self.symbol: str = symbol
# Frequency of agent data subscription updates, in ns^-1
self.subscribe_freq: int = subscribe_freq
self.subscribe: bool = subscribe
self.subscribe_num_levels: int = subscribe_num_levels
self.first_interval: Optional[NanosecondTime] = first_interval
self.wakeup_interval_generator: InterArrivalTimeGenerator = (
wakeup_interval_generator
)
self.order_size_generator = (
order_size_generator # TODO: no idea here for typing
)
if hasattr(self.wakeup_interval_generator, "random_generator"):
self.wakeup_interval_generator.random_generator = self.random_state
self.state_buffer_length: int = state_buffer_length
self.market_data_buffer_length: int = market_data_buffer_length
if self.order_size_generator is not None: # TODO: check this one
self.order_size_generator.random_generator = self.random_state
self.lookback_period: NanosecondTime = self.wakeup_interval_generator.mean()
# internal variables
self.has_subscribed: bool = False
self.episode_executed_orders: List[
Order
] = [] # list of executed orders during full episode
self.inter_wakeup_executed_orders: List[
Order
] = [] # list of executed orders between steps - is reset at every step
self.parsed_episode_executed_orders: List[Tuple[int, int]] = [] # (price, qty)
self.parsed_inter_wakeup_executed_orders: List[
Tuple[int, int]
] = [] # (price, qty)
self.parsed_mkt_data: Dict[str, Any] = {}
self.parsed_mkt_data_buffer: Deque[Dict[str, Any]] = deque(
maxlen=self.market_data_buffer_length
)
self.parsed_volume_data = {}
self.parsed_volume_data_buffer: Deque[Dict[str, Any]] = deque(
maxlen=self.market_data_buffer_length
)
self.raw_state: Deque[Dict[str, Any]] = deque(maxlen=self.state_buffer_length)
# dictionary to track order status:
# - keys = order_id
# - value = dictionary {'active'|'cancelled'|'executed', Order, 'active_qty', 'executed_qty', 'cancelled_qty'}
self.order_status: Dict[int, Dict[str, Any]] = {}
def kernel_starting(self, start_time: NanosecondTime) -> None:
super().kernel_starting(start_time)
def wakeup(self, current_time: NanosecondTime) -> bool:
# TODO: parent class (TradingAgent) returns bool of "ready to trade"
"""Agent interarrival wake up times are determined by wakeup_interval_generator"""
super().wakeup(current_time)
if not self.has_subscribed:
super().request_data_subscription(
L2SubReqMsg(
symbol=self.symbol,
freq=self.subscribe_freq,
depth=self.subscribe_num_levels,
)
)
super().request_data_subscription(
TransactedVolSubReqMsg(
symbol=self.symbol,
freq=self.subscribe_freq,
lookback=self.lookback_period,
)
)
self.has_subscribed = True
# compute the following wake up
if (self.mkt_open is not None) and (
current_time >= self.mkt_open
): # compute the state (returned to the Gym Env)
raw_state = self.act_on_wakeup()
# TODO: wakeup function should return bool
return raw_state
# return a non-None value so the kernel catches it and stops
def act_on_wakeup(self):
# Needs type signature
raise NotImplementedError
def receive_message(
self, current_time: NanosecondTime, sender_id: int, message: Message
) -> None:
"""Processes message from exchange. Main function is to update orders in orderbook relative to mid-price.
:param current_time: simulation current time
:param message: message received by self from ExchangeAgent
:type current_time: NanosecondTime
:type message: Message
:return:
"""
# TODO: will prob need to see for transacted volume if we enrich the state
super().receive_message(current_time, sender_id, message)
if self.subscribe:
if isinstance(message, MarketDataMsg):
if isinstance(message, L2DataMsg):
self.parsed_mkt_data = self.get_parsed_mkt_data(message)
self.parsed_mkt_data_buffer.append(self.parsed_mkt_data)
elif isinstance(message, TransactedVolDataMsg):
self.parsed_volume_data = self.get_parsed_volume_data(message)
self.parsed_volume_data_buffer.append(self.parsed_volume_data)
def get_wake_frequency(self) -> NanosecondTime:
# first wakeup interval from open
        time_first_wakeup = (
            self.first_interval
            if self.first_interval is not None
            else self.wakeup_interval_generator.next()
        )
return time_first_wakeup
    def apply_actions(self, actions: List[Dict[str, Any]]) -> None:
        # Takes actions from the kernel in their general representation and
        # converts them into ABIDES-simulator API calls.
        # TODO: add single-order cancellation to the supported actions
for action in actions:
if action["type"] == "MKT":
side = Side.BID if action["direction"] == "BUY" else Side.ASK
self.place_market_order(self.symbol, action["size"], side)
elif action["type"] == "LMT":
side = Side.BID if action["direction"] == "BUY" else Side.ASK
self.place_limit_order(
self.symbol, action["size"], side, action["limit_price"]
)
# TODO: test the cancel based on the id
elif action["type"] == "CCL_ALL":
self.cancel_all_orders()
else:
raise ValueError(f"Action Type {action['type']} is not supported")
def update_raw_state(self) -> None:
# mkt data
parsed_mkt_data_buffer = deepcopy(self.parsed_mkt_data_buffer)
# internal data
internal_data = self.get_internal_data()
# volume data
parsed_volume_data_buffer = deepcopy(self.parsed_volume_data_buffer)
new = {
"parsed_mkt_data": parsed_mkt_data_buffer,
"internal_data": internal_data,
"parsed_volume_data": parsed_volume_data_buffer,
}
self.raw_state.append(new)
    def get_raw_state(self) -> Deque[Dict[str, Any]]:
        return self.raw_state
def get_parsed_mkt_data(self, message: L2DataMsg) -> Dict[str, Any]:
        # TODO: will probably need to include the subscription type in the parameters here
bids = message.bids
asks = message.asks
last_transaction = message.last_transaction
exchange_ts = message.exchange_ts
mkt_data = {
"bids": bids,
"asks": asks,
"last_transaction": last_transaction,
"exchange_ts": exchange_ts,
}
return mkt_data
def get_parsed_volume_data(self, message: TransactedVolDataMsg) -> Dict[str, Any]:
last_transaction = message.last_transaction
exchange_ts = message.exchange_ts
bid_volume = message.bid_volume
ask_volume = message.ask_volume
total_volume = bid_volume + ask_volume
volume_data = {
"last_transaction": last_transaction,
"exchange_ts": exchange_ts,
"bid_volume": bid_volume,
"ask_volume": ask_volume,
"total_volume": total_volume,
}
return volume_data
def get_internal_data(self) -> Dict[str, Any]:
holdings = self.get_holdings(self.symbol)
cash = self.get_holdings("CASH")
inter_wakeup_executed_orders = self.inter_wakeup_executed_orders
episode_executed_orders = self.episode_executed_orders
parsed_episode_executed_orders = self.parsed_episode_executed_orders
parsed_inter_wakeup_executed_orders = self.parsed_inter_wakeup_executed_orders
current_time = self.current_time
order_status = self.order_status
mkt_open = self.mkt_open
mkt_close = self.mkt_close
internal_data = {
"holdings": holdings,
"cash": cash,
"inter_wakeup_executed_orders": inter_wakeup_executed_orders,
"episode_executed_orders": episode_executed_orders,
"parsed_episode_executed_orders": parsed_episode_executed_orders,
"parsed_inter_wakeup_executed_orders": parsed_inter_wakeup_executed_orders,
"starting_cash": self.starting_cash,
"current_time": current_time,
"order_status": order_status,
"mkt_open": mkt_open,
"mkt_close": mkt_close,
}
return internal_data
def order_executed(self, order: Order) -> None:
super().order_executed(order)
# parsing of the order message
executed_qty = order.quantity
executed_price = order.fill_price
assert executed_price is not None
order_id = order.order_id
# step lists
self.inter_wakeup_executed_orders.append(order)
self.parsed_inter_wakeup_executed_orders.append((executed_qty, executed_price))
# episode lists
self.episode_executed_orders.append(order)
self.parsed_episode_executed_orders.append((executed_qty, executed_price))
        # update the order status dictionary;
        # check whether this was a mkt order whose first execution is being received
        flag = order_id in self.order_status
if flag:
self.order_status[order_id]["executed_qty"] += executed_qty
self.order_status[order_id]["active_qty"] -= executed_qty
if self.order_status[order_id]["active_qty"] <= 0:
self.order_status[order_id]["status"] = "executed"
else:
self.order_status[order_id] = {
"status": "mkt_immediately_filled",
"order": order,
"active_qty": 0,
"executed_qty": executed_qty,
"cancelled_qty": 0,
}
def order_accepted(self, order: Order) -> None:
super().order_accepted(order)
        # update the order status dictionary
self.order_status[order.order_id] = {
"status": "active",
"order": order,
"active_qty": order.quantity,
"executed_qty": 0,
"cancelled_qty": 0,
}
def order_cancelled(self, order: Order) -> None:
super().order_cancelled(order)
order_id = order.order_id
quantity = order.quantity
self.order_status[order_id] = {
"status": "cancelled",
"order": order,
"cancelled_qty": quantity,
}
    def new_inter_wakeup_reset(self) -> None:
        # list of executed orders between steps - reset at every step
        self.inter_wakeup_executed_orders = []
        self.parsed_inter_wakeup_executed_orders = []  # tuples (qty, price)
def act(self, raw_state):
# used by the background agent
raise NotImplementedError
    def new_step_reset(self) -> None:
        # list of executed orders between steps - reset at every step
        self.inter_wakeup_executed_orders = []
        self.parsed_inter_wakeup_executed_orders = []  # tuples (qty, price)
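# A minimal usage sketch (illustrative, not part of the original module), showing
# the plain-dict action schema consumed by apply_actions() above, assuming a
# concrete subclass implementing act_on_wakeup() exists as `agent`:
#
#     agent.apply_actions(
#         [
#             {"type": "MKT", "direction": "BUY", "size": 10},
#             {"type": "LMT", "direction": "SELL", "size": 5, "limit_price": 100_000},
#             {"type": "CCL_ALL"},
#         ]
#     )
#
# Any other "type" value raises ValueError, as implemented above.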
| 13,819 | 38.827089 | 117 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/agents/background_v2/__init__.py
|
from .core_background_agent import CoreBackgroundAgent
| 55 | 27 | 54 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/configs/rmsc04.py
|
# RMSC-4 (Reference Market Simulation Configuration):
# - 1 Exchange Agent
# - 2 Adaptive Market Maker Agents
# - 102 Value Agents
# - 12 Momentum Agents
# - 1000 Noise Agents
import os
from datetime import datetime
import numpy as np
import pandas as pd
from abides_core.utils import get_wake_time, str_to_ns
from abides_markets.agents import (
ExchangeAgent,
NoiseAgent,
ValueAgent,
AdaptiveMarketMakerAgent,
MomentumAgent,
)
from abides_markets.models import OrderSizeModel
from abides_markets.oracles import SparseMeanRevertingOracle
from abides_markets.utils import generate_latency_model
########################################################################################################################
############################################### GENERAL CONFIG #########################################################
def build_config(
seed=int(datetime.now().timestamp() * 1_000_000) % (2**32 - 1),
date="20210205",
end_time="10:00:00",
stdout_log_level="INFO",
ticker="ABM",
starting_cash=10_000_000, # Cash in this simulator is always in CENTS.
log_orders=True, # if True log everything
# 1) Exchange Agent
book_logging=True,
book_log_depth=10,
stream_history_length=500,
exchange_log_orders=None,
# 2) Noise Agent
num_noise_agents=1000,
# 3) Value Agents
num_value_agents=102,
r_bar=100_000, # true mean fundamental value
    kappa=1.67e-15,  # Value Agents' appraisal of mean-reversion
lambda_a=5.7e-12, # ValueAgent arrival rate
# oracle
kappa_oracle=1.67e-16, # Mean-reversion of fundamental time series.
sigma_s=0,
fund_vol=5e-5, # Volatility of fundamental time series (std).
megashock_lambda_a=2.77778e-18,
megashock_mean=1000,
megashock_var=50_000,
# 4) Market Maker Agents
# each elem of mm_params is tuple (window_size, pov, num_ticks, wake_up_freq, min_order_size)
mm_window_size="adaptive",
mm_pov=0.025,
mm_num_ticks=10,
mm_wake_up_freq="60S",
mm_min_order_size=1,
mm_skew_beta=0,
mm_price_skew=4,
mm_level_spacing=5,
mm_spread_alpha=0.75,
mm_backstop_quantity=0,
mm_cancel_limit_delay=50, # 50 nanoseconds
# 5) Momentum Agents
num_momentum_agents=12,
):
"""
create the background configuration for rmsc04
These are all the non-learning agent that will run in the simulation
:param seed: seed of the experiment
:type seed: int
:param log_orders: debug mode to print more
:return: all agents of the config
:rtype: list
"""
# fix seed
np.random.seed(seed)
    def path_wrapper(pomegranate_model_json):
        """
        Temporary solution to manage calls from abides-gym or from the rest of the code base.
        TODO: find a more general solution
        """
        # get the current working directory
        path = os.getcwd()
if path.split("/")[-1] == "abides_gym":
return "../" + pomegranate_model_json
else:
return pomegranate_model_json
mm_wake_up_freq = str_to_ns(mm_wake_up_freq)
# order size model
ORDER_SIZE_MODEL = OrderSizeModel() # Order size model
# market marker derived parameters
MM_PARAMS = [
(mm_window_size, mm_pov, mm_num_ticks, mm_wake_up_freq, mm_min_order_size),
(mm_window_size, mm_pov, mm_num_ticks, mm_wake_up_freq, mm_min_order_size),
]
NUM_MM = len(MM_PARAMS)
# noise derived parameters
SIGMA_N = r_bar / 100 # observation noise variance
# date&time
DATE = int(pd.to_datetime(date).to_datetime64())
MKT_OPEN = DATE + str_to_ns("09:30:00")
MKT_CLOSE = DATE + str_to_ns(end_time)
# These times needed for distribution of arrival times of Noise Agents
NOISE_MKT_OPEN = MKT_OPEN - str_to_ns("00:30:00")
NOISE_MKT_CLOSE = DATE + str_to_ns("16:00:00")
# oracle
symbols = {
ticker: {
"r_bar": r_bar,
"kappa": kappa_oracle,
"sigma_s": sigma_s,
"fund_vol": fund_vol,
"megashock_lambda_a": megashock_lambda_a,
"megashock_mean": megashock_mean,
"megashock_var": megashock_var,
"random_state": np.random.RandomState(
seed=np.random.randint(low=0, high=2**32)
),
}
}
oracle = SparseMeanRevertingOracle(MKT_OPEN, NOISE_MKT_CLOSE, symbols)
# Agent configuration
agent_count, agents, agent_types = 0, [], []
agents.extend(
[
ExchangeAgent(
id=0,
name="EXCHANGE_AGENT",
type="ExchangeAgent",
mkt_open=MKT_OPEN,
mkt_close=MKT_CLOSE,
symbols=[ticker],
book_logging=book_logging,
book_log_depth=book_log_depth,
log_orders=exchange_log_orders,
pipeline_delay=0,
computation_delay=0,
stream_history=stream_history_length,
random_state=np.random.RandomState(
seed=np.random.randint(low=0, high=2**32, dtype="uint64")
),
)
]
)
agent_types.extend("ExchangeAgent")
agent_count += 1
agents.extend(
[
NoiseAgent(
id=j,
name="NoiseAgent {}".format(j),
type="NoiseAgent",
symbol=ticker,
starting_cash=starting_cash,
wakeup_time=get_wake_time(NOISE_MKT_OPEN, NOISE_MKT_CLOSE),
log_orders=log_orders,
order_size_model=ORDER_SIZE_MODEL,
random_state=np.random.RandomState(
seed=np.random.randint(low=0, high=2**32, dtype="uint64")
),
)
for j in range(agent_count, agent_count + num_noise_agents)
]
)
agent_count += num_noise_agents
agent_types.extend(["NoiseAgent"])
agents.extend(
[
ValueAgent(
id=j,
name="Value Agent {}".format(j),
type="ValueAgent",
symbol=ticker,
starting_cash=starting_cash,
sigma_n=SIGMA_N,
r_bar=r_bar,
kappa=kappa,
lambda_a=lambda_a,
log_orders=log_orders,
order_size_model=ORDER_SIZE_MODEL,
random_state=np.random.RandomState(
seed=np.random.randint(low=0, high=2**32, dtype="uint64")
),
)
for j in range(agent_count, agent_count + num_value_agents)
]
)
agent_count += num_value_agents
agent_types.extend(["ValueAgent"])
agents.extend(
[
AdaptiveMarketMakerAgent(
id=j,
name="ADAPTIVE_POV_MARKET_MAKER_AGENT_{}".format(j),
type="AdaptivePOVMarketMakerAgent",
symbol=ticker,
starting_cash=starting_cash,
pov=MM_PARAMS[idx][1],
min_order_size=MM_PARAMS[idx][4],
window_size=MM_PARAMS[idx][0],
num_ticks=MM_PARAMS[idx][2],
wake_up_freq=MM_PARAMS[idx][3],
poisson_arrival=True,
cancel_limit_delay=mm_cancel_limit_delay,
skew_beta=mm_skew_beta,
price_skew_param=mm_price_skew,
level_spacing=mm_level_spacing,
spread_alpha=mm_spread_alpha,
backstop_quantity=mm_backstop_quantity,
log_orders=log_orders,
random_state=np.random.RandomState(
seed=np.random.randint(low=0, high=2**32, dtype="uint64")
),
)
for idx, j in enumerate(range(agent_count, agent_count + NUM_MM))
]
)
agent_count += NUM_MM
agent_types.extend("POVMarketMakerAgent")
agents.extend(
[
MomentumAgent(
id=j,
name="MOMENTUM_AGENT_{}".format(j),
type="MomentumAgent",
symbol=ticker,
starting_cash=starting_cash,
min_size=1,
max_size=10,
wake_up_freq=str_to_ns("37s"),
poisson_arrival=True,
log_orders=log_orders,
order_size_model=ORDER_SIZE_MODEL,
random_state=np.random.RandomState(
seed=np.random.randint(low=0, high=2**32, dtype="uint64")
),
)
for j in range(agent_count, agent_count + num_momentum_agents)
]
)
agent_count += num_momentum_agents
agent_types.extend("MomentumAgent")
# extract kernel seed here to reproduce the state of random generator in old version
random_state_kernel = np.random.RandomState(
seed=np.random.randint(low=0, high=2**32, dtype="uint64")
)
# LATENCY
latency_model = generate_latency_model(agent_count)
default_computation_delay = 50 # 50 nanoseconds
##kernel args
kernelStartTime = DATE
kernelStopTime = MKT_CLOSE + str_to_ns("1s")
return {
"seed": seed,
"start_time": kernelStartTime,
"stop_time": kernelStopTime,
"agents": agents,
"agent_latency_model": latency_model,
"default_computation_delay": default_computation_delay,
"custom_properties": {"oracle": oracle},
"random_state_kernel": random_state_kernel,
"stdout_log_level": stdout_log_level,
}
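# A minimal usage sketch (illustrative, not part of the original module), assuming
# the package layout used elsewhere in this repository:
#
#     from abides_core import abides
#     from abides_markets.configs import rmsc04
#
#     config = rmsc04.build_config(seed=1, end_time="10:00:00")
#     end_state = abides.run(
#         config, kernel_random_state=config["random_state_kernel"]
#     )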
| 9,653 | 32.175258 | 120 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/configs/rmsc03.py
|
# RMSC-3 (Reference Market Simulation Configuration):
# - 1 Exchange Agent
# - 2 Adaptive Market Maker Agents
# - 100 Value Agents
# - 25 Momentum Agents
# - 5000 Noise Agents
import numpy as np
from abides_core.utils import str_to_ns, datetime_str_to_ns, get_wake_time
from abides_markets.agents import (
ExchangeAgent,
NoiseAgent,
ValueAgent,
AdaptiveMarketMakerAgent,
MomentumAgent,
POVExecutionAgent,
)
from abides_markets.oracles import SparseMeanRevertingOracle
from abides_markets.orders import Side
from abides_markets.utils import generate_latency_model
########################################################################################################################
############################################### GENERAL CONFIG #########################################################
def build_config(
ticker="ABM",
historical_date="20200603",
start_time="09:30:00",
end_time="16:00:00",
exchange_log_orders=True,
log_orders=True,
book_logging=True,
book_log_depth=10,
    # seed=int(datetime.now().timestamp() * 1_000_000) % (2**32 - 1),
seed=1,
stdout_log_level="INFO",
##
num_momentum_agents=25,
num_noise_agents=5000,
num_value_agents=100,
## exec agent
execution_agents=True,
execution_pov=0.1,
## market maker
mm_pov=0.025,
mm_window_size="adaptive",
mm_min_order_size=1,
mm_num_ticks=10,
mm_wake_up_freq=str_to_ns("10S"),
mm_skew_beta=0,
mm_level_spacing=5,
mm_spread_alpha=0.75,
mm_backstop_quantity=50_000,
##fundamental/oracle
fund_r_bar=100_000,
fund_kappa=1.67e-16,
fund_sigma_s=0,
fund_vol=1e-3, # Volatility of fundamental time series (std).
fund_megashock_lambda_a=2.77778e-18,
fund_megashock_mean=1000,
fund_megashock_var=50_000,
##value agent
val_r_bar=100_000,
val_kappa=1.67e-15,
val_vol=1e-8,
val_lambda_a=7e-11,
):
fund_sigma_n = fund_r_bar / 10
val_sigma_n = val_r_bar / 10
symbol = ticker
##setting numpy seed
np.random.seed(seed)
########################################################################################################################
############################################### AGENTS CONFIG ##########################################################
# Historical date to simulate.
historical_date = datetime_str_to_ns(historical_date)
mkt_open = historical_date + str_to_ns(start_time)
mkt_close = historical_date + str_to_ns(end_time)
agent_count, agents, agent_types = 0, [], []
# Hyperparameters
starting_cash = 10000000 # Cash in this simulator is always in CENTS.
# Oracle
symbols = {
symbol: {
"r_bar": fund_r_bar,
"kappa": fund_kappa,
"sigma_s": fund_sigma_s,
"fund_vol": fund_vol,
"megashock_lambda_a": fund_megashock_lambda_a,
"megashock_mean": fund_megashock_mean,
"megashock_var": fund_megashock_var,
"random_state": np.random.RandomState(
seed=np.random.randint(low=0, high=2**32, dtype="uint64")
),
}
}
oracle = SparseMeanRevertingOracle(mkt_open, mkt_close, symbols)
# 1) Exchange Agent
# How many orders in the past to store for transacted volume computation
agents.extend(
[
ExchangeAgent(
id=0,
name="EXCHANGE_AGENT",
mkt_open=mkt_open,
mkt_close=mkt_close,
symbols=[symbol],
book_logging=book_logging,
book_log_depth=book_log_depth,
log_orders=exchange_log_orders,
pipeline_delay=0,
computation_delay=0,
stream_history=25_000,
)
]
)
agent_types.extend("ExchangeAgent")
agent_count += 1
# 2) Noise Agents
num_noise = num_noise_agents
noise_mkt_open = historical_date + str_to_ns("09:00:00")
noise_mkt_close = historical_date + str_to_ns("16:00:00")
agents.extend(
[
NoiseAgent(
id=j,
symbol=symbol,
starting_cash=starting_cash,
wakeup_time=get_wake_time(noise_mkt_open, noise_mkt_close),
log_orders=log_orders,
)
for j in range(agent_count, agent_count + num_noise)
]
)
agent_count += num_noise
agent_types.extend(["NoiseAgent"])
# 3) Value Agents
num_value = num_value_agents
agents.extend(
[
ValueAgent(
id=j,
name="Value Agent {}".format(j),
symbol=symbol,
starting_cash=starting_cash,
sigma_n=val_sigma_n,
r_bar=val_r_bar,
kappa=val_kappa,
lambda_a=val_lambda_a,
log_orders=log_orders,
)
for j in range(agent_count, agent_count + num_value)
]
)
agent_count += num_value
agent_types.extend(["ValueAgent"])
# 4) Market Maker Agents
"""
window_size == Spread of market maker (in ticks) around the mid price
pov == Percentage of transacted volume seen in previous `mm_wake_up_freq` that
the market maker places at each level
num_ticks == Number of levels to place orders in around the spread
wake_up_freq == How often the market maker wakes up
"""
# each elem of mm_params is tuple (window_size, pov, num_ticks, wake_up_freq, min_order_size)
mm_params = 2 * [
(mm_window_size, mm_pov, mm_num_ticks, mm_wake_up_freq, mm_min_order_size)
]
num_mm_agents = len(mm_params)
mm_cancel_limit_delay = 50 # 50 nanoseconds
agents.extend(
[
AdaptiveMarketMakerAgent(
id=j,
name="ADAPTIVE_POV_MARKET_MAKER_AGENT_{}".format(j),
type="AdaptivePOVMarketMakerAgent",
symbol=symbol,
starting_cash=starting_cash,
pov=mm_params[idx][1],
min_order_size=mm_params[idx][4],
window_size=mm_params[idx][0],
num_ticks=mm_params[idx][2],
wake_up_freq=mm_params[idx][3],
cancel_limit_delay=mm_cancel_limit_delay,
skew_beta=mm_skew_beta,
level_spacing=mm_level_spacing,
spread_alpha=mm_spread_alpha,
backstop_quantity=mm_backstop_quantity,
log_orders=log_orders,
)
for idx, j in enumerate(range(agent_count, agent_count + num_mm_agents))
]
)
agent_count += num_mm_agents
agent_types.extend("POVMarketMakerAgent")
# 5) Momentum Agents
agents.extend(
[
MomentumAgent(
id=j,
name="MOMENTUM_AGENT_{}".format(j),
symbol=symbol,
starting_cash=starting_cash,
min_size=1,
max_size=10,
wake_up_freq=str_to_ns("20s"),
log_orders=log_orders,
)
for j in range(agent_count, agent_count + num_momentum_agents)
]
)
agent_count += num_momentum_agents
agent_types.extend("MomentumAgent")
# 6) Execution Agent
trade = True if execution_agents else False
#### Participation of Volume Agent parameters
pov_agent_start_time = mkt_open + str_to_ns("00:30:00")
pov_agent_end_time = mkt_close - str_to_ns("00:30:00")
pov_proportion_of_volume = execution_pov
pov_quantity = 12e5
pov_frequency = str_to_ns("1min")
pov_direction = Side.BID
pov_agent = POVExecutionAgent(
id=agent_count,
name="POV_EXECUTION_AGENT",
type="ExecutionAgent",
symbol=symbol,
starting_cash=starting_cash,
start_time=pov_agent_start_time,
end_time=pov_agent_end_time,
freq=pov_frequency,
lookback_period=pov_frequency,
pov=pov_proportion_of_volume,
direction=pov_direction,
quantity=pov_quantity,
trade=trade,
log_orders=True, # needed for plots so conflicts with others
)
execution_agents = [pov_agent]
agents.extend(execution_agents)
agent_types.extend("ExecutionAgent")
agent_count += 1
# extract kernel seed here to reproduce the state of random generator in old version
random_state_kernel = np.random.RandomState(
seed=np.random.randint(low=0, high=2**32, dtype="uint64")
)
# LATENCY
latency_model = generate_latency_model(agent_count)
default_computation_delay = 50 # 50 nanoseconds
##kernel args
kernelStartTime = historical_date
kernelStopTime = mkt_close + str_to_ns("00:01:00")
return {
"start_time": kernelStartTime,
"stop_time": kernelStopTime,
"agents": agents,
"agent_latency_model": latency_model,
"default_computation_delay": default_computation_delay,
"custom_properties": {"oracle": oracle},
"random_state_kernel": random_state_kernel,
"stdout_log_level": stdout_log_level,
}
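# A minimal usage sketch (illustrative, not part of the original module): the config
# is keyword-driven, e.g. a shorter session with fewer noise agents and the POV
# execution agent present but not trading (trade=False is derived above):
#
#     config = build_config(
#         seed=42,
#         end_time="10:30:00",
#         num_noise_agents=500,
#         execution_agents=False,
#     )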
| 9,311 | 30.566102 | 124 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/configs/__init__.py
| 0 | 0 | 0 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_markets/utils/__init__.py
|
import datetime
import sys
import traceback
import warnings
from contextlib import contextmanager
from typing import List, Union
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist, squareform
from abides_core import LatencyModel
# Utility method to flatten nested lists.
def delist(list_of_lists):
return [x for b in list_of_lists for x in b]
def numeric(s):
"""Returns numeric type from string, stripping commas from the right.
Adapted from https://stackoverflow.com/a/379966.
"""
s = s.rstrip(",")
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
return s
def get_value_from_timestamp(s: pd.Series, ts: datetime.datetime):
"""Get the value of s corresponding to closest datetime to ts.
Arguments:
s: Pandas Series with pd.DatetimeIndex.
ts: Timestamp at which to retrieve data.
"""
ts_str = ts.strftime("%Y-%m-%d %H:%M:%S")
s = s.loc[~s.index.duplicated(keep="last")]
locs = s.index.get_loc(ts_str, method="nearest")
out = (
s[locs][0]
if (isinstance(s[locs], np.ndarray) or isinstance(s[locs], pd.Series))
else s[locs]
)
return out
@contextmanager
def ignored(warning_str, *exceptions):
"""Context manager that wraps the code block in a try except statement, catching
specified exceptions and printing warning supplied by user.
Arguments:
warning_str: Warning statement printed when exception encountered.
exceptions: An exception type, e.g. ``ValueError``.
https://stackoverflow.com/a/15573313
"""
try:
yield
except exceptions:
warnings.warn(warning_str, UserWarning, stacklevel=1)
print(warning_str)
def generate_uniform_random_pairwise_dist_on_line(
left: float, right: float, num_points: int, random_state: np.random.RandomState
) -> np.ndarray:
"""Uniformly generate points on an interval, and return numpy array of pairwise
distances between points.
Arguments:
left: Left endpoint of interval.
right: Right endpoint of interval.
num_points: Number of points to use.
random_state: ``np.random.RandomState`` object.
"""
x_coords = random_state.uniform(low=left, high=right, size=num_points)
x_coords = x_coords.reshape((x_coords.size, 1))
out = pdist(x_coords, "euclidean")
return squareform(out)
def meters_to_light_ns(x):
"""Converts x in units of meters to light nanoseconds."""
x_lns = x / 299792458e-9
x_lns = x_lns.astype(int)
return x_lns
def validate_window_size(s):
"""Check if s is integer or string 'adaptive'."""
try:
return int(s)
except ValueError:
if s.lower() == "adaptive":
return s.lower()
else:
raise ValueError(f'String {s} must be integer or string "adaptive".')
def sigmoid(x, beta):
"""Numerically stable sigmoid function.
    Adapted from https://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/
"""
if x >= 0:
z = np.exp(-beta * x)
return 1 / (1 + z)
else:
# if x is less than zero then z will be small, denom can't be
# zero because it's 1+z.
z = np.exp(beta * x)
return z / (1 + z)
def subdict(d, keys):
return dict((k, v) for k, v in d.items() if k in keys)
def restrictdict(d, keys):
inter = [k for k in d.keys() if k in keys]
return subdict(d, inter)
def dollarize(cents: Union[List[int], int]) -> Union[List[str], str]:
"""Dollarizes int-cents prices for printing.
Defined outside the class for utility access by non-agent classes.
Arguments:
cents:
"""
if isinstance(cents, list):
return [dollarize(x) for x in cents]
elif isinstance(cents, (int, np.int64)):
return "${:0.2f}".format(cents / 100)
else:
# If cents is already a float, there is an error somewhere.
raise ValueError(
f"dollarize(cents) called without int or list of ints: {cents} (got type '{type(cents)}')"
)
# LATENCY
def generate_latency_model(agent_count, latency_type="deterministic"):
assert latency_type in [
"deterministic",
"no_latency",
], "Please select a correct latency_type"
    latency_rstate = np.random.RandomState(
        seed=np.random.randint(low=0, high=2 ** 32, dtype="uint64")
    )
pairwise = (agent_count, agent_count)
if latency_type == "deterministic":
# All agents sit on line from Seattle to NYC
nyc_to_seattle_meters = 3866660
pairwise_distances = generate_uniform_random_pairwise_dist_on_line(
0.0, nyc_to_seattle_meters, agent_count, random_state=latency_rstate
)
pairwise_latencies = meters_to_light_ns(pairwise_distances)
else: # latency_type == "no_latency"
pairwise_latencies = np.zeros(pairwise, dtype=int)
latency_model = LatencyModel(
latency_model="deterministic",
random_state=latency_rstate,
connected=True,
min_latency=pairwise_latencies,
)
return latency_model
def config_add_agents(orig_config_state, agents):
agent_count = len(orig_config_state["agents"])
orig_config_state["agents"] = orig_config_state["agents"] + agents
    # adding an agent to the config implies regenerating the latency model  # TODO: tell aymeric
lat_mod = generate_latency_model(agent_count + len(agents))
orig_config_state["agent_latency_model"] = lat_mod
return orig_config_state
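# A minimal usage sketch (illustrative, not part of the original module):
# config_add_agents regenerates the latency model because its dimensions are
# (agent_count x agent_count). Assuming `my_agent` is any abides_core Agent
# constructed with a fresh unique id:
#
#     config = build_config(...)            # e.g. abides_markets.configs.rmsc04
#     config = config_add_agents(config, [my_agent])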
| 5,583 | 27.489796 | 102 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_core/generators.py
|
from abc import abstractmethod, ABC
from typing import Generic, Optional, TypeVar
import numpy as np
T = TypeVar("T")
class BaseGenerator(ABC, Generic[T]):
"""
This is an abstract base class defining the interface for Generator objects in
ABIDES. This class is not used directly and is instead inherited from child classes.
Generators should produce an infinite amount of values.
"""
@abstractmethod
def next(self) -> T:
"""
Produces the next value from the generator.
"""
raise NotImplementedError
@abstractmethod
def mean(self) -> T:
"""
Returns the average of the distribution of values generated.
"""
raise NotImplementedError
class InterArrivalTimeGenerator(BaseGenerator[float], ABC):
"""
    General class for time generation. These generators are used to generate a delta time between the current time and the next wakeup of the agent.
"""
pass
class ConstantTimeGenerator(InterArrivalTimeGenerator):
"""
    Generates a constant delta time of length step_duration.
Arguments:
step_duration: length of the delta time in ns
"""
def __init__(self, step_duration: float) -> None:
self.step_duration: float = step_duration
def next(self) -> float:
"""
returns constant time delta for next wakeup
"""
return self.step_duration
def mean(self) -> float:
"""
time delta is constant
"""
return self.step_duration
class PoissonTimeGenerator(InterArrivalTimeGenerator):
"""
    Lambda must be specified either in seconds through lambda_time or in seconds^-1
    through lambda_freq.
Arguments:
random_generator: configuration random generator
lambda_freq: frequency (in s^-1)
lambda_time: period (in seconds)
"""
def __init__(
self,
random_generator: np.random.RandomState,
lambda_freq: Optional[float] = None,
lambda_time: Optional[float] = None,
) -> None:
self.random_generator: np.random.RandomState = random_generator
assert (lambda_freq is None and lambda_time is not None) or (
lambda_time is None and lambda_freq is not None
), "specify lambda in frequency OR in time"
self.lambda_s: float = lambda_freq or 1 / lambda_time
def next(self) -> Optional[float]:
"""
returns time delta for next wakeup with time delta following Poisson distribution
"""
seconds = self.random_generator.exponential(1 / self.lambda_s)
return seconds * 1_000_000_000 if seconds is not None else None
def mean(self) -> float:
"""
returns the mean of a Poisson(lambda) distribution (i.e., 1/lambda)
"""
return 1 / self.lambda_s
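# A minimal usage sketch (illustrative, not part of the original module). Note the
# unit asymmetry implemented above: next() returns nanoseconds, mean() seconds.
#
#     rng = np.random.RandomState(0)
#     gen = PoissonTimeGenerator(random_generator=rng, lambda_time=60)  # 1 arrival/min
#     dt_ns = gen.next()       # exponential draw, scaled to nanoseconds
#     assert gen.mean() == 60  # mean inter-arrival time, in seconds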
| 2,838 | 26.833333 | 146 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_core/abides.py
|
import datetime as dt
import logging
from typing import Any, Dict, Optional
import coloredlogs
import numpy as np
from .kernel import Kernel
from .utils import subdict
logger = logging.getLogger("abides")
def run(
config: Dict[str, Any],
log_dir: str = "",
kernel_seed: int = 0,
kernel_random_state: Optional[np.random.RandomState] = None,
) -> Dict[str, Any]:
"""
    Wrapper function that runs one simulation.
It does the following steps:
- instantiation of the kernel
- running of the simulation
- return the end_state object
Arguments:
config: configuration file for the specific simulation
log_dir: directory where log files are stored
kernel_seed: simulation seed
kernel_random_state: simulation random state
"""
coloredlogs.install(
level=config["stdout_log_level"],
fmt="[%(process)d] %(levelname)s %(name)s %(message)s",
)
kernel = Kernel(
random_state=kernel_random_state or np.random.RandomState(seed=kernel_seed),
log_dir=log_dir,
**subdict(
config,
[
"start_time",
"stop_time",
"agents",
"agent_latency_model",
"default_computation_delay",
"custom_properties",
],
),
)
sim_start_time = dt.datetime.now()
logger.info(f"Simulation Start Time: {sim_start_time}")
end_state = kernel.run()
sim_end_time = dt.datetime.now()
logger.info(f"Simulation End Time: {sim_end_time}")
logger.info(f"Time taken to run simulation: {sim_end_time - sim_start_time}")
return end_state
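# A minimal usage sketch (illustrative, not part of the original module), assuming
# `config` was produced by a build_config() such as rmsc03/rmsc04 in this repository:
#
#     from abides_core.utils import parse_logs_df
#
#     end_state = run(config, log_dir="", kernel_seed=1)
#     logs_df = parse_logs_df(end_state)  # flat per-event dataframe for debugging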
| 1,701 | 24.787879 | 84 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_core/message.py
|
from dataclasses import dataclass, field
from typing import ClassVar, List
@dataclass
class Message:
"""The base Message class no longer holds envelope/header information, however any
desired information can be placed in the arbitrary body.
Delivery metadata is now handled outside the message itself.
The body may be overridden by specific message type subclasses.
"""
# The autoincrementing variable here will ensure that, when Messages are due for
# delivery at the same time step, the Message that was created first is delivered
# first. (Which is not important, but Python 3 requires a fully resolved chain of
# priority in all cases, so we need something consistent) We might want to generate
# these with stochasticity, but guarantee uniqueness somehow, to make delivery of
# orders at the same exact timestamp "random" instead of "arbitrary" (FIFO among
# tied times) as it currently is.
__message_id_counter: ClassVar[int] = 1
message_id: int = field(init=False)
def __post_init__(self):
self.message_id: int = Message.__message_id_counter
Message.__message_id_counter += 1
def __lt__(self, other: "Message") -> bool:
# Required by Python3 for this object to be placed in a priority queue.
return self.message_id < other.message_id
def type(self) -> str:
return self.__class__.__name__
@dataclass
class MessageBatch(Message):
"""
Helper used for batching multiple messages being sent by the same sender to the same
destination together. If very large numbers of messages are being sent this way,
using this class can help performance.
"""
messages: List[Message]
@dataclass
class WakeupMsg(Message):
"""
Empty message sent to agents when woken up.
"""
pass
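# A minimal illustration (not part of the original module): message_id
# autoincrements at construction, so ties in delivery time resolve FIFO when
# (time, ..., message) tuples are pushed onto a priority queue.
#
#     a, b = WakeupMsg(), WakeupMsg()
#     assert a.message_id < b.message_id
#     assert a < b                     # __lt__ compares message_id
#     assert a.type() == "WakeupMsg"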
| 1,828 | 32.254545 | 88 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_core/utils.py
|
"""
General purpose utility functions for the simulator, attached to no particular class.
Available to any agent or other module/utility. Should not require references to
any simulator object (kernel, agent, etc).
"""
import inspect
import hashlib
import os
import pickle
from typing import List, Dict, Any, Callable
import numpy as np
import pandas as pd
from . import NanosecondTime
def subdict(d: Dict[str, Any], keys: List[str]) -> Dict[str, Any]:
"""
Returns a dictionnary with only the keys defined in the keys list
Arguments:
- d: original dictionnary
- keys: list of keys to keep
Returns:
- dictionnary with only the subset of keys
"""
return {k: v for k, v in d.items() if k in keys}
def restrictdict(d: Dict[str, Any], keys: List[str]) -> Dict[str, Any]:
"""
Returns a dictionnary with only the intersections of the keys defined in the keys list and the keys in the o
Arguments:
- d: original dictionnary
- keys: list of keys to keep
Returns:
- dictionnary with only the subset of keys
"""
inter = [k for k in d.keys() if k in keys]
return subdict(d, inter)
def custom_eq(a: Any, b: Any) -> bool:
"""returns a==b or True if both a and b are null"""
return (a == b) | ((a != a) & (b != b))
# Utility function to get agent wake up times to follow a U-quadratic distribution.
def get_wake_time(open_time, close_time, a=0, b=1):
"""
Draw a time U-quadratically distributed between open_time and close_time.
    For details on the U-quadratic distribution see https://en.wikipedia.org/wiki/U-quadratic_distribution.
"""
def cubic_pow(n: float) -> float:
"""Helper function: returns *real* cube root of a float."""
if n < 0:
return -((-n) ** (1.0 / 3.0))
else:
return n ** (1.0 / 3.0)
# Use inverse transform sampling to obtain variable sampled from U-quadratic
def u_quadratic_inverse_cdf(y):
alpha = 12 / ((b - a) ** 3)
beta = (b + a) / 2
result = cubic_pow((3 / alpha) * y - (beta - a) ** 3) + beta
return result
uniform_0_1 = np.random.rand()
random_multiplier = u_quadratic_inverse_cdf(uniform_0_1)
wake_time = open_time + random_multiplier * (close_time - open_time)
return wake_time
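# An illustrative check (not part of the original module): the U-quadratic density
# peaks at the interval endpoints, so draws cluster near open_time and close_time.
#
#     times = [get_wake_time(0, 1_000_000) for _ in range(10_000)]
#     edge_decile = sum(t < 100_000 for t in times)
#     middle_decile = sum(450_000 <= t < 550_000 for t in times)
#     assert edge_decile > middle_decile  # mass concentrates at the edges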
def fmt_ts(timestamp: NanosecondTime) -> str:
"""
Converts a timestamp stored as nanoseconds into a human readable string.
"""
return pd.Timestamp(timestamp, unit="ns").strftime("%Y-%m-%d %H:%M:%S")
def str_to_ns(string: str) -> NanosecondTime:
"""
Converts a human readable time-delta string into nanoseconds.
Arguments:
string: String to convert into nanoseconds. Uses Pandas to do this.
Examples:
- "1s" -> 1e9 ns
- "1min" -> 6e10 ns
- "00:00:30" -> 3e10 ns
"""
return pd.to_timedelta(string).to_timedelta64().astype(int)
def datetime_str_to_ns(string: str) -> NanosecondTime:
"""
Takes a datetime written as a string and returns in nanosecond unix timestamp.
Arguments:
string: String to convert into nanoseconds. Uses Pandas to do this.
"""
return pd.Timestamp(string).value
def ns_date(ns_datetime: NanosecondTime) -> NanosecondTime:
"""
Takes a datetime in nanoseconds unix timestamp and rounds it to that day at 00:00.
Arguments:
ns_datetime: Nanosecond time value to round.
"""
return ns_datetime - (ns_datetime % (24 * 3600 * int(1e9)))
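# A minimal usage sketch (illustrative, not part of the original module) showing how
# the nanosecond helpers compose:
#
#     one_min = str_to_ns("1min")                        # 60_000_000_000
#     t0 = datetime_str_to_ns("2020-06-03 09:30:00")
#     assert ns_date(t0) == datetime_str_to_ns("2020-06-03")  # midnight same day
#     assert fmt_ts(t0) == "2020-06-03 09:30:00"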
def parse_logs_df(end_state: dict) -> pd.DataFrame:
"""
Takes the end_state dictionnary returned by an ABIDES simulation goes through all
the agents, extracts their log, and un-nest them returns a single dataframe with the
logs from all the agents warning: this is meant to be used for debugging and
exploration.
"""
agents = end_state["agents"]
dfs = []
for agent in agents:
messages = []
for m in agent.log:
m = {
"EventTime": m[0] if isinstance(m[0], (int, np.int64)) else 0,
"EventType": m[1],
"Event": m[2],
}
event = m.get("Event", None)
if event == None:
event = {"EmptyEvent": True}
elif not isinstance(event, dict):
event = {"ScalarEventValue": event}
else:
pass
try:
del m["Event"]
except:
pass
m.update(event)
if m.get("agent_id") == None:
m["agent_id"] = agent.id
m["agent_type"] = agent.type
messages.append(m)
dfs.append(pd.DataFrame(messages))
return pd.concat(dfs)
# caching utils: not used by abides but useful to have
def input_sha_wrapper(func: Callable) -> Callable:
"""
compute a sha for the function call by looking at function name and inputs for the call
"""
    def inner(*args, **kvargs):
        argspec = inspect.getfullargspec(func)
        defaults = argspec.defaults or ()
        index_first_kv = len(argspec.args) - len(defaults)
        # map default-valued positional parameters to their defaults
        total_kvargs = dict(zip(argspec.args[index_first_kv:], defaults))
total_kvargs.update(kvargs)
input_sha = (
func.__name__
+ "_"
+ hashlib.sha1(str.encode(str((args, total_kvargs)))).hexdigest()
)
return {"input_sha": input_sha}
return inner
def cache_wrapper(
func: Callable, cache_dir="cache/", force_recompute=False
) -> Callable:
"""
local caching decorator
checks the functional call sha is only there is specified directory
"""
def inner(*args, **kvargs):
if not os.path.isdir(cache_dir):
os.mkdir(cache_dir)
sha_call = input_sha_wrapper(func)(*args, **kvargs)
cache_path = cache_dir + sha_call["input_sha"] + ".pkl"
if os.path.isfile(cache_path) and not force_recompute:
with open(cache_path, "rb") as handle:
result = pickle.load(handle)
return result
else:
result = func(*args, **kvargs)
with open(cache_path, "wb") as handle:
pickle.dump(result, handle)
return result
return inner
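# A minimal usage sketch (illustrative, not part of the original module):
# cache_wrapper is applied as a plain call rather than with `@` syntax, since it
# takes the target function as its first argument.
#
#     def expensive_simulation(seed=0):
#         ...  # any picklable computation
#
#     cached = cache_wrapper(expensive_simulation, cache_dir="cache/")
#     result = cached(seed=1)  # first call computes and pickles the result
#     result = cached(seed=1)  # second call loads cache/<sha>.pkl from disk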
| 6,468 | 29.804762 | 112 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_core/agent.py
|
import logging
from copy import deepcopy
from typing import Any, List, Optional, Tuple
import numpy as np
import pandas as pd
from . import NanosecondTime
from .message import Message, MessageBatch
from .utils import fmt_ts
logger = logging.getLogger(__name__)
class Agent:
"""
Base Agent class
Attributes:
id: Must be a unique number (usually autoincremented).
name: For human consumption, should be unique (often type + number).
type: For machine aggregation of results, should be same for all agents
following the same strategy (incl. parameter settings).
random_state: an np.random.RandomState object, already seeded. Every agent
is given a random state to use for any stochastic needs.
log_events: flag to log or not the events during the simulation
log_to_file: flag to write on disk or not the logged events
"""
def __init__(
self,
id: int,
name: Optional[str] = None,
type: Optional[str] = None,
random_state: Optional[np.random.RandomState] = None,
log_events: bool = True,
log_to_file: bool = True,
) -> None:
self.id: int = id
self.type: str = type or self.__class__.__name__
self.name: str = name or f"{self.type}_{self.id}"
self.random_state: np.random.RandomState = (
random_state
or np.random.RandomState(
seed=np.random.randint(low=0, high=2 ** 32, dtype="uint64")
)
)
self.log_events: bool = log_events
self.log_to_file: bool = log_to_file & log_events
# Kernel is supplied via kernel_initializing method of kernel lifecycle.
self.kernel = None
# What time does the agent think it is? Should be updated each time
# the agent wakes via wakeup or receive_message. (For convenience
# of reference throughout the Agent class hierarchy, NOT THE
# CANONICAL TIME.)
self.current_time: NanosecondTime = 0
# Agents may choose to maintain a log. During simulation,
# it should be stored as a list of dictionaries. The expected
# keys by default are: EventTime, EventType, Event. Other
# Columns may be added, but will then require specializing
# parsing and will increase output dataframe size. If there
# is a non-empty log, it will be written to disk as a Dataframe
# at kernel termination.
# It might, or might not, make sense to formalize these log Events
# as a class, with enumerated EventTypes and so forth.
self.log: List[Tuple[NanosecondTime, str, Any]] = []
self.logEvent("AGENT_TYPE", type)
### Flow of required kernel listening methods:
### init -> start -> (entire simulation) -> end -> terminate
def kernel_initializing(self, kernel) -> None:
"""
Called by the kernel one time when simulation first begins.
No other agents are guaranteed to exist at this time.
Kernel reference must be retained, as this is the only time the agent can
"see" it.
Arguments:
kernel: The Kernel instance running the experiment.
"""
self.kernel = kernel
logger.debug("{} exists!".format(self.name))
def kernel_starting(self, start_time: NanosecondTime) -> None:
"""
        Called by the kernel one time after kernel_initializing.
All other agents are guaranteed to exist at this time.
Base Agent schedules a wakeup call for the first available timestamp.
Subclass agents may override this behavior as needed.
Arguments:
start_time: The earliest time for which the agent can schedule a wakeup call
(or could receive a message).
"""
assert self.kernel is not None
logger.debug(
"Agent {} ({}) requesting kernel wakeup at time {}".format(
self.id, self.name, fmt_ts(start_time)
)
)
self.set_wakeup(start_time)
def kernel_stopping(self) -> None:
"""
        Called by the kernel one time before kernel_terminating.
All other agents are guaranteed to exist at this time.
"""
pass
def kernel_terminating(self) -> None:
"""
Called by the kernel one time when simulation terminates.
No other agents are guaranteed to exist at this time.
"""
# If this agent has been maintaining a log, convert it to a Dataframe
# and request that the Kernel write it to disk before terminating.
if self.log and self.log_to_file:
df_log = pd.DataFrame(self.log, columns=("EventTime", "EventType", "Event"))
df_log.set_index("EventTime", inplace=True)
self.write_log(df_log)
### Methods for internal use by agents (e.g. bookkeeping).
def logEvent(
self,
event_type: str,
event: Any = "",
append_summary_log: bool = False,
deepcopy_event: bool = True,
) -> None:
"""
Adds an event to this agent's log.
The deepcopy of the Event field, often an object, ensures later state
changes to the object will not retroactively update the logged event.
Arguments:
            event_type: label of the event (e.g., order submitted, order accepted, last trade, etc.)
            event: actual event to be logged
            append_summary_log: if True, the event is also appended to the kernel's summary log
            deepcopy_event: Set to False to skip deepcopying the event object.
"""
if not self.log_events:
return
# We can make a single copy of the object (in case it is an arbitrary
# class instance) for both potential log targets, because we don't
# alter logs once recorded.
if deepcopy_event:
event = deepcopy(event)
self.log.append((self.current_time, event_type, event))
if append_summary_log:
assert self.kernel is not None
self.kernel.append_summary_log(self.id, event_type, event)
### Methods required for communication from other agents.
### The kernel will _not_ call these methods on its own behalf,
    ### only to pass traffic from other agents.
def receive_message(
self, current_time: NanosecondTime, sender_id: int, message: Message
) -> None:
"""
Called each time a message destined for this agent reaches the front of the
kernel's priority queue.
Arguments:
current_time: The simulation time at which the kernel is delivering this
message -- the agent should treat this as "now".
sender_id: The ID of the agent who sent the message.
message: An object guaranteed to inherit from the message.Message class.
"""
assert self.kernel is not None
self.current_time = current_time
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"At {}, agent {} ({}) received: {}".format(
fmt_ts(current_time), self.id, self.name, message
)
)
def wakeup(self, current_time: NanosecondTime) -> None:
"""
Agents can request a wakeup call at a future simulation time using
``Agent.set_wakeup()``.
This is the method called when the wakeup time arrives.
Arguments:
current_time: The simulation time at which the kernel is delivering this
message -- the agent should treat this as "now".
"""
assert self.kernel is not None
self.current_time = current_time
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"At {}, agent {} ({}) received wakeup.".format(
fmt_ts(current_time), self.id, self.name
)
)
### Presently the kernel expects agent IDs only, not agent references.
### It is possible this could change in the future. Normal agents will
### not typically wish to request additional delay.
def send_message(self, recipient_id: int, message: Message, delay: int = 0) -> None:
"""
Sends a message to another Agent.
Arguments:
recipient_id: ID of the agent receiving the message.
message: The ``Message`` class instance to send.
delay: Represents an agent's request for ADDITIONAL delay (beyond the
Kernel's mandatory computation + latency delays). Represents parallel
pipeline processing delays (that should delay the transmission of
messages but do not make the agent "busy" and unable to respond to new
messages)
"""
assert self.kernel is not None
self.kernel.send_message(self.id, recipient_id, message, delay=delay)
def send_message_batch(
self, recipient_id: int, messages: List[Message], delay: NanosecondTime = 0
) -> None:
"""
Sends a batch of messages to another Agent.
Arguments:
recipient_id: ID of the agent receiving the messages.
messages: A list of ``Message`` class instances to send.
delay: Represents an agent's request for ADDITIONAL delay (beyond the
Kernel's mandatory computation + latency delays). Represents parallel
pipeline processing delays (that should delay the transmission of messages
but do not make the agent "busy" and unable to respond to new messages)
"""
assert self.kernel is not None
self.kernel.send_message(
self.id, recipient_id, MessageBatch(messages), delay=delay
)
def set_wakeup(self, requested_time: NanosecondTime) -> None:
"""
Called to receive a "wakeup call" from the kernel at some requested future time.
Arguments:
requested_time: Defaults to the next possible timestamp. Wakeup time cannot
be the current time or a past time.
"""
assert self.kernel is not None
self.kernel.set_wakeup(self.id, requested_time)
def get_computation_delay(self):
"""Queries thr agent's current computation delay from the kernel."""
return self.kernel.get_agent_compute_delay(sender_id=self.id)
def set_computation_delay(self, requested_delay: int) -> None:
"""
Calls the kernel to update the agent's computation delay.
This does not initiate a global delay, nor an immediate delay for the agent.
Rather it sets the new default delay for the calling agent. The delay will be
applied upon every return from wakeup or recvMsg.
Note that this delay IS applied to any messages sent by the agent during the
current wake cycle (simulating the messages popping out at the end of its
"thinking" time).
Also note that we DO permit a computation delay of zero, but this should really
only be used for special or massively parallel agents.
Arguments:
requested_delay: delay given in nanoseconds.
"""
assert self.kernel is not None
self.kernel.set_agent_compute_delay(
sender_id=self.id, requested_delay=requested_delay
)
def delay(self, additional_delay: int) -> None:
"""
Accumulates a temporary delay for the current wake cycle for this agent.
This will apply the total delay (at time of send_message) to each message, and
will modify the agent's next available time slot. These happen on top of the
agent's compute delay BUT DO NOT ALTER IT. (i.e. effects are transient). Mostly
useful for staggering outbound messages.
Arguments:
additional_delay: additional delay given in nanoseconds.
"""
assert self.kernel is not None
self.kernel.delay_agent(sender_id=self.id, additional_delay=additional_delay)
def write_log(self, df_log: pd.DataFrame, filename: Optional[str] = None) -> None:
"""
Called by the agent, usually at the very end of the simulation just before
kernel shutdown, to write to disk any log dataframe it has been accumulating
during simulation.
The format can be decided by the agent, although changes will require a special
tool to read and parse the logs. The Kernel places the log in a unique
directory per run, with one filename per agent, also decided by the Kernel using
agent type, id, etc.
If filename is None the Kernel will construct a filename based on the name of
the Agent requesting log archival.
Arguments:
df_log: dataframe that contains all the logged events during the simulation
filename: Location on disk to write the log to.
"""
assert self.kernel is not None
self.kernel.write_log(self.id, df_log, filename)
def update_agent_state(self, state: Any) -> None:
"""
Agents should use this method to replace their custom state in the dictionary
the Kernel will return to the experimental config file at the end of the
simulation.
This is intended to be write-only, and agents should not use it to store
information for their own later use.
Arguments:
state: The new state.
"""
assert self.kernel is not None
self.kernel.update_agent_state(self.id, state)
### Internal methods that should not be modified without a very good reason.
def __lt__(self, other) -> bool:
# Required by Python3 for this object to be placed in a priority queue.
return f"{self.id}" < f"{other.id}"
| 13,851 | 35.645503 | 101 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_core/latency_model.py
|
from typing import Union
import numpy as np
class LatencyModel:
"""
LatencyModel provides a latency model for messages in the ABIDES simulation. The
default is a cubic model as described herein.
Arguments:
random_state: An initialized ``np.random.RandomState`` object.
min_latency: A 2-D numpy array of pairwise minimum latency. Integer nanoseconds.
latency_model: Either 'cubic' or 'deterministic'.
connected: Must be either scalar True or a 2-D numpy array. A False array entry
prohibits communication regardless of values in other parameters.
        jitter: Requires a scalar, a 1-D numpy vector, or a 2-D numpy array. Controls
            the shape of the cubic curve for per-message additive latency noise. This
            is the 'a' parameter in the cubic equation below. Float in range [0,1].
jitter_clip: Requires a scalar, a 1-D numpy vector, or a 2-D numpy array.
Controls the minimum value of the uniform range from which 'x' is selected
when applying per-message noise. Higher values create a LOWER maximum value
for latency noise (clipping the cubic curve). Parameter is exclusive, 'x' is
drawn from (jitter_clip,1]. Float in range [0,1].
jitter_unit: Requires a scalar, a 1-D numpy vector, or a 2-D numpy array. This
is the fraction of min_latency that will be considered the unit of
measurement for jitter. For example, if this parameter is 10, an agent pair
with min_latency of 333ns will have a 33.3ns unit of measurement for jitter,
and an agent pair with min_latency of 13ms will have a 1.3ms unit of
measurement for jitter. Assuming 'jitter' = 0.5 and 'jitter_clip' = 0, the
first agent pair will have 50th percentile (median) jitter of 133.3ns and
90th percentile jitter of 16.65us, and the second agent pair will have 50th
percentile (median) jitter of 5.2ms and 90th percentile jitter of 650ms.
All values except min_latency may be specified as a single scalar for simplicity,
and have defaults to allow ease of use as:
``latency = LatencyModel('cubic', min_latency = some_array)``
All values may be specified with directional pairwise granularity to permit quite
complex network models, varying quality of service, or asymmetric capabilities when
these are necessary.
**Cubic Model:**
    Using the 'cubic' model, the final latency for a message is computed as:
    ``min_latency + (a / x^3) * (min_latency / jitter_unit)``, where 'x' is randomly
    drawn from a uniform distribution ``(jitter_clip, 1]``, and 'a' is the jitter
    parameter defined below.
The 'cubic' model requires five parameters (there are defaults for four). Scalar
values apply to all messages between all agents. Numpy array parameters are all
indexed by simulation agent_id. Vector arrays (1-D) are indexed to the sending
agent. For 2-D arrays of directional pairwise values, row index is the sending agent
and column index is the receiving agent. These do not have to be symmetric.
Selection within the range is from a cubic distribution, so extreme high values will be
quite rare. The table below shows example values based on the jitter parameter a (column
header) and x drawn from a uniform distribution from [0,1] (row header).::
x \ a 0.001 0.10 0.20 0.30 0.40 0.50 0.60 0.70 0.80 0.90 1.00
0.001 1M 100M 200M 300M 400M 500M 600M 700M 800M 900M 1B
0.01 1K 100K 200K 300K 400K 500K 600K 700K 800K 900K 1M
0.05 8.00 800.00 1.6K 2.4K 3.2K 4.0K 4.8K 5.6K 6.4K 7.2K 8.0K
0.10 1.00 100.00 200.00 300.00 400.00 500.00 600.00 700.00 800.00 900.00 1,000.00
0.20 0.13 12.50 25.00 37.50 50.00 62.50 75.00 87.50 100.00 112.50 125.00
0.30 0.04 3.70 7.41 11.11 14.81 18.52 22.22 25.93 29.63 33.33 37.04
0.40 0.02 1.56 3.13 4.69 6.25 7.81 9.38 10.94 12.50 14.06 15.63
0.50 0.01 0.80 1.60 2.40 3.20 4.00 4.80 5.60 6.40 7.20 8.00
0.60 0.00 0.46 0.93 1.39 1.85 2.31 2.78 3.24 3.70 4.17 4.63
0.70 0.00 0.29 0.58 0.87 1.17 1.46 1.75 2.04 2.33 2.62 2.92
0.80 0.00 0.20 0.39 0.59 0.78 0.98 1.17 1.37 1.56 1.76 1.95
0.90 0.00 0.14 0.27 0.41 0.55 0.69 0.82 0.96 1.10 1.23 1.37
0.95 0.00 0.12 0.23 0.35 0.47 0.58 0.70 0.82 0.93 1.05 1.17
0.99 0.00 0.10 0.21 0.31 0.41 0.52 0.62 0.72 0.82 0.93 1.03
1.00 0.00 0.10 0.20 0.30 0.40 0.50 0.60 0.70 0.80 0.90 1.00
"""
def __init__(
self,
random_state: np.random.RandomState,
min_latency: np.ndarray,
latency_model: str = "cubic",
# Args for cubic latency model:
connected: bool = True,
jitter: float = 0.5,
jitter_clip: float = 0.1,
jitter_unit: float = 10.0,
) -> None:
self.latency_model: str = latency_model.lower()
self.random_state: np.random.RandomState = random_state
self.min_latency: np.ndarray = min_latency
if self.latency_model not in ["cubic", "deterministic"]:
raise Exception(
f"Config error: unknown latency model requested ({self.latency_model})"
)
# Check required parameters and apply defaults for the selected model.
if self.latency_model == "cubic":
self.connected = connected
self.jitter = jitter
self.jitter_clip = jitter_clip
self.jitter_unit = jitter_unit
def get_latency(self, sender_id: int, recipient_id: int) -> float:
"""LatencyModel.get_latency() samples and returns the final latency for a single
Message according to the model specified during initialization.
Arguments:
sender_id: Simulation agent_id for the agent sending the message.
recipient_id: Simulation agent_id for the agent receiving the message.
"""
min_latency = self._extract(self.min_latency, sender_id, recipient_id)
if self.latency_model == "cubic":
# Generate latency for a single message using the cubic model.
# If agents cannot communicate in this direction, return special latency -1.
if not self._extract(self.connected, sender_id, recipient_id):
return -1
# Extract the cubic parameters and compute the final latency.
a = self._extract(self.jitter, sender_id, recipient_id)
clip = self._extract(self.jitter_clip, sender_id, recipient_id)
unit = self._extract(self.jitter_unit, sender_id, recipient_id)
# Jitter requires a uniform random draw.
x = self.random_state.uniform(low=clip, high=1.0)
# Now apply the cubic model to compute jitter and the final message latency.
latency = min_latency + ((a / x ** 3) * (min_latency / unit))
return latency
else: # self.latency_model == 'deterministic'
return min_latency
def _extract(self, param: Union[float, np.ndarray], sid: int, rid: int):
"""Internal function to extract correct values for a sender->recipient
pair from parameters that can be specified as scalar, 1-D ndarray, or 2-D ndarray.
Arguments:
param: The parameter (not parameter name) from which to extract a value.
sid: The simulation sender_id agent id.
rid: The simulation recipient agent id.
"""
if np.isscalar(param):
return param
if isinstance(param, np.ndarray):
if param.ndim == 1:
return param[sid]
elif param.ndim == 2:
return param[sid, rid]
raise Exception(
"Config error: LatencyModel parameter is not scalar, 1-D ndarray, or 2-D ndarray."
)
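# A minimal usage sketch (illustrative, not part of the original module): two agents
# 1ms apart under the default cubic jitter; get_latency() returns min_latency plus a
# heavy-tailed, strictly non-negative noise term.
#
#     rs = np.random.RandomState(0)
#     min_lat = np.full((2, 2), 1_000_000)  # pairwise 1ms, in nanoseconds
#     model = LatencyModel(random_state=rs, min_latency=min_lat)
#     lat = model.get_latency(sender_id=0, recipient_id=1)
#     assert lat >= 1_000_000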
| 8,166 | 50.36478 | 94 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_core/__init__.py
|
# define first to prevent circular import errors
NanosecondTime = int
from .agent import Agent
from .kernel import Kernel
from .latency_model import LatencyModel
from .message import Message, MessageBatch
| 206 | 24.875 | 48 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/abides_core/kernel.py
|
import logging
import queue
import os
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, Type
import numpy as np
import pandas as pd
from . import NanosecondTime
from .agent import Agent
from .message import Message, MessageBatch, WakeupMsg
from .latency_model import LatencyModel
from .utils import fmt_ts, str_to_ns
logger = logging.getLogger(__name__)
class Kernel:
"""
ABIDES Kernel
Arguments:
agents: List of agents to include in the simulation.
start_time: Timestamp giving the start time of the simulation.
stop_time: Timestamp giving the end time of the simulation.
default_computation_delay: time penalty applied to an agent each time it is
awakened (wakeup or recvMsg).
        default_latency: default latency applied to each message (in ns); it models
            the physical latency of real systems and avoids an infinite loop of
            events happening at the same exact time.
        agent_latency: legacy parameter, used when agent_latency_model is not defined.
        latency_noise: legacy parameter, used when agent_latency_model is not defined.
        agent_latency_model: model of latency used for the network of agents.
        skip_log: if True, no log is saved on disk.
        seed: seed of the simulation.
        log_dir: directory where data is stored.
custom_properties: Different attributes that can be added to the simulation
(e.g., the oracle).
"""
def __init__(
self,
agents: List[Agent],
start_time: NanosecondTime = str_to_ns("09:30:00"),
stop_time: NanosecondTime = str_to_ns("16:00:00"),
default_computation_delay: int = 1,
default_latency: float = 1,
agent_latency: Optional[List[List[float]]] = None,
latency_noise: List[float] = [1.0],
agent_latency_model: Optional[LatencyModel] = None,
skip_log: bool = True,
seed: Optional[int] = None,
log_dir: Optional[str] = None,
custom_properties: Optional[Dict[str, Any]] = None,
random_state: Optional[np.random.RandomState] = None,
) -> None:
custom_properties = custom_properties or {}
self.random_state: np.random.RandomState = (
random_state
or np.random.RandomState(
seed=np.random.randint(low=0, high=2 ** 32, dtype="uint64")
)
)
# A single message queue to keep everything organized by increasing
# delivery timestamp.
        self.messages: "queue.PriorityQueue[Tuple[NanosecondTime, Tuple[int, int, Message]]]" = (
            queue.PriorityQueue()
        )
# Timestamp at which the Kernel was created. Primarily used to
# create a unique log directory for this run. Also used to
# print some elapsed time and messages per second statistics.
self.kernel_wall_clock_start: datetime = datetime.now()
self.mean_result_by_agent_type: Dict[str, Any] = {}
self.agent_count_by_type: Dict[str, int] = {}
# The Kernel maintains a summary log to which agents can write
# information that should be centralized for very fast access
# by separate statistical summary programs. Detailed event
# logging should go only to the agent's individual log. This
# is for things like "final position value" and such.
self.summary_log: List[Dict[str, Any]] = []
        # Whether the kernel has already run at least once.
self.has_run = False
for key, value in custom_properties.items():
setattr(self, key, value)
# agents must be a list of agents for the simulation,
# based on class agent.Agent
self.agents: List[Agent] = agents
# Filter for any ABIDES-Gym agents - does not require dependency on ABIDES-gym.
self.gym_agents: List[Agent] = list(
filter(
lambda agent: "CoreGymAgent"
in [c.__name__ for c in agent.__class__.__bases__],
agents,
)
)
# Temporary check until ABIDES-gym supports multiple gym agents
assert (
len(self.gym_agents) <= 1
), "ABIDES-gym currently only supports using one gym agent"
logger.debug(f"Detected {len(self.gym_agents)} ABIDES-gym agents")
# Simulation custom state in a freeform dictionary. Allows config files
# that drive multiple simulations, or require the ability to generate
# special logs after simulation, to obtain needed output without special
# case code in the Kernel. Per-agent state should be handled using the
# provided update_agent_state() method.
self.custom_state: Dict[str, Any] = {}
# The kernel start and stop time (first and last timestamp in
# the simulation, separate from anything like exchange open/close).
self.start_time: NanosecondTime = start_time
self.stop_time: NanosecondTime = stop_time
# This is a NanosecondTime that includes the date.
self.current_time: NanosecondTime = start_time
# The global seed, NOT used for anything agent-related.
self.seed: Optional[int] = seed
# Should the Kernel skip writing agent logs?
self.skip_log: bool = skip_log
# If a log directory was not specified, use the initial wallclock.
self.log_dir: str = log_dir or str(
int(self.kernel_wall_clock_start.timestamp())
)
# The kernel maintains a current time for each agent to allow
# simulation of per-agent computation delays. The agent's time
# is pushed forward (see below) each time it awakens, and it
# cannot receive new messages/wakeups until the global time
# reaches the agent's time. (i.e. it cannot act again while
# it is still "in the future")
# This also nicely enforces agents being unable to act before
# the simulation start_time.
self.agent_current_times: List[NanosecondTime] = [self.start_time] * len(
self.agents
)
# agent_computation_delays is in nanoseconds, starts with a default
# value from config, and can be changed by any agent at any time
# (for itself only). It represents the time penalty applied to
# an agent each time it is awakened (wakeup or recvMsg). The
# penalty applies _after_ the agent acts, before it may act again.
self.agent_computation_delays: List[int] = [default_computation_delay] * len(
self.agents
)
# If an agent_latency_model is defined, it will be used instead of
# the older, non-model-based attributes.
self.agent_latency_model = agent_latency_model
# If an agent_latency_model is NOT defined, the older parameters:
# agent_latency (or default_latency) and latency_noise should be specified.
# These should be considered deprecated and will be removed in the future.
# If agent_latency is not defined, define it using the default_latency.
# This matrix defines the communication delay between every pair of
# agents.
        if agent_latency is None:
            # Build each row separately; multiplying the outer list would alias
            # the same row object len(self.agents) times.
            self.agent_latency: List[List[float]] = [
                [default_latency] * len(self.agents) for _ in range(len(self.agents))
            ]
else:
self.agent_latency = agent_latency
# There is a noise model for latency, intended to be a one-sided
# distribution with the peak at zero. By default there is no noise
# (100% chance to add zero ns extra delay). Format is a list with
# list index = ns extra delay, value = probability of this delay.
self.latency_noise: List[float] = latency_noise
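        # Example: latency_noise=[0.8, 0.15, 0.05] (illustrative, not a shipped
        # default) means an 80% chance of +0 ns, 15% of +1 ns and 5% of +2 ns
        # extra delay on each message.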
# The kernel maintains an accumulating additional delay parameter
# for the current agent. This is applied to each message sent
# and upon return from wakeup/receive_message, in addition to the
# agent's standard computation delay. However, it never carries
# over to future wakeup/receive_message calls. It is useful for
# staggering of sent messages.
self.current_agent_additional_delay: int = 0
self.show_trace_messages: bool = False
        logger.debug("Kernel initialized")
def run(self) -> Dict[str, Any]:
"""
Wrapper to run the entire simulation (when not running in ABIDES-Gym mode).
3 Steps:
- Simulation Instantiation
- Simulation Run
- Simulation Termination
Returns:
            An object that contains all the simulation objects at the end of the simulation.
"""
self.initialize()
self.runner()
return self.terminate()
# This is called to actually start the simulation, once all agent
# configuration is done.
def initialize(self) -> None:
"""
Instantiation of the simulation:
        - Creation of the different objects of the simulation.
        - Instantiation of the latency network.
        - Calls kernel_initializing() and kernel_starting() on the different agents.
"""
logger.debug("Kernel started")
logger.debug("Simulation started!")
# Note that num_simulations has not yet been really used or tested
# for anything. Instead we have been running multiple simulations
# with coarse parallelization from a shell script
# Event notification for kernel init (agents should not try to
# communicate with other agents, as order is unknown). Agents
# should initialize any internal resources that may be needed
# to communicate with other agents during agent.kernel_starting().
# Kernel passes self-reference for agents to retain, so they can
# communicate with the kernel in the future (as it does not have
# an agentID).
logger.debug("--- Agent.kernel_initializing() ---")
for agent in self.agents:
agent.kernel_initializing(self)
# Event notification for kernel start (agents may set up
# communications or references to other agents, as all agents
# are guaranteed to exist now). Agents should obtain references
# to other agents they require for proper operation (exchanges,
# brokers, subscription services...). Note that we generally
# don't (and shouldn't) permit agents to get direct references
# to other agents (like the exchange) as they could then bypass
# the Kernel, and therefore simulation "physics" to send messages
# directly and instantly or to perform disallowed direct inspection
# of the other agent's state. Agents should instead obtain the
# agent ID of other agents, and communicate with them only via
# the Kernel. Direct references to utility objects that are not
# agents are acceptable (e.g. oracles).
logger.debug("--- Agent.kernel_starting() ---")
for agent in self.agents:
agent.kernel_starting(self.start_time)
# Set the kernel to its start_time.
self.current_time = self.start_time
logger.debug("--- Kernel Clock started ---")
logger.debug("Kernel.current_time is now {}".format(fmt_ts(self.current_time)))
# Start processing the Event Queue.
logger.debug("--- Kernel Event Queue begins ---")
logger.debug(
"Kernel will start processing messages. Queue length: {}".format(
len(self.messages.queue)
)
)
# Track starting wall clock time and total message count for stats at the end.
self.event_queue_wall_clock_start = datetime.now()
self.ttl_messages = 0
def runner(
self, agent_actions: Optional[Tuple[Agent, List[Dict[str, Any]]]] = None
) -> Dict[str, Any]:
"""
        Start the simulation and the processing of the message queue.
        The optional argument agent_actions is a tuple of the experimental (Gym) agent
        and a list of action dictionaries it should perform before the queue resumes.
        Arguments:
            agent_actions: A tuple (agent, actions), where actions is a list of
                dictionaries, one per action to be performed by the experimental agent.
        Returns:
            A dictionary composed of two elements:
            - "done": True if the simulation is done, else False. It is True when the
              simulation reaches stop_time or when the message queue is empty.
            - "result": the raw_state returned by the gym experimental agent; it contains
              data that will be formatted in the gym environment to build the state,
              reward, info, etc. If there is no gym experimental agent, it is None.
        """
# run an action on a given agent before resuming queue: to be used to take exp agent action before resuming run
if agent_actions is not None:
exp_agent, action_list = agent_actions
exp_agent.apply_actions(action_list)
# Process messages until there aren't any (at which point there never can
# be again, because agents only "wake" in response to messages), or until
# the kernel stop time is reached.
while (
not self.messages.empty()
and self.current_time
and (self.current_time <= self.stop_time)
):
# Get the next message in timestamp order (delivery time) and extract it.
self.current_time, event = self.messages.get()
assert self.current_time is not None
sender_id, recipient_id, message = event
# Periodically print the simulation time and total messages, even if muted.
if self.ttl_messages % 100000 == 0:
logger.info(
"--- Simulation time: {}, messages processed: {:,}, wallclock elapsed: {:.2f}s ---".format(
fmt_ts(self.current_time),
self.ttl_messages,
(
datetime.now() - self.event_queue_wall_clock_start
).total_seconds(),
)
)
if self.show_trace_messages:
logger.debug("--- Kernel Event Queue pop ---")
logger.debug(
"Kernel handling {} message for agent {} at time {}".format(
message.type(), recipient_id, self.current_time
)
)
self.ttl_messages += 1
# In between messages, always reset the current_agent_additional_delay.
self.current_agent_additional_delay = 0
# Dispatch message to agent.
if isinstance(message, WakeupMsg):
# Test to see if the agent is already in the future. If so,
# delay the wakeup until the agent can act again.
if self.agent_current_times[recipient_id] > self.current_time:
# Push the wakeup call back into the PQ with a new time.
self.messages.put(
(
self.agent_current_times[recipient_id],
(sender_id, recipient_id, message),
)
)
if self.show_trace_messages:
logger.debug(
"After wakeup return, agent {} delayed from {} to {}".format(
recipient_id,
fmt_ts(self.current_time),
fmt_ts(self.agent_current_times[recipient_id]),
)
)
continue
# Set agent's current time to global current time for start
# of processing.
self.agent_current_times[recipient_id] = self.current_time
# Wake the agent and get value passed to kernel to listen for kernel interruption signal
wakeup_result = self.agents[recipient_id].wakeup(self.current_time)
# Delay the agent by its computation delay plus any transient additional delay requested.
self.agent_current_times[recipient_id] += (
self.agent_computation_delays[recipient_id]
+ self.current_agent_additional_delay
)
if self.show_trace_messages:
logger.debug(
"After wakeup return, agent {} delayed from {} to {}".format(
recipient_id,
fmt_ts(self.current_time),
fmt_ts(self.agent_current_times[recipient_id]),
)
)
                # Catch the kernel interruption signal and return wakeup_result,
                # which is the raw state from the gym agent.
                if wakeup_result is not None:
return {"done": False, "result": wakeup_result}
else:
# Test to see if the agent is already in the future. If so,
# delay the message until the agent can act again.
if self.agent_current_times[recipient_id] > self.current_time:
# Push the message back into the PQ with a new time.
self.messages.put(
(
self.agent_current_times[recipient_id],
(sender_id, recipient_id, message),
)
)
if self.show_trace_messages:
logger.debug(
"Agent in future: message requeued for {}".format(
fmt_ts(self.agent_current_times[recipient_id])
)
)
continue
# Set agent's current time to global current time for start
# of processing.
self.agent_current_times[recipient_id] = self.current_time
# Deliver the message.
if isinstance(message, MessageBatch):
messages = message.messages
else:
messages = [message]
for message in messages:
# Delay the agent by its computation delay plus any transient additional delay requested.
self.agent_current_times[recipient_id] += (
self.agent_computation_delays[recipient_id]
+ self.current_agent_additional_delay
)
if self.show_trace_messages:
logger.debug(
"After receive_message return, agent {} delayed from {} to {}".format(
recipient_id,
fmt_ts(self.current_time),
fmt_ts(self.agent_current_times[recipient_id]),
)
)
self.agents[recipient_id].receive_message(
self.current_time, sender_id, message
)
if self.messages.empty():
logger.debug("--- Kernel Event Queue empty ---")
if self.current_time and (self.current_time > self.stop_time):
logger.debug("--- Kernel Stop Time surpassed ---")
# if gets here means sim queue is fully processed, return to show sim is done
if len(self.gym_agents) > 0:
self.gym_agents[0].update_raw_state()
return {"done": True, "result": self.gym_agents[0].get_raw_state()}
else:
return {"done": True, "result": None}
def terminate(self) -> Dict[str, Any]:
"""
        Termination of the simulation. Called once the queue is empty, the gym
        environment is done, or the simulation reached the kernel stop time:
        - Calls the kernel_stopping of the agents
        - Calls the kernel_terminating of the agents
        Returns:
            custom_state: an object that contains everything in the simulation. In
                particular, it is useful for retrieving agents and/or logs after the
                simulation for analysis.
"""
# Record wall clock stop time and elapsed time for stats at the end.
event_queue_wall_clock_stop = datetime.now()
event_queue_wall_clock_elapsed = (
event_queue_wall_clock_stop - self.event_queue_wall_clock_start
)
# Event notification for kernel end (agents may communicate with
# other agents, as all agents are still guaranteed to exist).
# Agents should not destroy resources they may need to respond
# to final communications from other agents.
logger.debug("--- Agent.kernel_stopping() ---")
for agent in self.agents:
agent.kernel_stopping()
# Event notification for kernel termination (agents should not
# attempt communication with other agents, as order of termination
# is unknown). Agents should clean up all used resources as the
# simulation program may not actually terminate if num_simulations > 1.
logger.debug("\n--- Agent.kernel_terminating() ---")
for agent in self.agents:
agent.kernel_terminating()
logger.info(
"Event Queue elapsed: {}, messages: {:,}, messages per second: {:0.1f}".format(
event_queue_wall_clock_elapsed,
self.ttl_messages,
self.ttl_messages / event_queue_wall_clock_elapsed.total_seconds(),
)
)
# The Kernel adds a handful of custom state results for all simulations,
# which configurations may use, print, log, or discard.
self.custom_state[
"kernel_event_queue_elapsed_wallclock"
] = event_queue_wall_clock_elapsed
self.custom_state["kernel_slowest_agent_finish_time"] = max(
self.agent_current_times
)
self.custom_state["agents"] = self.agents
# Agents will request the Kernel to serialize their agent logs, usually
# during kernel_terminating, but the Kernel must write out the summary
# log itself.
self.write_summary_log()
# This should perhaps be elsewhere, as it is explicitly financial, but it
# is convenient to have a quick summary of the results for now.
logger.info("Mean ending value by agent type:")
for a in self.mean_result_by_agent_type:
value = self.mean_result_by_agent_type[a]
count = self.agent_count_by_type[a]
logger.info(f"{a}: {int(round(value / count)):d}")
logger.info("Simulation ending!")
return self.custom_state
def reset(self) -> None:
"""
Used in the gym core environment:
- First calls termination of the kernel, to close previous simulation
- Then initializes a new simulation
- Then runs the simulation (not specifying any action this time).
"""
        if self.has_run:  # meaning at least initialization has been run once
self.terminate()
self.initialize()
self.runner()
def send_message(
self, sender_id: int, recipient_id: int, message: Message, delay: int = 0
) -> None:
"""
Called by an agent to send a message to another agent.
The kernel supplies its own current_time (i.e. "now") to prevent possible abuse
by agents. The kernel will handle computational delay penalties and/or network
latency.
Arguments:
sender_id: ID of the agent sending the message.
recipient_id: ID of the agent receiving the message.
message: The ``Message`` class instance to send.
delay: Represents an agent's request for ADDITIONAL delay (beyond the
Kernel's mandatory computation + latency delays). Represents parallel
pipeline processing delays (that should delay the transmission of
messages but do not make the agent "busy" and unable to respond to new
messages)
"""
# Apply the agent's current computation delay to effectively "send" the message
# at the END of the agent's current computation period when it is done "thinking".
# NOTE: sending multiple messages on a single wake will transmit all at the same
# time, at the end of computation. To avoid this, use Agent.delay() to accumulate
# a temporary delay (current cycle only) that will also stagger messages.
# The optional pipeline delay parameter DOES push the send time forward, since it
# represents "thinking" time before the message would be sent. We don't use this
# for much yet, but it could be important later.
# This means message delay (before latency) is the agent's standard computation
# delay PLUS any accumulated delay for this wake cycle PLUS any one-time
# requested delay for this specific message only.
sent_time = (
self.current_time
+ self.agent_computation_delays[sender_id]
+ self.current_agent_additional_delay
+ delay
)
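        # Worked example (illustrative): with computation delay 50 ns, accumulated
        # additional delay 20 ns and delay=30, the message is "sent" at
        # current_time + 100 ns; network latency below is then added on top to
        # obtain the delivery time.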
# Apply communication delay per the agent_latency_model, if defined, or the
# agent_latency matrix [sender_id][recipient_id] otherwise.
if self.agent_latency_model is not None:
latency: float = self.agent_latency_model.get_latency(
sender_id=sender_id, recipient_id=recipient_id
)
deliver_at = sent_time + int(latency)
if self.show_trace_messages:
logger.debug(
"Kernel applied latency {}, accumulated delay {}, one-time delay {} on send_message from: {} to {}, scheduled for {}".format(
latency,
self.current_agent_additional_delay,
delay,
self.agents[sender_id].name,
self.agents[recipient_id].name,
fmt_ts(deliver_at),
)
)
else:
latency = self.agent_latency[sender_id][recipient_id]
noise = self.random_state.choice(
len(self.latency_noise), p=self.latency_noise
)
deliver_at = sent_time + int(latency + noise)
if self.show_trace_messages:
logger.debug(
"Kernel applied latency {}, noise {}, accumulated delay {}, one-time delay {} on send_message from: {} to {}, scheduled for {}".format(
latency,
noise,
self.current_agent_additional_delay,
delay,
self.agents[sender_id].name,
self.agents[recipient_id].name,
fmt_ts(deliver_at),
)
)
# Finally drop the message in the queue with priority == delivery time.
self.messages.put((deliver_at, (sender_id, recipient_id, message)))
if self.show_trace_messages:
logger.debug(
"Sent time: {}, current time {}, computation delay {}".format(
sent_time,
fmt_ts(self.current_time),
self.agent_computation_delays[sender_id],
)
)
logger.debug("Message queued: {}".format(message))
def set_wakeup(
self, sender_id: int, requested_time: Optional[NanosecondTime] = None
) -> None:
"""
Called by an agent to receive a "wakeup call" from the kernel at some requested
future time.
NOTE: The agent is responsible for maintaining any required state; the kernel
will not supply any parameters to the ``wakeup()`` call.
Arguments:
sender_id: The ID of the agent making the call.
            requested_time: Defaults to the next possible timestamp. The wakeup
                time cannot be in the past.
"""
if requested_time is None:
requested_time = self.current_time + 1
if self.current_time and (requested_time < self.current_time):
raise ValueError(
"set_wakeup() called with requested time not in future",
"current_time:",
self.current_time,
"requested_time:",
requested_time,
)
if self.show_trace_messages:
logger.debug(
"Kernel adding wakeup for agent {} at time {}".format(
sender_id, fmt_ts(requested_time)
)
)
self.messages.put((requested_time, (sender_id, sender_id, WakeupMsg())))
def get_agent_compute_delay(self, sender_id: int) -> int:
"""
Allows an agent to query its current computation delay.
Arguments:
sender_id: The ID of the agent to get the computational delay for.
"""
return self.agent_computation_delays[sender_id]
def set_agent_compute_delay(self, sender_id: int, requested_delay: int) -> None:
"""
Called by an agent to update its computation delay.
This does not initiate a global delay, nor an immediate delay for the agent.
Rather it sets the new default delay for the calling agent. The delay will be
applied upon every return from wakeup or recvMsg. Note that this delay IS
applied to any messages sent by the agent during the current wake cycle
(simulating the messages popping out at the end of its "thinking" time).
Also note that we DO permit a computation delay of zero, but this should really
only be used for special or massively parallel agents.
Arguments:
sender_id: The ID of the agent making the call.
requested_delay: delay given in nanoseconds.
"""
# requested_delay should be in whole nanoseconds.
if not isinstance(requested_delay, int):
raise ValueError(
"Requested computation delay must be whole nanoseconds.",
"requested_delay:",
requested_delay,
)
# requested_delay must be non-negative.
if requested_delay < 0:
raise ValueError(
"Requested computation delay must be non-negative nanoseconds.",
"requested_delay:",
requested_delay,
)
self.agent_computation_delays[sender_id] = requested_delay
def delay_agent(self, sender_id: int, additional_delay: int) -> None:
"""
Called by an agent to accumulate temporary delay for the current wake cycle.
This will apply the total delay (at time of send_message) to each message, and
will modify the agent's next available time slot. These happen on top of the
agent's compute delay BUT DO NOT ALTER IT. (i.e. effects are transient). Mostly
useful for staggering outbound messages.
Arguments:
sender_id: The ID of the agent making the call.
additional_delay: additional delay given in nanoseconds.
"""
# additional_delay should be in whole nanoseconds.
if not isinstance(additional_delay, int):
raise ValueError(
"Additional delay must be whole nanoseconds.",
"additional_delay:",
additional_delay,
)
# additional_delay must be non-negative.
if additional_delay < 0:
raise ValueError(
"Additional delay must be non-negative nanoseconds.",
"additional_delay:",
additional_delay,
)
self.current_agent_additional_delay += additional_delay
def find_agents_by_type(self, agent_type: Type[Agent]) -> List[int]:
"""
Returns the IDs of any agents that are of the given type.
Arguments:
            agent_type: The agent type to search for.
Returns:
A list of agent IDs that are instances of the type.
"""
return [agent.id for agent in self.agents if isinstance(agent, agent_type)]
def write_log(
self, sender_id: int, df_log: pd.DataFrame, filename: Optional[str] = None
) -> None:
"""
Called by any agent, usually at the very end of the simulation just before
kernel shutdown, to write to disk any log dataframe it has been accumulating
during simulation.
The format can be decided by the agent, although changes will require a special
tool to read and parse the logs. The Kernel places the log in a unique
directory per run, with one filename per agent, also decided by the Kernel using
agent type, id, etc.
If there are too many agents, placing all these files in a directory might be
unfortunate. Also if there are too many agents, or if the logs are too large,
memory could become an issue. In this case, we might have to take a speed hit to
write logs incrementally.
If filename is not None, it will be used as the filename. Otherwise, the Kernel
will construct a filename based on the name of the Agent requesting log archival.
Arguments:
sender_id: The ID of the agent making the call.
df_log: dataframe representation of the log that contains all the events logged during the simulation.
filename: Location on disk to write the log to.
"""
if self.skip_log:
return
path = os.path.join(".", "log", self.log_dir)
if filename:
file = "{}.bz2".format(filename)
else:
file = "{}.bz2".format(self.agents[sender_id].name.replace(" ", ""))
if not os.path.exists(path):
os.makedirs(path)
df_log.to_pickle(os.path.join(path, file), compression="bz2")
def append_summary_log(self, sender_id: int, event_type: str, event: Any) -> None:
"""
We don't even include a timestamp, because this log is for one-time-only summary
reporting, like starting cash, or ending cash.
Arguments:
sender_id: The ID of the agent making the call.
event_type: The type of the event.
event: The event to append to the log.
"""
self.summary_log.append(
{
"AgentID": sender_id,
"AgentStrategy": self.agents[sender_id].type,
"EventType": event_type,
"Event": event,
}
)
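        # A typical call from an agent (illustrative event type and value):
        # kernel.append_summary_log(self.id, "ENDING_CASH", 10_000_000)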
def write_summary_log(self) -> None:
path = os.path.join(".", "log", self.log_dir)
file = "summary_log.bz2"
if not os.path.exists(path):
os.makedirs(path)
df_log = pd.DataFrame(self.summary_log)
df_log.to_pickle(os.path.join(path, file), compression="bz2")
def update_agent_state(self, agent_id: int, state: Any) -> None:
"""
Called by an agent that wishes to replace its custom state in the dictionary the
Kernel will return at the end of simulation. Shared state must be set directly,
and agents should coordinate that non-destructively.
Note that it is never necessary to use this kernel state dictionary for an agent
to remember information about itself, only to report it back to the config file.
Arguments:
agent_id: The agent to update state for.
state: The new state.
"""
if "agent_state" not in self.custom_state:
self.custom_state["agent_state"] = {}
self.custom_state["agent_state"][agent_id] = state
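# A minimal end-to-end sketch (illustrative; assumptions flagged inline). It presumes
# the Agent base class accepts an integer id, provides default no-op kernel_* lifecycle
# hooks, and exposes a set_wakeup() helper that delegates to Kernel.set_wakeup().
if __name__ == "__main__":
    class HeartbeatAgent(Agent):
        """Toy agent that re-schedules a wakeup once per simulated minute."""
        def kernel_starting(self, start_time: NanosecondTime) -> None:
            super().kernel_starting(start_time)
            self.set_wakeup(start_time + str_to_ns("1min"))
        def wakeup(self, current_time: NanosecondTime) -> None:
            super().wakeup(current_time)
            self.set_wakeup(current_time + str_to_ns("1min"))
    kernel = Kernel(
        agents=[HeartbeatAgent(id=0)],  # assumed Agent(id=...) signature
        start_time=str_to_ns("09:30:00"),
        stop_time=str_to_ns("10:00:00"),
        skip_log=True,
    )
    end_state = kernel.run()  # initialize() -> runner() -> terminate()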
| 36,090 | 42.222754 | 188 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/experimental_agents/financial_gym_agent.py
|
from collections import deque
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from abides_core import NanosecondTime
from abides_core.utils import str_to_ns
from abides_core.generators import ConstantTimeGenerator, InterArrivalTimeGenerator
from abides_markets.agents.background_v2.core_background_agent import (
CoreBackgroundAgent,
)
from abides_markets.orders import Order
from .core_gym_agent import CoreGymAgent
class FinancialGymAgent(CoreBackgroundAgent, CoreGymAgent):
"""
Gym experimental agent class. This agent is the interface between the ABIDES simulation and the ABIDES Gym environments.
Arguments:
        - id: agent's id in the simulation
        - symbol: ticker of the traded asset
        - starting_cash: agent's cash at the beginning of the simulation
        - subscribe_freq: frequency at which the agent receives market data from the exchange
        - subscribe: flag indicating whether or not the agent subscribes to market data
        - subscribe_num_levels: number of levels of order book depth the agent subscribes to
        - wakeup_interval_generator: inter-arrival time generator for the agent's next wakeup
        - state_buffer_length: length of the buffer of the agent's raw_states
        - market_data_buffer_length: length of the buffer for the received market data
    """
def __init__(
self,
id: int,
symbol: str,
starting_cash: int,
subscribe_freq: int = int(1e8),
        subscribe: bool = True,
subscribe_num_levels: int = 10,
wakeup_interval_generator: InterArrivalTimeGenerator = ConstantTimeGenerator(
step_duration=str_to_ns("1min")
),
state_buffer_length: int = 2,
market_data_buffer_length: int = 5,
first_interval: Optional[NanosecondTime] = None,
log_orders: bool = False,
name: Optional[str] = None,
type: Optional[str] = None,
random_state: Optional[np.random.RandomState] = None,
) -> None:
super().__init__(
id,
symbol=symbol,
starting_cash=starting_cash,
log_orders=log_orders,
name=name,
type=type,
random_state=random_state,
wakeup_interval_generator=wakeup_interval_generator,
state_buffer_length=state_buffer_length,
market_data_buffer_length=market_data_buffer_length,
first_interval=first_interval,
subscribe=subscribe,
subscribe_num_levels=subscribe_num_levels,
subscribe_freq=subscribe_freq,
)
self.symbol: str = symbol
# Frequency of agent data subscription up in ns-1
self.subscribe_freq: int = subscribe_freq
self.subscribe: bool = subscribe
self.subscribe_num_levels: int = subscribe_num_levels
self.wakeup_interval_generator: InterArrivalTimeGenerator = (
wakeup_interval_generator
)
self.lookback_period: NanosecondTime = self.wakeup_interval_generator.mean()
if hasattr(self.wakeup_interval_generator, "random_generator"):
self.wakeup_interval_generator.random_generator = self.random_state
self.state_buffer_length: int = state_buffer_length
self.market_data_buffer_length: int = market_data_buffer_length
self.first_interval: Optional[NanosecondTime] = first_interval
# internal variables
self.has_subscribed: bool = False
self.episode_executed_orders: List[
Order
] = [] # list of executed orders during full episode
# list of executed orders between steps - is reset at every step
self.inter_wakeup_executed_orders: List[Order] = []
self.parsed_episode_executed_orders: List[Tuple[int, int]] = [] # (price, qty)
self.parsed_inter_wakeup_executed_orders: List[
Tuple[int, int]
] = [] # (price, qty)
self.parsed_mkt_data: Dict[str, Any] = {}
self.parsed_mkt_data_buffer = deque(maxlen=self.market_data_buffer_length)
self.parsed_volume_data = {}
self.parsed_volume_data_buffer = deque(maxlen=self.market_data_buffer_length)
self.raw_state = deque(maxlen=self.state_buffer_length)
# dictionary to track order status:
# - keys = order_id
# - value = dictionary {'active'|'cancelled'|'executed', Order, 'active_qty','executed_qty', 'cancelled_qty }
self.order_status: Dict[int, Dict[str, Any]] = {}
def act_on_wakeup(self) -> Dict:
"""
        Computes the next wakeup time, computes the new raw_state and clears the internal step buffers.
        Returns the raw_state to the ABIDES gym environment (outside of the ABIDES simulation), where the next action will be selected.
        Returns:
            - the raw_state dictionary that will be processed in the ABIDES gym sub-environment
        """
# compute the state (returned to the Gym Env)
# wakeup logic
wake_time = (
self.current_time + self.wakeup_interval_generator.next()
) # generates next wakeup time
self.set_wakeup(wake_time)
self.update_raw_state()
raw_state = deepcopy(self.get_raw_state())
self.new_step_reset()
# return non None value so the kernel catches it and stops
return raw_state
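# A sketch of how a gym environment is expected to drive this agent, based on
# Kernel.runner()'s documented contract (policy() and gym_agent are illustrative
# names, not repo API): the kernel pauses whenever act_on_wakeup() returns a
# non-None raw_state, and resumes when the chosen actions are passed back in.
#
#   state = kernel.runner()                    # run until the gym agent wakes up
#   while not state["done"]:
#       raw_state = state["result"]            # deque of buffered raw states
#       actions = policy(raw_state)            # list of action dicts (external)
#       state = kernel.runner(agent_actions=(gym_agent, actions))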
| 5,397 | 40.523077 | 136 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/experimental_agents/core_gym_agent.py
|
from abc import abstractmethod, ABC
from collections import deque
from abides_core import Agent
class CoreGymAgent(Agent, ABC):
"""
    Abstract class to inherit from to create concrete ABIDES-Gym experimental agents.
"""
@abstractmethod
def update_raw_state(self) -> None:
raise NotImplementedError
@abstractmethod
def get_raw_state(self) -> deque:
raise NotImplementedError
| 428 | 21.578947 | 92 |
py
|
FinRL_Market_Simulator
|
FinRL_Market_Simulator-master/abides-vecenv/experimental_agents/__init__.py
|
from .core_gym_agent import CoreGymAgent
from .financial_gym_agent import FinancialGymAgent
| 92 | 30 | 50 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/calc_img_inputs.py
|
import torch
import numpy as np
import cv2
from collections import OrderedDict
import os
import glob
# import scipy.io as sio
from torch.utils.data import Dataset, DataLoader
from vad_datasets import ped_dataset, avenue_dataset, shanghaiTech_dataset
from FlowNet2_src import FlowNet2, flow_to_image
from torch.autograd import Variable
# from FlowNet2_src.flowlib import flow_to_image
def calc_optical_flow(dataset):
of_root_dir = './optical_flow'
len_original_root_dir = len(dataset.dir.split('/')) - 1
print(len_original_root_dir)
flownet2 = FlowNet2()
path = 'FlowNet2_src/pretrained/FlowNet2_checkpoint.pth.tar'
pretrained_dict = torch.load(path)['state_dict']
model_dict = flownet2.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
flownet2.load_state_dict(model_dict)
flownet2.cuda()
dataset_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1)
for idx, (batch, _) in enumerate(dataset_loader):
print('Calculating optical flow for {}-th frame'.format(idx+1))
cur_img_addr = dataset.all_frame_addr[idx]
cur_img_name = cur_img_addr.split('/')[-1]
cur_img_name = cur_img_name.split('.')[0]
# parent path to store optical flow
of_path = of_root_dir
tmp_path_segment = cur_img_addr.split('/')[len_original_root_dir: -1]
for cur_seg in tmp_path_segment:
of_path = os.path.join(of_path, cur_seg)
if os.path.exists(of_path) is False:
os.makedirs(of_path, exist_ok=True)
# calculate new img inputs: optical flow by flownet2
cur_imgs = np.transpose(batch[0].numpy(), [0, 2, 3, 1])
frameRange = dataset.context_range(idx)
old_size = (cur_imgs.shape[2], cur_imgs.shape[1])
if frameRange[1] == frameRange[0] or frameRange[1] == frameRange[2]:
if cur_imgs.shape[3] == 1:
im1 = cv2.resize(cur_imgs[0], (512, 384))[:, :, np.newaxis]
im2 = cv2.resize(cur_imgs[1], (512, 384))[:, :, np.newaxis]
im1 = np.concatenate([im1] * 3, axis=2)
im2 = np.concatenate([im2] * 3, axis=2)
else:
im1 = cv2.resize(cur_imgs[0], (512, 384))
im2 = cv2.resize(cur_imgs[1], (512, 384))
ims = np.array([[im1, im2]]).transpose((0, 4, 1, 2, 3)).astype(np.float32)
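            # Shape check: [[im1, im2]] has shape (1, 2, 384, 512, 3); transposing
            # by (0, 4, 1, 2, 3) yields (1, 3, 2, 384, 512), i.e. the
            # (batch, channel, image-pair, H, W) layout FlowNet2 consumes.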
ims = torch.from_numpy(ims)
ims_v = Variable(ims.cuda(), requires_grad=False)
pred_flow = flownet2(ims_v).cpu().data
pred_flow = pred_flow[0].numpy().transpose((1, 2, 0))
new_inputs = cv2.resize(pred_flow, old_size)
else:
if cur_imgs.shape[3] == 1:
im1 = cv2.resize(cur_imgs[1], (512, 384))[:, :, np.newaxis]
im2 = cv2.resize(cur_imgs[2], (512, 384))[:, :, np.newaxis]
im1 = np.concatenate([im1] * 3, axis=2)
im2 = np.concatenate([im2] * 3, axis=2)
else:
im1 = cv2.resize(cur_imgs[1], (512, 384))
im2 = cv2.resize(cur_imgs[2], (512, 384))
ims = np.array([[im1, im2]]).transpose((0, 4, 1, 2, 3)).astype(np.float32)
ims = torch.from_numpy(ims)
ims_v = Variable(ims.cuda(), requires_grad=False)
pred_flow = flownet2(ims_v).cpu().data
pred_flow = pred_flow[0].numpy().transpose((1, 2, 0))
# visualization
# cv2.imshow('of', flow_to_image(pred_flow))
# cv2.waitKey(0)
new_inputs = cv2.resize(pred_flow, old_size)
# save new raw inputs
np.save(os.path.join(of_path, cur_img_name+'.npy'), new_inputs)
if __name__ == '__main__':
# mode = train or test. 'train' and 'test' are used for calculating optical flow of training dataset and testing dataset respectively.
dataset = ped_dataset(dir='./raw_datasets/UCSDped2', context_frame_num=1, mode='train', border_mode='hard')
calc_optical_flow(dataset)
dataset = ped_dataset(dir='./raw_datasets/UCSDped2', context_frame_num=1, mode='test', border_mode='hard')
calc_optical_flow(dataset)
# The optical flow calculation of avenue and ShanghaiTech sets is basically the same as above
dataset = avenue_dataset(dir='./raw_datasets/avenue', context_frame_num=1, mode='train', border_mode='hard')
calc_optical_flow(dataset)
dataset = avenue_dataset(dir='./raw_datasets/avenue', context_frame_num=1, mode='test', border_mode='hard')
calc_optical_flow(dataset)
dataset = shanghaiTech_dataset(dir='./raw_datasets/ShanghaiTech', context_frame_num=1, mode='train', border_mode='hard')
calc_optical_flow(dataset)
dataset = shanghaiTech_dataset(dir='./raw_datasets/ShanghaiTech', context_frame_num=1, mode='test', border_mode='hard')
calc_optical_flow(dataset)
| 4,952 | 44.027273 | 138 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/test.py
|
import torch
import numpy as np
import os
from torch.utils.data import DataLoader
from vad_datasets import unified_dataset_interface
from vad_datasets import bbox_collate, img_tensor2numpy, img_batch_tensor2numpy, frame_size, cube_to_train_dataset
from state_model import ConvTransformer_recon_correct
import torch.nn as nn
from utils import save_roc_pr_curve_data
import time
import argparse
import sys
# from helper.visualization_helper import visualize_pair, visualize_batch, visualize_recon, visualize_pair_map
pyfile_name = "train"
pyfile_name_score = os.path.basename(sys.argv[0]).split(".")[0]
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected')
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset', default='UCSDped2', type=str)
parser.add_argument('-n_l', '--num_layers', default=3, type=int)
parser.add_argument('-n_h', '--num_heads', default=4, type=int)
parser.add_argument('-pe', '--positional_encoding', default='learned', type=str)
parser.add_argument('-e', '--epochs', default=20, type=int)
parser.add_argument('-b', '--batch_size', default=128, type=int)
parser.add_argument('-l', '--temporal_length', default=3, type=int)
parser.add_argument('-lam_r', '--lambda_raw', default=1, type=float)
parser.add_argument('-lam_o', '--lambda_of', default=1, type=float)
parser.add_argument('-w_r', '--w_raw', default=1, type=float)
parser.add_argument('-w_o', '--w_of', default=1, type=float)
parser.add_argument('-test_b', '--test_bbox_saved', type=str2bool, default=True)
parser.add_argument('-test_f', '--test_foreground_saved', type=str2bool, default=True)
parser.add_argument('-f', '--use_flow', default=True, type=str2bool)
parser.add_argument('-s', '--scores_saved', default=False, type=str2bool)
parser.add_argument('-ep', '--epsilon', default=0.01, type=float)
args = parser.parse_args()
def calc_block_idx(x_min, x_max, y_min, y_max, h_step, w_step, mode):
all_blocks = list()
center = np.array([(y_min + y_max) / 2, (x_min + x_max) / 2])
all_blocks.append(center + center)
if mode > 1:
all_blocks.append(np.array([y_min, center[1]]) + center)
all_blocks.append(np.array([y_max, center[1]]) + center)
all_blocks.append(np.array([center[0], x_min]) + center)
all_blocks.append(np.array([center[0], x_max]) + center)
if mode >= 9:
all_blocks.append(np.array([y_min, x_min]) + center)
all_blocks.append(np.array([y_max, x_max]) + center)
all_blocks.append(np.array([y_max, x_min]) + center)
all_blocks.append(np.array([y_min, x_max]) + center)
all_blocks = np.array(all_blocks) / 2
h_block_idxes = all_blocks[:, 0] / h_step
w_block_idxes = all_blocks[:, 1] / w_step
    h_block_idxes, w_block_idxes = list(h_block_idxes.astype(int)), list(w_block_idxes.astype(int))
# delete repeated elements
all_blocks = set([x for x in zip(h_block_idxes, w_block_idxes)])
all_blocks = [x for x in all_blocks]
return all_blocks
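# Worked example for calc_block_idx (illustrative): for a 240x360 frame split into
# h_block=2, w_block=3 (so h_step=120, w_step=120), a bbox with x_min=50, x_max=90,
# y_min=30, y_max=70 has center (y=50, x=70); mode=1 keeps only that center point,
# which maps to block (0, 0). Larger modes also sample edge midpoints (mode > 1)
# and corners (mode >= 9), so a bbox straddling a boundary votes for every block
# it touches.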
# /*------------------------------------overall parameter setting------------------------------------------*/
dataset_name = args.dataset
raw_dataset_dir = 'raw_datasets'
foreground_extraction_mode = 'obj_det_with_motion'
data_root_dir = 'data'
modality = 'raw2flow'
mode = 'test'
method = 'SelfComplete'
num_layers = args.num_layers
num_heads = args.num_heads
pe = args.positional_encoding
context_frame_num = args.temporal_length
context_of_num = args.temporal_length
patch_size = 32
h_block = 1
w_block = 1
test_block_mode = 1
bbox_saved = args.test_bbox_saved
foreground_saved = args.test_foreground_saved
motionThr = 0
epochs = args.epochs
# visual_save_dir = args.save_dir
# /*------------------------------------------foreground extraction----------------------------------------------*/
config_file = './obj_det_config/cascade_rcnn_r101_fpn_1x.py'
checkpoint_file = './obj_det_checkpoints/cascade_rcnn_r101_fpn_1x_20181129-d64ebac7.pth'
# set dataset for foreground extraction
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join(raw_dataset_dir, dataset_name),
context_frame_num=1, mode=mode, border_mode='hard')
if not bbox_saved:
from fore_det.inference import init_detector
from fore_det.obj_det_with_motion import imshow_bboxes, getObBboxes, getFgBboxes, delCoverBboxes
from fore_det.simple_patch import get_patch_loc
# build the model from a config file and a checkpoint file
model = init_detector(config_file, checkpoint_file, device='cuda:0')
collate_func = bbox_collate('test')
dataset_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1,
collate_fn=collate_func.collate)
all_bboxes = list()
for idx in range(dataset.__len__()):
batch, _ = dataset.__getitem__(idx)
print('Extracting bboxes of {}-th frame'.format(idx + 1))
cur_img = img_tensor2numpy(batch[1])
if foreground_extraction_mode == 'obj_det_with_motion':
# A coarse detection of bboxes by pretrained object detector
ob_bboxes = getObBboxes(cur_img, model, dataset_name)
ob_bboxes = delCoverBboxes(ob_bboxes, dataset_name)
# further foreground detection by motion
fg_bboxes = getFgBboxes(cur_img, img_batch_tensor2numpy(batch), ob_bboxes, dataset_name, verbose=False)
if fg_bboxes.shape[0] > 0:
cur_bboxes = np.concatenate((ob_bboxes, fg_bboxes), axis=0)
else:
cur_bboxes = ob_bboxes
elif foreground_extraction_mode == 'obj_det':
# A coarse detection of bboxes by pretrained object detector
ob_bboxes = getObBboxes(cur_img, model, dataset_name)
cur_bboxes = delCoverBboxes(ob_bboxes, dataset_name)
elif foreground_extraction_mode == 'simple_patch':
patch_num_list = [(3, 4), (6, 8)]
cur_bboxes = list()
for h_num, w_num in patch_num_list:
cur_bboxes.append(get_patch_loc(frame_size[dataset_name][0], frame_size[dataset_name][1], h_num, w_num))
cur_bboxes = np.concatenate(cur_bboxes, axis=0)
else:
raise NotImplementedError
all_bboxes.append(cur_bboxes)
np.save(os.path.join(dataset.dir, 'bboxes_test_{}.npy'.format(foreground_extraction_mode)), all_bboxes)
print('bboxes for testing data saved!')
else:
all_bboxes = np.load(os.path.join(dataset.dir, 'bboxes_test_{}.npy'.format(foreground_extraction_mode)),
allow_pickle=True)
print('bboxes for testing data loaded!')
# /------------------------- extract foreground using extracted bboxes---------------------------------------/
# set dataset for foreground bbox extraction
if method == 'SelfComplete':
border_mode = 'elastic'
else:
border_mode = 'hard'
if not foreground_saved:
if modality == 'raw_datasets':
file_format = frame_size[dataset_name][2]
elif modality == 'raw2flow':
file_format1 = frame_size[dataset_name][2]
file_format2 = '.npy'
else:
file_format = '.npy'
# set dataset for foreground bbox extraction
if modality == 'raw2flow':
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join('raw_datasets', dataset_name),
context_frame_num=context_frame_num, mode=mode,
border_mode=border_mode, all_bboxes=all_bboxes, patch_size=patch_size,
file_format=file_format1)
dataset2 = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join('optical_flow', dataset_name),
context_frame_num=context_of_num, mode=mode,
border_mode=border_mode, all_bboxes=all_bboxes, patch_size=patch_size,
file_format=file_format2)
else:
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join(modality, dataset_name),
context_frame_num=context_frame_num, mode=mode,
border_mode=border_mode, all_bboxes=all_bboxes, patch_size=patch_size,
file_format=file_format)
if dataset_name == 'ShanghaiTech':
np.save(os.path.join(data_root_dir, modality, dataset_name + '_' + 'scene_idx.npy'), dataset.scene_idx)
scene_idx = dataset.scene_idx
foreground_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ii in range(dataset.__len__())]
if modality == 'raw2flow':
foreground_set2 = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ii in range(dataset.__len__())]
foreground_bbox_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ii in range(dataset.__len__())]
h_step, w_step = frame_size[dataset_name][0] / h_block, frame_size[dataset_name][1] / w_block
dataset_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1,
collate_fn=bbox_collate(mode=mode).collate)
for idx in range(dataset.__len__()):
batch, _ = dataset.__getitem__(idx)
if modality == 'raw2flow':
batch2, _ = dataset2.__getitem__(idx)
print('Extracting foreground in {}-th batch, {} in total'.format(idx + 1, dataset.__len__() // 1))
cur_bboxes = all_bboxes[idx]
if len(cur_bboxes) > 0:
batch = img_batch_tensor2numpy(batch)
if modality == 'raw2flow':
batch2 = img_batch_tensor2numpy(batch2)
if modality == 'optical_flow':
if len(batch.shape) == 4:
mag = np.sum(np.sum(np.sum(batch ** 2, axis=3), axis=2), axis=1)
else:
mag = np.mean(np.sum(np.sum(np.sum(batch ** 2, axis=4), axis=3), axis=2), axis=1)
elif modality == 'raw2flow':
if len(batch2.shape) == 4:
mag = np.sum(np.sum(np.sum(batch2 ** 2, axis=3), axis=2), axis=1)
else:
mag = np.mean(np.sum(np.sum(np.sum(batch2 ** 2, axis=4), axis=3), axis=2), axis=1)
else:
mag = np.ones(batch.shape[0]) * 10000
for idx_bbox in range(cur_bboxes.shape[0]):
if mag[idx_bbox] > motionThr:
all_blocks = calc_block_idx(cur_bboxes[idx_bbox, 0], cur_bboxes[idx_bbox, 2],
cur_bboxes[idx_bbox, 1], cur_bboxes[idx_bbox, 3], h_step, w_step,
mode=test_block_mode)
for (h_block_idx, w_block_idx) in all_blocks:
foreground_set[idx][h_block_idx][w_block_idx].append(batch[idx_bbox])
if modality == 'raw2flow':
foreground_set2[idx][h_block_idx][w_block_idx].append(batch2[idx_bbox])
foreground_bbox_set[idx][h_block_idx][w_block_idx].append(cur_bboxes[idx_bbox])
foreground_set = [[[np.array(foreground_set[ii][hh][ww]) for ww in range(w_block)] for hh in range(h_block)] for ii
in range(dataset.__len__())]
if modality == 'raw2flow':
foreground_set2 = [[[np.array(foreground_set2[ii][hh][ww]) for ww in range(w_block)] for hh in range(h_block)]
for ii in range(dataset.__len__())]
foreground_bbox_set = [
[[np.array(foreground_bbox_set[ii][hh][ww]) for ww in range(w_block)] for hh in range(h_block)] for ii in
range(dataset.__len__())]
if modality == 'raw2flow':
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_test_{}_{}_border_{}-raw.npy'.format(foreground_extraction_mode,
context_frame_num, border_mode)),
foreground_set)
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_test_{}_{}_border_{}-flow.npy'.format(foreground_extraction_mode, context_frame_num, border_mode)),
foreground_set2)
else:
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_test_{}_{}_border_{}.npy'.format(foreground_extraction_mode, context_frame_num, border_mode)),
foreground_set)
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_bbox_test_{}.npy'.format(foreground_extraction_mode)),
foreground_bbox_set)
print('foreground for testing data saved!')
else:
if dataset_name == 'ShanghaiTech':
scene_idx = np.load(os.path.join(data_root_dir, modality, dataset_name + '_' + 'scene_idx.npy'))
if modality == 'raw2flow':
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_test_{}_{}_border_{}-raw.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
foreground_set2 = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_test_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
else:
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_test_{}_{}_border_{}.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
foreground_bbox_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_bbox_test_{}.npy'.format(
foreground_extraction_mode)), allow_pickle=True)
print('foreground for testing data loaded!')
# /*------------------------------------------Abnormal event detection----------------------------------------------*/
results_dir = 'results'
scores_saved = args.scores_saved
big_number = 100000
time_start=time.time()
loss_func_perturb = nn.MSELoss()
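# Note on the scoring loops below: before scores are computed, each input cube is
# refined by one gradient-sign step of size epsilon against the reconstruction loss
# (an FGSM-style perturbation, here descending rather than ascending the loss).
# Patterns the model already explains well get an extra loss reduction, widening
# the score gap between normal and anomalous foreground objects.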
if scores_saved is False:
if method == 'SelfComplete':
h, w, _, sn = frame_size[dataset_name]
if border_mode == 'predict':
tot_frame_num = context_frame_num + 1
tot_of_num = context_of_num + 1
else:
tot_frame_num = 2 * context_frame_num + 1
tot_of_num = 2 * context_of_num + 1
rawRange = 10
if rawRange >= tot_frame_num:
rawRange = None
useFlow = args.use_flow
padding = False
assert modality == 'raw2flow'
        loss_func = nn.MSELoss(reduction='none')
in_channels = 3
pixel_result_dir = os.path.join(results_dir, dataset_name, 'score_mask_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}_w_{}_{}_perturb_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, args.lambda_raw, args.lambda_of, args.w_raw, args.w_of, args.epsilon) + '_' + 'pyname_{}.npy'.format(pyfile_name_score))
os.makedirs(pixel_result_dir, exist_ok=True)
model_weights = torch.load(os.path.join(data_root_dir, modality, dataset_name + '_' + 'model_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, args.lambda_raw, args.lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
if dataset_name == 'ShanghaiTech':
model_set = [[[[] for ww in range(len(model_weights[ss][hh]))] for hh in range(len(model_weights[ss]))]
for ss in range(len(model_weights))]
for ss in range(len(model_weights)):
for hh in range(len(model_weights[ss])):
for ww in range(len(model_weights[ss][hh])):
if len(model_weights[ss][hh][ww]) > 0:
cur_model = torch.nn.DataParallel(
ConvTransformer_recon_correct(
tot_raw_num=tot_frame_num, nums_hidden=[32, 64, 128], num_layers=num_layers,
num_dec_frames=1, num_heads=num_heads, with_residual=True,
with_pos=True, pos_kind=pe, mode=0, use_flow=args.use_flow)).cuda()
cur_model.load_state_dict(model_weights[ss][hh][ww][0])
model_set[ss][hh][ww].append(cur_model.eval())
# get training scores statistics
raw_training_scores_set = torch.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'raw_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe,
epochs, args.lambda_raw, args.lambda_of)+ '_' + 'pyname_{}.npy'.format(pyfile_name)))
of_training_scores_set = torch.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'of_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe,
epochs, args.lambda_raw, args.lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
            raw_stats_set = [[[(np.mean(raw_training_scores_set[ss][hh][ww]),
                                np.std(raw_training_scores_set[ss][hh][ww])) for ww in range(len(model_weights[ss][hh]))]
                              for hh in range(len(model_weights[ss]))] for ss in range(len(model_weights))]
            if useFlow:
                of_stats_set = [[[(np.mean(of_training_scores_set[ss][hh][ww]),
                                   np.std(of_training_scores_set[ss][hh][ww])) for ww in range(len(model_weights[ss][hh]))]
                                 for hh in range(len(model_weights[ss]))] for ss in range(len(model_weights))]
del raw_training_scores_set, of_training_scores_set
else:
model_set = [[[] for ww in range(len(model_weights[hh]))] for hh in range(len(model_weights))]
for hh in range(len(model_weights)):
for ww in range(len(model_weights[hh])):
if len(model_weights[hh][ww]) > 0:
cur_model = torch.nn.DataParallel(
ConvTransformer_recon_correct(
tot_raw_num=tot_frame_num, nums_hidden=[32, 64, 128], num_layers=num_layers,
num_dec_frames=1, num_heads=num_heads, with_residual=True,
with_pos=True, pos_kind=pe, mode=0, use_flow=args.use_flow)).cuda()
print(model_weights[hh][ww][0].keys())
cur_model.load_state_dict(model_weights[hh][ww][0])
model_set[hh][ww].append(cur_model.eval())
# get training scores statistics
raw_training_scores_set = torch.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'raw_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, args.lambda_raw, args.lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
of_training_scores_set = torch.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'of_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format( border_mode, num_heads, num_layers, context_frame_num, pe, epochs, args.lambda_raw, args.lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
# mean and std of training scores
raw_stats_set = [
[(np.mean(raw_training_scores_set[hh][ww]), np.std(raw_training_scores_set[hh][ww])) for ww in
range(len(model_weights[hh]))] for hh in range(len(model_weights))]
if useFlow:
of_stats_set = [
[(np.mean(of_training_scores_set[hh][ww]), np.std(of_training_scores_set[hh][ww])) for ww in
range(len(model_weights[hh]))] for hh in range(len(model_weights))]
del raw_training_scores_set, of_training_scores_set
# Get scores
for frame_idx in range(len(foreground_set)):
print('Calculating scores for {}-th frame'.format(frame_idx))
cur_data_set = foreground_set[frame_idx]
cur_data_set2 = foreground_set2[frame_idx]
cur_bboxes = foreground_bbox_set[frame_idx]
cur_pixel_results = -1 * np.ones(shape=(h, w)) * big_number
for h_idx in range(len(cur_data_set)):
for w_idx in range(len(cur_data_set[h_idx])):
if len(cur_data_set[h_idx][w_idx]) > 0:
if dataset_name == 'ShanghaiTech':
if len(model_set[scene_idx[frame_idx] - 1][h_idx][w_idx]) > 0:
# print(scene_idx[frame_idx])
cur_model = model_set[scene_idx[frame_idx] - 1][h_idx][w_idx][0]
cur_dataset = cube_to_train_dataset(cur_data_set[h_idx][w_idx],
target=cur_data_set2[h_idx][w_idx])
cur_dataloader = DataLoader(dataset=cur_dataset,
batch_size=cur_data_set[h_idx][w_idx].shape[0],
shuffle=False)
for idx, (inputs, of_targets_all, _) in enumerate(cur_dataloader):
inputs = inputs.cuda().type(torch.cuda.FloatTensor)
inputs = torch.autograd.Variable(inputs, requires_grad= True)
of_targets_all = of_targets_all.cuda().type(torch.cuda.FloatTensor)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
loss_raw = loss_func_perturb(raw_targets, raw_outputs)
if useFlow:
loss_of = loss_func_perturb(of_targets.detach(), of_outputs)
if useFlow:
loss = loss_raw + loss_of
else:
loss = loss_raw
loss.backward()
gradient = inputs.grad.data
sign_gradient = torch.sign(gradient)
middle_start_indice = 3*context_frame_num
inputs.requires_grad = False
                                    inputs = inputs.data - args.epsilon * sign_gradient  # gradient-sign step
# end of perturb
inputs = torch.autograd.Variable(inputs)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
# # visualization
# for i in range(raw_targets.size(0)):
# visualize_recon(
# batch_1=img_batch_tensor2numpy(raw_targets.cpu().detach()[i]),
# batch_2=img_batch_tensor2numpy(raw_outputs.cpu().detach()[i]),
# frame_idx=frame_idx, obj_id = i, dataset_name = dataset_name, save_dir=visual_save_dir)
# visualize_recon(
# batch_1=img_batch_tensor2numpy(of_targets.cpu().detach()[i]),
# batch_2=img_batch_tensor2numpy(of_outputs.cpu().detach()[i]),
# frame_idx=frame_idx, obj_id = i, dataset_name = dataset_name, save_dir=visual_save_dir)
if useFlow:
of_scores = loss_func(of_targets, of_outputs).cpu().data.numpy()
of_scores = np.sum(np.sum(np.sum(np.sum(of_scores, axis=4), axis=3), axis=2), axis=1)
# print(of_scores)# mse
raw_scores = loss_func(raw_targets, raw_outputs).cpu().data.numpy()
raw_scores = np.sum(raw_scores, axis=(1, 2, 3, 4))  # sum mse over all but the batch axis
# print(raw_scores)
raw_scores = (raw_scores - raw_stats_set[scene_idx[frame_idx] - 1][h_idx][w_idx][
0]) / raw_stats_set[scene_idx[frame_idx] - 1][h_idx][w_idx][1]
# print(raw_scores)
if useFlow:
of_scores = (of_scores - of_stats_set[scene_idx[frame_idx] - 1][h_idx][w_idx][
0]) / of_stats_set[scene_idx[frame_idx] - 1][h_idx][w_idx][1]
# print(of_scores)
if useFlow:
scores = args.w_raw * raw_scores + args.w_of* of_scores
# print(scores)
else:
scores = args.w_raw * raw_scores
else:
scores = np.ones(cur_data_set[h_idx][w_idx].shape[0], ) * big_number
else:
if len(model_set[h_idx][w_idx]) > 0:
cur_model = model_set[h_idx][w_idx][0]
cur_dataset = cube_to_train_dataset(cur_data_set[h_idx][w_idx],
target=cur_data_set2[h_idx][w_idx])
cur_dataloader = DataLoader(dataset=cur_dataset,
batch_size=cur_data_set[h_idx][w_idx].shape[0],
shuffle=False)
for idx, (inputs, of_targets_all, _) in enumerate(cur_dataloader):
inputs = inputs.cuda().type(torch.cuda.FloatTensor)
inputs = torch.autograd.Variable(inputs, requires_grad=True)
of_targets_all = of_targets_all.cuda().type(torch.cuda.FloatTensor)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
loss_raw = loss_func_perturb(raw_targets, raw_outputs)
if useFlow:
loss_of = loss_func_perturb(of_targets.detach(), of_outputs)
if useFlow:
loss = loss_raw + loss_of
else:
loss = loss_raw
loss.backward()
gradient = inputs.grad.data
sign_gradient = torch.sign(gradient)
middle_start_indice = 3*context_frame_num
inputs.requires_grad = False
inputs = torch.add(inputs.data, sign_gradient, alpha=-args.epsilon)  # FGSM-style step; keyword alpha replaces the deprecated 3-arg torch.add
# end of perturb
inputs = torch.autograd.Variable(inputs)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
# # visualization
# for i in range(raw_targets.size(0)):
# visualize_recon(
# batch_1=img_batch_tensor2numpy(raw_targets.cpu().detach()[i]),
# batch_2=img_batch_tensor2numpy(raw_outputs.cpu().detach()[i]),
# frame_idx=frame_idx, obj_id = i, dataset_name = dataset_name, save_dir=visual_save_dir)
# visualize_recon(
# batch_1=img_batch_tensor2numpy(of_targets.cpu().detach()[i]),
# batch_2=img_batch_tensor2numpy(of_outputs.cpu().detach()[i]),
# frame_idx=frame_idx, obj_id = i, dataset_name = dataset_name, save_dir=visual_save_dir)
# mse
if useFlow:
of_scores = loss_func(of_targets, of_outputs).cpu().data.numpy()
# of_scores = np.sum(of_scores, axis=(4, 3, 2)) # bl
#
# for l in range(of_scores.shape[1]):
# of_scores[:, l] = of_scores[:, l] * (abs(l - context_frame_num) + 1)
# of_scores = np.sum(of_scores, axis=1)
of_scores = np.sum(of_scores, axis=(1, 2, 3, 4))  # sum mse over all but the batch axis
raw_scores = loss_func(raw_targets, raw_outputs).cpu().data.numpy()
raw_scores = np.sum(raw_scores, axis=(1, 2, 3, 4))  # sum mse over all but the batch axis
# raw_scores = np.sum(raw_scores, axis=(4, 3, 2)) # bl
#
# for l in range(raw_scores.shape[1]):
# raw_scores[:, l] = raw_scores[:, l] * (abs(l - context_frame_num)+1)
# raw_scores = np.sum(raw_scores, axis=1)
# normalize scores using training scores
raw_scores = (raw_scores - raw_stats_set[h_idx][w_idx][0]) / \
raw_stats_set[h_idx][w_idx][1]
if useFlow:
of_scores = (of_scores - of_stats_set[h_idx][w_idx][0]) / \
of_stats_set[h_idx][w_idx][1]
if useFlow:
scores = args.w_raw * raw_scores + args.w_of * of_scores
else:
scores = args.w_raw * raw_scores
# print(scores.shape)
else:
scores = np.ones(cur_data_set[h_idx][w_idx].shape[0], ) * big_number
for m in range(scores.shape[0]):
cur_score_mask = -1 * np.ones(shape=(h, w)) * big_number
cur_score = scores[m]
bbox = cur_bboxes[h_idx][w_idx][m]
x_min, x_max = int(np.ceil(bbox[0])), int(np.ceil(bbox[2]))  # np.int was removed in NumPy 1.24; use the builtin int
y_min, y_max = int(np.ceil(bbox[1])), int(np.ceil(bbox[3]))
cur_score_mask[y_min:y_max, x_min:x_max] = cur_score
cur_pixel_results = np.max(
np.concatenate([cur_pixel_results[:, :, np.newaxis], cur_score_mask[:, :, np.newaxis]],
axis=2), axis=2)
torch.save(cur_pixel_results, os.path.join(pixel_result_dir, '{}'.format(frame_idx)))
else:
raise NotImplementedError
# /*------------------------------------------Evaluation----------------------------------------------*/
criterion = 'frame'
batch_size = 1
# set dataset for evaluation
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join(raw_dataset_dir, dataset_name),
context_frame_num=0, mode=mode, border_mode='hard')
dataset_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, num_workers=0,
collate_fn=bbox_collate(mode).collate)
print('Evaluating {} by {}-criterion:'.format(dataset_name, criterion))
if criterion == 'frame':
if dataset_name == 'ShanghaiTech':
all_frame_scores = [[] for si in set(dataset.scene_idx)]
all_targets = [[] for si in set(dataset.scene_idx)]
for idx, (_, target) in enumerate(dataset_loader):
print('Processing {}-th frame'.format(idx))
cur_pixel_results = torch.load(os.path.join(results_dir, dataset_name, 'score_mask_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}_w_{}_{}_perturb_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, args.lambda_raw, args.lambda_of, args.w_raw, args.w_of, args.epsilon) + '_' + 'pyname_{}.npy'.format(pyfile_name_score), '{}'.format(idx) ))
all_frame_scores[scene_idx[idx] - 1].append(cur_pixel_results.max())
all_targets[scene_idx[idx] - 1].append(target[0].numpy().max())
all_frame_scores = [np.array(all_frame_scores[si]) for si in range(dataset.scene_num)]
all_targets = [np.array(all_targets[si]) for si in range(dataset.scene_num)]
all_targets = [all_targets[si] > 0 for si in range(dataset.scene_num)]
print(dataset.scene_num)
print(all_frame_scores)
print(all_targets)
results = [save_roc_pr_curve_data(all_frame_scores[si], all_targets[si],
    os.path.join(results_dir, dataset_name,
        '{}_{}_{}_frame_results_scene_{}.npz'.format(
            modality, foreground_extraction_mode, method, si + 1)))
    for si in range(dataset.scene_num)]
results = np.array(results).mean()
print('Average frame-level AUC is {}'.format(results))
print(np.max(all_frame_scores))
print(np.min(all_frame_scores))
else:
all_frame_scores = list()
all_targets = list()
for idx, (_, target) in enumerate(dataset_loader):
print('Processing {}-th frame'.format(idx))
cur_pixel_results = torch.load(os.path.join(results_dir, dataset_name, 'score_mask_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}_w_{}_{}_perturb_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, args.lambda_raw, args.lambda_of, args.w_raw, args.w_of, args.epsilon) + '_' + 'pyname_{}.npy'.format(pyfile_name_score), '{}'.format(idx)))
all_frame_scores.append(cur_pixel_results.max())
all_targets.append(target[0].numpy().max())
time_end = time.time()
print('time cost', time_end - time_start, 's')
all_frame_scores = np.array(all_frame_scores)
all_targets = np.array(all_targets)
all_targets = all_targets > 0
results_path = os.path.join(results_dir, dataset_name,
'{}_{}_{}_frame_results.npz'.format(modality, foreground_extraction_mode, method))
print('Results written to {}:'.format(results_path))
np.save('output_scores_{}_pyname_{}'.format(dataset_name, pyfile_name_score), all_frame_scores)
np.save('labels_{}'.format(dataset_name), all_targets)
print(all_frame_scores)
print(all_targets)
auc = save_roc_pr_curve_data(all_frame_scores, all_targets, results_path,verbose=True)
print(auc)
elif criterion == 'pixel':
if dataset_name != 'ShanghaiTech':
all_pixel_scores = list()
all_targets = list()
thr = 0.4
for idx, (_, target) in enumerate(dataset_loader):
print('Processing {}-th frame'.format(idx))
cur_pixel_results = torch.load(os.path.join(results_dir, dataset_name, 'score_mask', '{}'.format(idx)))
target_mask = target[0].numpy()
all_targets.append(target[0].numpy().max())
if all_targets[-1] > 0:
cur_effective_scores = cur_pixel_results[target_mask > 0]
sorted_score = np.sort(cur_effective_scores)
cut_off_idx = int(np.round((1 - thr) * cur_effective_scores.shape[0]))
cut_off_score = cur_effective_scores[cut_off_idx]
else:
cut_off_score = cur_pixel_results.max()
all_pixel_scores.append(cut_off_score)
all_pixel_scores = np.array(all_pixel_scores)
all_targets = np.array(all_targets)
all_targets = all_targets > 0
results_path = os.path.join(results_dir, dataset_name,
'{}_{}_{}_pixel_results.npz'.format(modality, foreground_extraction_mode, method))
print('Results written to {}:'.format(results_path))
results = save_roc_pr_curve_data(all_pixel_scores, all_targets, results_path)
else:
raise NotImplementedError
else:
raise NotImplementedError
| 39,523 | 56.868228 | 321 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/state_model.py
|
import torch
import torch.nn as nn
import numpy as np
from module import *
# LSTM
class ConvLSTMCell(nn.Module):
def __init__(self, input_dim, hidden_dim, kernel_size, bias):
"""
Initialize ConvLSTM cell.
Parameters
----------
input_dim: int
Number of channels of input tensor.
hidden_dim: int
Number of channels of hidden state.
kernel_size: (int, int)
Size of the convolutional kernel.
bias: bool
Whether or not to add the bias.
"""
super(ConvLSTMCell, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.padding = kernel_size[0] // 2, kernel_size[1] // 2
self.bias = bias
self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
out_channels=4 * self.hidden_dim,
kernel_size=self.kernel_size,
padding=self.padding,
bias=self.bias)
def forward(self, input_tensor, cur_state):
h_cur, c_cur = cur_state
combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis
combined_conv = self.conv(combined)
cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)
c_next = f * c_cur + i * g
h_next = o * torch.tanh(c_next)
return h_next, c_next
def init_hidden(self, batch_size, image_size):
height, width = image_size
return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device),
torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device))
class LSTM(nn.Module):
"""
Parameters:
input_dim: Number of channels in input
hidden_dim: Number of hidden channels
kernel_size: Size of kernel in convolutions
num_layers: Number of LSTM layers stacked on each other
batch_first: Whether dimension 0 of the input is the batch dimension
bias: Bias or no bias in Convolution
return_all_layers: Return the list of computations for all layers
Note: Will do same padding.
Input:
A tensor of size B, T, C, H, W or T, B, C, H, W
Output:
A tuple of two lists of length num_layers (or length 1 if return_all_layers is False).
0 - layer_output_list is the list of lists of length T of each output
1 - last_state_list is the list of last states
each element of the list is a tuple (h, c) for hidden state and memory
Example:
>> x = torch.rand((32, 10, 64, 128, 128))
>> convlstm = LSTM(64, 16, (3, 3), 1, True, True, False)
>> _, last_states = convlstm(x)
>> h = last_states[0][0] # 0 for layer index, 0 for h index
"""
def __init__(self, input_dim, hidden_dim, kernel_size, num_layers,
batch_first=False, bias=True, return_all_layers=False):
super(LSTM, self).__init__()
self._check_kernel_size_consistency(kernel_size)
# Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
if not len(kernel_size) == len(hidden_dim) == num_layers:
raise ValueError('Inconsistent list length.')
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.num_layers = num_layers
self.batch_first = batch_first
self.bias = bias
self.return_all_layers = return_all_layers
cell_list = []
for i in range(0, self.num_layers):
cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
cell_list.append(ConvLSTMCell(input_dim=cur_input_dim,
hidden_dim=self.hidden_dim[i],
kernel_size=self.kernel_size[i],
bias=self.bias))
self.cell_list = nn.ModuleList(cell_list)
def forward(self, input_tensor, hidden_state=None):
"""
Parameters
----------
input_tensor: 5-D tensor of shape (t, b, c, h, w) or (b, t, c, h, w)
hidden_state: None (stateful operation is not implemented yet)
Returns
-------
last_state_list, layer_output
"""
if not self.batch_first:
# (t, b, c, h, w) -> (b, t, c, h, w)
input_tensor = input_tensor.permute(1, 0, 2, 3, 4)
b, _, _, h, w = input_tensor.size()
# Implement stateful ConvLSTM
if hidden_state is not None:
raise NotImplementedError()
else:
# Since the init is done in forward. Can send image size here
hidden_state = self._init_hidden(batch_size=b,
image_size=(h, w))
layer_output_list = []
last_state_list = []
seq_len = input_tensor.size(1)
cur_layer_input = input_tensor
for layer_idx in range(self.num_layers):
h, c = hidden_state[layer_idx]
output_inner = []
for t in range(seq_len):
h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
cur_state=[h, c])
output_inner.append(h)
layer_output = torch.stack(output_inner, dim=1)
cur_layer_input = layer_output
layer_output_list.append(layer_output)
last_state_list.append([h, c])
if not self.return_all_layers:
layer_output_list = layer_output_list[-1:]
last_state_list = last_state_list[-1:]
return layer_output_list, last_state_list
def _init_hidden(self, batch_size, image_size):
init_states = []
for i in range(self.num_layers):
init_states.append(self.cell_list[i].init_hidden(batch_size, image_size))
return init_states
@staticmethod
def _check_kernel_size_consistency(kernel_size):
if not (isinstance(kernel_size, tuple) or
(isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
raise ValueError('`kernel_size` must be tuple or list of tuples')
@staticmethod
def _extend_for_multilayer(param, num_layers):
if not isinstance(param, list):
param = [param] * num_layers
return param
# ------------------------------------------------------------------------------------------------
class double_conv(nn.Module):
'''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv= nn.Sequential(
nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=out_ch, out_channels=out_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.conv(x)
return x
class inconv(nn.Module):
'''
inconv only changes the number of channels
'''
def __init__(self, in_ch, out_ch):
super(inconv, self).__init__()
self.conv = double_conv(in_ch, out_ch)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super(down, self).__init__()
self.mpconv = nn.Sequential(
nn.MaxPool2d(kernel_size=2),
double_conv(in_ch, out_ch),
)
def forward(self, x):
x = self.mpconv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=False):
super(up, self).__init__()
self.bilinear=bilinear
if self.bilinear:
self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(in_ch, in_ch//2, 1),)
else:
self.up = nn.ConvTranspose2d(in_channels=in_ch, out_channels=in_ch, kernel_size=3, stride=2, padding=1, output_padding=1)
self.conv = double_conv(in_ch, out_ch)
def forward(self, x):
x = self.up(x)
# x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class up_unet(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=False):
super(up_unet, self).__init__()
self.bilinear=bilinear
if self.bilinear:
self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
nn.Conv2d(in_ch, in_ch//2, 1),)
else:
self.up = nn.ConvTranspose2d(in_channels=in_ch, out_channels=in_ch // 2, kernel_size=3, stride=2, padding=1, output_padding=1)
self.conv = double_conv(in_ch, out_ch)
def forward(self, x1, x2):
x1 = self.up(x1)
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1)
def forward(self, x):
x = self.conv(x)
return x
class Spatial_Encoder(nn.Module):
def __init__(self, nums_hidden, channel_num):
super(Spatial_Encoder, self).__init__()
self.inc = inconv(channel_num, nums_hidden[0])
self.down1 = down(nums_hidden[0], nums_hidden[1])
self.down2 = down(nums_hidden[1], nums_hidden[2])
# self.down3 = down(nums_hidden[2], nums_hidden[3])
def forward(self, x):
# print(x.shape)
x = self.inc(x)
# print(x.shape)
x = self.down1(x)
x = self.down2(x)
# x = self.down3(x)
return x
class Spatial_Decoder(nn.Module):
def __init__(self, nums_hidden, channel_num):
super(Spatial_Decoder, self).__init__()
# self.up1 = up(nums_hidden[3], nums_hidden[2])
self.up2 = up(nums_hidden[2], nums_hidden[1])
self.up3 = up(nums_hidden[1], nums_hidden[0])
self.out = outconv(nums_hidden[0], channel_num)
def forward(self, x):
# x = self.up1(x)
x = self.up2(x)
x = self.up3(x)
x = self.out(x)
return x
class ConvTransformer_recon_correct(nn.Module):
def __init__(self, tot_raw_num, nums_hidden, num_layers=1, num_dec_frames=1, num_heads=4, with_residual=True,
with_pos=True, pos_kind='sine', mode=0, use_flow=True):
super(ConvTransformer_recon_correct, self).__init__()
self.raw_channel_num = 3 # RGB channel no.
self.of_channel_num = 2
# self.feature_embedding = FeatureEmbedding(model_depth)
self.feature_embedding = Spatial_Encoder(nums_hidden, self.raw_channel_num)
self.encoder = ConvTransformerEncoder(num_layers=num_layers, model_depth=nums_hidden[-1], num_heads=num_heads,
with_residual=with_residual, with_pos=with_pos, pos_kind=pos_kind)
self.prediction = Spatial_Decoder(nums_hidden, self.raw_channel_num)
if use_flow:
self.feature_embedding_of = Spatial_Encoder(nums_hidden, self.raw_channel_num)
self.encoder_of = ConvTransformerEncoder(num_layers=num_layers, model_depth=nums_hidden[-1],
num_heads=num_heads,
with_residual=with_residual, with_pos=with_pos, pos_kind=pos_kind)
self.prediction_of = Spatial_Decoder(nums_hidden, self.of_channel_num)
self.task = mode
self.num_dec_frames = num_dec_frames
self.tot_raw_num = tot_raw_num
self.tot_of_num = tot_raw_num
self.use_flow = use_flow
self.nums_hidden = nums_hidden
def forward(self, input, of_targets_full):
b,c_in,h,w = input.shape
assert c_in == self.raw_channel_num*self.tot_raw_num
# convert to 5 dimensions for inputs
input = input.permute(0, 2, 3, 1).contiguous() # b,h,w,c_in
new_shape_input = input.size()[:-1] + (self.tot_raw_num, self.raw_channel_num) # b,h,w,l,c
input = input.view(*new_shape_input)
input = input.permute(0, 3, 4, 1, 2).contiguous().cuda() # b,l,c,h,w
of_targets_full = of_targets_full.permute(0, 2, 3, 1).contiguous()
new_shape_of_targets = of_targets_full.size()[:-1] + (self.tot_of_num, self.of_channel_num)
of_targets_full = of_targets_full.view(*new_shape_of_targets)
of_targets_full = of_targets_full.permute(0, 3, 4, 1, 2).contiguous().cuda()
# interpolation
input_frames = input
raw_targets = input # [...,1:]
input_frames = torch.reshape(input_frames, (-1, self.raw_channel_num, h, w))
img_tensor = self.feature_embedding(input_frames) # b*l,c_f,h,w
_, c_f, h_small, w_small = img_tensor.shape
img_tensor = torch.reshape(img_tensor, (b, -1, self.nums_hidden[-1], h_small, w_small)) # b,l,c_f,h,w
encoderout = self.encoder(img_tensor) # b,l,c_f,h,w
encoderout = torch.reshape(encoderout, (-1, self.nums_hidden[-1], h_small, w_small))
raw_outputs = self.prediction(encoderout)
raw_outputs = torch.reshape(raw_outputs, (-1, self.tot_raw_num, self.raw_channel_num, h, w))
if self.use_flow:
of_targets = of_targets_full
input_of = input
input_of = torch.reshape(input_of, (-1, self.raw_channel_num, h, w))
img_tensor_of = self.feature_embedding_of(input_of)
_, c_f, h_small, w_small = img_tensor_of.shape
img_tensor_of = torch.reshape(img_tensor_of, (b, -1, self.nums_hidden[-1], h_small, w_small)) # b,l,c_f,h,w
encoderout_of = self.encoder_of(img_tensor_of) # b,l,c_f,h,w
encoderout_of = torch.reshape(encoderout_of, (-1, self.nums_hidden[-1], h_small, w_small))
of_outputs = self.prediction_of(encoderout_of)
of_outputs = torch.reshape(of_outputs, (-1, self.tot_of_num, self.of_channel_num, h, w))
else:
of_outputs = []
of_targets = []
return of_outputs, raw_outputs, of_targets, raw_targets
class Unet(nn.Module):
def __init__(self, tot_raw_num, nums_hidden, use_flow=True):
super(Unet, self).__init__()
self.use_flow=use_flow
self.tot_raw_num = tot_raw_num
self.tot_of_num = tot_raw_num
self.raw_channel_num = 3
self.of_channel_num = 2
self.inc = inconv(3, nums_hidden[0])
self.down1 = down(nums_hidden[0], nums_hidden[1])
self.down2 = down(nums_hidden[1], nums_hidden[2])
self.up1 = up_unet(nums_hidden[2], nums_hidden[1])
self.up2 = up_unet(nums_hidden[1], nums_hidden[0])
self.out = outconv(nums_hidden[0], self.raw_channel_num)
#of
if self.use_flow:
self.inc_of = inconv(3, nums_hidden[0])
self.down1_of = down(nums_hidden[0], nums_hidden[1])
self.down2_of = down(nums_hidden[1], nums_hidden[2])
self.up1_of = up_unet(nums_hidden[2], nums_hidden[1])
self.up2_of = up_unet(nums_hidden[1], nums_hidden[0])
self.out_of = outconv(nums_hidden[0], self.of_channel_num)
def forward(self, input, of_targets_full):
b,c_in,h,w = input.shape
assert c_in == self.raw_channel_num*self.tot_raw_num
# convert to 5 dimensions for inputs
input = input.permute(0, 2, 3, 1).contiguous() # b,h,w,c_in
new_shape_input = input.size()[:-1] + (self.raw_channel_num, self.tot_raw_num) # b,h,w,c,l
input = input.view(*new_shape_input)
input = input.permute(0, 4, 3, 1, 2).contiguous().cuda() # b,l,c,h,w
of_targets_full = of_targets_full.permute(0, 2, 3, 1).contiguous()
new_shape_of_targets = of_targets_full.size()[:-1] + (self.of_channel_num, self.tot_of_num)
of_targets_full = of_targets_full.view(*new_shape_of_targets)
of_targets_full = of_targets_full.permute(0, 4, 3, 1, 2).contiguous().cuda()
# interpolation
input_frames = input
raw_targets = input # [...,1:]
input_frames = torch.reshape(input_frames, (-1, self.raw_channel_num, h, w))
out_1 = self.inc(input_frames)
out_2 = self.down1(out_1)
out_3 = self.down2(out_2)
raw_outputs = self.up1(out_3, out_2)
raw_outputs = self.up2(raw_outputs, out_1)
raw_outputs = self.out(raw_outputs)
raw_outputs = torch.reshape(raw_outputs, (-1, self.tot_raw_num, self.raw_channel_num, h, w))
if self.use_flow:
of_targets = of_targets_full
input_of = input
input_of = torch.reshape(input_of, (-1, self.raw_channel_num, h, w))
out_1_of = self.inc_of(input_of)
out_2_of = self.down1_of(out_1_of)
out_3_of = self.down2_of(out_2_of)
of_outputs = self.up1_of(out_3_of, out_2_of)
of_outputs = self.up2_of(of_outputs, out_1_of)
of_outputs = self.out_of(of_outputs)
of_outputs = torch.reshape(of_outputs, (-1, self.tot_raw_num, self.of_channel_num, h, w))
else:
of_outputs = []
of_targets = []
return of_outputs, raw_outputs, of_targets, raw_targets
class Conv_LSTM(nn.Module):
def __init__(self, tot_raw_num, nums_hidden, use_flow=True):
super(Conv_LSTM, self).__init__()
self.raw_channel_num = 3 # RGB channel no.
self.of_channel_num = 2
# self.feature_embedding = FeatureEmbedding(model_depth)
self.feature_embedding = Spatial_Encoder(nums_hidden, self.raw_channel_num)
self.prediction = Spatial_Decoder(nums_hidden, self.raw_channel_num)
self.convlstm = LSTM(input_dim = nums_hidden[-1], hidden_dim=[nums_hidden[-1],nums_hidden[-1],nums_hidden[-1],
nums_hidden[-1], nums_hidden[-1]],
kernel_size=(3,3), num_layers=5,
batch_first=True, bias=True, return_all_layers=False)
if use_flow:
self.feature_embedding_of = Spatial_Encoder(nums_hidden, self.raw_channel_num)
self.convlstm_of = LSTM(input_dim=nums_hidden[-1],
hidden_dim=[nums_hidden[-1], nums_hidden[-1], nums_hidden[-1]],
kernel_size=(3, 3), num_layers=3,
batch_first=True, bias=True, return_all_layers=False)
self.prediction_of = Spatial_Decoder(nums_hidden, self.of_channel_num)
self.tot_raw_num = tot_raw_num
self.tot_of_num = tot_raw_num
self.use_flow = use_flow
self.nums_hidden = nums_hidden
def forward(self, input, of_targets_full):
b,c_in,h,w = input.shape
assert c_in == self.raw_channel_num*self.tot_raw_num
# convert to 5 dimensions for inputs
input = input.permute(0, 2, 3, 1).contiguous() # b,h,w,c_in
new_shape_input = input.size()[:-1] + (self.raw_channel_num, self.tot_raw_num) # b,h,w,c,l
input = input.view(*new_shape_input)
input = input.permute(0, 4, 3, 1, 2).contiguous().cuda() # b,l,c,h,w
of_targets_full = of_targets_full.permute(0, 2, 3, 1).contiguous()
new_shape_of_targets = of_targets_full.size()[:-1] + (self.of_channel_num, self.tot_of_num)
of_targets_full = of_targets_full.view(*new_shape_of_targets)
of_targets_full = of_targets_full.permute(0, 4, 3, 1, 2).contiguous().cuda()
raw_targets = input
input_frames = input
input_frames = torch.reshape(input_frames, (-1, self.raw_channel_num, h, w))
img_tensor = self.feature_embedding(input_frames) # b*l,c_f,h,w
_, c_f, h_small, w_small = img_tensor.shape
img_tensor = torch.reshape(img_tensor, (-1, self.tot_raw_num, self.nums_hidden[-1], h_small, w_small))
img_tensor, _ = self.convlstm(img_tensor)
# print(img_tensor[0].size())
# zz
# print(img_tensor[0][0].size())
img_tensor = torch.reshape(img_tensor[0], (-1, self.nums_hidden[-1], h_small, w_small))
raw_outputs = self.prediction(img_tensor)
raw_outputs = torch.reshape(raw_outputs, (-1, self.tot_raw_num, self.raw_channel_num, h, w))
if self.use_flow:
of_targets = of_targets_full
input_of = torch.reshape(input, (-1, self.raw_channel_num, h, w))
img_tensor_of = self.feature_embedding_of(input_of)
_, c_f, h_small, w_small = img_tensor_of.shape
img_tensor_of = torch.reshape(img_tensor_of, (-1, self.tot_of_num, self.nums_hidden[-1], h_small, w_small))
img_tensor_of, _ = self.convlstm_of(img_tensor_of)
img_tensor_of = torch.reshape(img_tensor_of[0], (-1, self.nums_hidden[-1], h_small, w_small))
of_outputs = self.prediction_of(img_tensor_of)
of_outputs = torch.reshape(of_outputs, (-1, self.tot_of_num, self.of_channel_num, h, w))
else:
of_outputs = []
of_targets = []
return of_outputs, raw_outputs, of_targets, raw_targets
| 21,874 | 36.521441 | 139 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/flowlib.py
|
#!/usr/bin/python
import png
import numpy as np
import matplotlib.colors as cl
import matplotlib.pyplot as plt
from PIL import Image
UNKNOWN_FLOW_THRESH = 1e7
SMALLFLOW = 0.0
LARGEFLOW = 1e8
"""
=============
Flow Section
=============
"""
def show_flow(filename):
"""
visualize optical flow map using matplotlib
:param filename: optical flow file
:return: None
"""
flow = read_flow(filename)
img = flow_to_image(flow)
plt.imshow(img)
plt.show()
def visualize_flow(flow, mode='Y'):
"""
this function visualize the input flow
:param flow: input flow in array
:param mode: choose which color mode to visualize the flow (Y: YCbCr, RGB: RGB color)
:return: None
"""
if mode == 'Y':
# YCbCr color wheel
img = flow_to_image(flow)
plt.imshow(img)
plt.show()
elif mode == 'RGB':
(h, w) = flow.shape[0:2]
du = flow[:, :, 0]
dv = flow[:, :, 1]
valid = flow[:, :, 2]
max_flow = max(np.max(du), np.max(dv))
img = np.zeros((h, w, 3), dtype=np.float64)
# angle layer
img[:, :, 0] = np.arctan2(dv, du) / (2 * np.pi)
# magnitude layer, scaled by 8 / max_flow and clipped to [0, 1] below
img[:, :, 1] = np.sqrt(du * du + dv * dv) * 8 / max_flow
# phase layer
img[:, :, 2] = 8 - img[:, :, 1]
# clip to [0,1]
small_idx = img[:, :, 0:3] < 0
large_idx = img[:, :, 0:3] > 1
img[small_idx] = 0
img[large_idx] = 1
# convert to rgb
img = cl.hsv_to_rgb(img)
# remove invalid point
img[:, :, 0] = img[:, :, 0] * valid
img[:, :, 1] = img[:, :, 1] * valid
img[:, :, 2] = img[:, :, 2] * valid
# show
plt.imshow(img)
plt.show()
return None
def read_flow(filename):
"""
read optical flow from Middlebury .flo file
:param filename: name of the flow file
:return: optical flow data in matrix
"""
f = open(filename, 'rb')
magic = np.fromfile(f, np.float32, count=1)[0]
data2d = None
if 202021.25 != magic:
    print('Magic number incorrect. Invalid .flo file')
else:
    w = int(np.fromfile(f, np.int32, count=1)[0])  # cast to plain int for use as a count
    h = int(np.fromfile(f, np.int32, count=1)[0])
    # print("Reading %d x %d flo file" % (h, w))
    data2d = np.fromfile(f, np.float32, count=2 * w * h)
    # reshape data into 3D array (rows, columns, channels)
    data2d = np.resize(data2d, (h, w, 2))
f.close()
return data2d
def read_flow_png(flow_file):
"""
Read optical flow from KITTI .png file
:param flow_file: name of the flow file
:return: optical flow data in matrix
"""
flow_object = png.Reader(filename=flow_file)
flow_direct = flow_object.asDirect()
flow_data = list(flow_direct[2])
(w, h) = flow_direct[3]['size']
flow = np.zeros((h, w, 3), dtype=np.float64)
for i in range(len(flow_data)):
flow[i, :, 0] = flow_data[i][0::3]
flow[i, :, 1] = flow_data[i][1::3]
flow[i, :, 2] = flow_data[i][2::3]
invalid_idx = (flow[:, :, 2] == 0)
flow[:, :, 0:2] = (flow[:, :, 0:2] - 2 ** 15) / 64.0
flow[invalid_idx, 0] = 0
flow[invalid_idx, 1] = 0
return flow
def write_flow(flow, filename):
"""
write optical flow in Middlebury .flo format
:param flow: optical flow map
:param filename: optical flow file path to be saved
:return: None
"""
f = open(filename, 'wb')
magic = np.array([202021.25], dtype=np.float32)
(height, width) = flow.shape[0:2]
w = np.array([width], dtype=np.int32)
h = np.array([height], dtype=np.int32)
magic.tofile(f)
w.tofile(f)
h.tofile(f)
flow.tofile(f)
f.close()
def segment_flow(flow):
h = flow.shape[0]
w = flow.shape[1]
u = flow[:, :, 0]
v = flow[:, :, 1]
idx = ((abs(u) > LARGEFLOW) | (abs(v) > LARGEFLOW))
idx2 = (abs(u) == SMALLFLOW)
class0 = (v == 0) & (u == 0)
u[idx2] = 0.00001
tan_value = v / u
class1 = (tan_value < 1) & (tan_value >= 0) & (u > 0) & (v >= 0)
class2 = (tan_value >= 1) & (u >= 0) & (v >= 0)
class3 = (tan_value < -1) & (u <= 0) & (v >= 0)
class4 = (tan_value < 0) & (tan_value >= -1) & (u < 0) & (v >= 0)
class8 = (tan_value >= -1) & (tan_value < 0) & (u > 0) & (v <= 0)
class7 = (tan_value < -1) & (u >= 0) & (v <= 0)
class6 = (tan_value >= 1) & (u <= 0) & (v <= 0)
class5 = (tan_value >= 0) & (tan_value < 1) & (u < 0) & (v <= 0)
seg = np.zeros((h, w))
seg[class1] = 1
seg[class2] = 2
seg[class3] = 3
seg[class4] = 4
seg[class5] = 5
seg[class6] = 6
seg[class7] = 7
seg[class8] = 8
seg[class0] = 0
seg[idx] = 0
return seg
def flow_error(tu, tv, u, v):
"""
Calculate average end point error
:param tu: ground-truth horizontal flow map
:param tv: ground-truth vertical flow map
:param u: estimated horizontal flow map
:param v: estimated vertical flow map
:return: End point error of the estimated flow
"""
smallflow = 0.0
'''
stu = tu[bord+1:end-bord,bord+1:end-bord]
stv = tv[bord+1:end-bord,bord+1:end-bord]
su = u[bord+1:end-bord,bord+1:end-bord]
sv = v[bord+1:end-bord,bord+1:end-bord]
'''
# work on copies so the caller's flow maps are not modified in place
stu = tu.copy()
stv = tv.copy()
su = u.copy()
sv = v.copy()
idxUnknow = (abs(stu) > UNKNOWN_FLOW_THRESH) | (abs(stv) > UNKNOWN_FLOW_THRESH)
stu[idxUnknow] = 0
stv[idxUnknow] = 0
su[idxUnknow] = 0
sv[idxUnknow] = 0
# boolean mask; wrapping it in a list is deprecated for array indexing
ind2 = (np.absolute(stu) > smallflow) | (np.absolute(stv) > smallflow)
index_su = su[ind2]
index_sv = sv[ind2]
an = 1.0 / np.sqrt(index_su ** 2 + index_sv ** 2 + 1)
un = index_su * an
vn = index_sv * an
index_stu = stu[ind2]
index_stv = stv[ind2]
tn = 1.0 / np.sqrt(index_stu ** 2 + index_stv ** 2 + 1)
tun = index_stu * tn
tvn = index_stv * tn
'''
angle = un * tun + vn * tvn + (an * tn)
index = [angle == 1.0]
angle[index] = 0.999
ang = np.arccos(angle)
mang = np.mean(ang)
mang = mang * 180 / np.pi
'''
epe = np.sqrt((stu - su) ** 2 + (stv - sv) ** 2)
epe = epe[ind2]
mepe = np.mean(epe)
return mepe
def flow_to_image(flow, display=False):
"""
Convert flow into middlebury color code image
:param flow: optical flow map
:return: optical flow image in middlebury color
"""
u = flow[:, :, 0].copy()  # copy so the caller's flow is not zeroed in place below
v = flow[:, :, 1].copy()
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
u[idxUnknow] = 0
v[idxUnknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(-1, np.max(rad))
if display:
print("max flow: %.4f\nflow range:\nu = %.3f .. %.3f\nv = %.3f .. %.3f" % (maxrad, minu,maxu, minv, maxv))
u = u/(maxrad + np.finfo(float).eps)
v = v/(maxrad + np.finfo(float).eps)
img = compute_color(u, v)
idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
img[idx] = 0
return np.uint8(img)
def evaluate_flow_file(gt, pred):
"""
evaluate the estimated optical flow end point error according to ground truth provided
:param gt: ground truth file path
:param pred: estimated optical flow file path
:return: end point error, float32
"""
# Read flow files and calculate the errors
gt_flow = read_flow(gt) # ground truth flow
eva_flow = read_flow(pred) # predicted flow
# Calculate errors
average_pe = flow_error(gt_flow[:, :, 0], gt_flow[:, :, 1], eva_flow[:, :, 0], eva_flow[:, :, 1])
return average_pe
def evaluate_flow(gt_flow, pred_flow):
"""
gt: ground-truth flow
pred: estimated flow
"""
average_pe = flow_error(gt_flow[:, :, 0], gt_flow[:, :, 1], pred_flow[:, :, 0], pred_flow[:, :, 1])
return average_pe
"""
==============
Disparity Section
==============
"""
def read_disp_png(file_name):
"""
Read optical flow from KITTI .png file
:param file_name: name of the flow file
:return: optical flow data in matrix
"""
image_object = png.Reader(filename=file_name)
image_direct = image_object.asDirect()
image_data = list(image_direct[2])
(w, h) = image_direct[3]['size']
channel = len(image_data[0]) // w  # integer division: a float channel count breaks np.zeros
flow = np.zeros((h, w, channel), dtype=np.uint16)
for i in range(len(image_data)):
for j in range(channel):
flow[i, :, j] = image_data[i][j::channel]
return flow[:, :, 0] / 256
def disp_to_flowfile(disp, filename):
"""
Read KITTI disparity file in png format
:param disp: disparity matrix
:param filename: the flow file name to save
:return: None
"""
f = open(filename, 'wb')
magic = np.array([202021.25], dtype=np.float32)
(height, width) = disp.shape[0:2]
w = np.array([width], dtype=np.int32)
h = np.array([height], dtype=np.int32)
empty_map = np.zeros((height, width), dtype=np.float32)
data = np.dstack((disp, empty_map))
magic.tofile(f)
w.tofile(f)
h.tofile(f)
data.tofile(f)
f.close()
"""
==============
Image Section
==============
"""
def read_image(filename):
"""
Read normal image of any format
:param filename: name of the image file
:return: image data in matrix uint8 type
"""
img = Image.open(filename)
im = np.array(img)
return im
def warp_image(im, flow):
"""
Use optical flow to warp image to the next
:param im: image to warp
:param flow: optical flow
:return: warped image
"""
from scipy import interpolate
image_height = im.shape[0]
image_width = im.shape[1]
flow_height = flow.shape[0]
flow_width = flow.shape[1]
n = image_height * image_width
(iy, ix) = np.mgrid[0:image_height, 0:image_width]
(fy, fx) = np.mgrid[0:flow_height, 0:flow_width]
fx = fx + flow[:, :, 0]  # out-of-place add: fx/fy from np.mgrid are integer-typed
fy = fy + flow[:, :, 1]
mask = np.logical_or(fx <0 , fx > flow_width)
mask = np.logical_or(mask, fy < 0)
mask = np.logical_or(mask, fy > flow_height)
fx = np.minimum(np.maximum(fx, 0), flow_width)
fy = np.minimum(np.maximum(fy, 0), flow_height)
points = np.concatenate((ix.reshape(n,1), iy.reshape(n,1)), axis=1)
xi = np.concatenate((fx.reshape(n, 1), fy.reshape(n,1)), axis=1)
warp = np.zeros((image_height, image_width, im.shape[2]))
for i in range(im.shape[2]):
channel = im[:, :, i]
values = channel.reshape(n, 1)
new_channel = interpolate.griddata(points, values, xi, method='cubic')
new_channel = np.reshape(new_channel, [flow_height, flow_width])
new_channel[mask] = 1
warp[:, :, i] = new_channel.astype(np.uint8)
return warp.astype(np.uint8)
"""
==============
Others
==============
"""
def scale_image(image, new_range):
"""
Linearly scale the image into desired range
:param image: input image
:param new_range: the new range to be aligned
:return: image normalized in new range
"""
min_val = np.min(image).astype(np.float32)
max_val = np.max(image).astype(np.float32)
min_val_new = np.array(min(new_range), dtype=np.float32)
max_val_new = np.array(max(new_range), dtype=np.float32)
scaled_image = (image - min_val) / (max_val - min_val) * (max_val_new - min_val_new) + min_val_new
return scaled_image.astype(np.uint8)
def compute_color(u, v):
"""
compute optical flow color map
:param u: optical flow horizontal map
:param v: optical flow vertical map
:return: optical flow in color code
"""
[h, w] = u.shape
img = np.zeros([h, w, 3])
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u**2+v**2)
a = np.arctan2(-v, -u) / np.pi
fk = (a+1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols+1] = 1
f = fk - k0
for i in range(0, np.size(colorwheel,1)):
tmp = colorwheel[:, i]
col0 = tmp[k0-1] / 255
col1 = tmp[k1-1] / 255
col = (1-f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1-rad[idx]*(1-col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))
return img
def make_color_wheel():
"""
Generate color wheel according Middlebury color code
:return: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))
col += RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))
colorwheel[col:col+YG, 1] = 255
col += YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))
col += GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))
colorwheel[col:col+CB, 2] = 255
col += CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))
col += BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
colorwheel[col:col+MR, 0] = 255
return colorwheel
| 13,798 | 25.794175 | 114 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/vad_datasets.py
|
import torch
import numpy as np
import cv2
from collections import OrderedDict
import os
import glob
import scipy.io as sio
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
transform = transforms.Compose([
transforms.ToTensor(),
])
# frame_size: the frame information of each dataset: (h, w, file_format, scene_num)
frame_size = {'UCSDped1' : (158, 238, '.tif', 1), 'UCSDped2': (240, 360, '.tif', 1), 'avenue': (360, 640, '.jpg', 1), 'ShanghaiTech': (480, 856, '.jpg', 1)}
def get_inputs(file_addr):
file_format = file_addr.split('.')[-1]
if file_format == 'mat':
return sio.loadmat(file_addr, verify_compressed_data_integrity=False)['uv']
elif file_format == 'npy':
return np.load(file_addr)
else:
return cv2.imread(file_addr)
def img_tensor2numpy(img):
# mutual transformation between ndarray-like imgs and Tensor-like images
# both intensity and rgb images are represented by 3-dim data
if isinstance(img, np.ndarray):
return torch.from_numpy(np.transpose(img, [2, 0, 1]))
else:
return np.transpose(img, [1, 2, 0]).numpy()
def img_batch_tensor2numpy(img_batch):
# both intensity and rgb image batch are represented by 4-dim data
if isinstance(img_batch, np.ndarray):
if len(img_batch.shape) == 4:
return torch.from_numpy(np.transpose(img_batch, [0, 3, 1, 2]))
else:
return torch.from_numpy(np.transpose(img_batch, [0, 1, 4, 2, 3]))
else:
if len(img_batch.numpy().shape) == 4:
return np.transpose(img_batch, [0, 2, 3, 1]).numpy()
else:
return np.transpose(img_batch, [0, 1, 3, 4, 2]).numpy()
class bbox_collate:
def __init__(self, mode):
self.mode = mode
def collate(self, batch):
if self.mode == 'train':
return bbox_collate_train(batch)
elif self.mode == 'test':
return bbox_collate_test(batch)
else:
raise NotImplementedError
def bbox_collate_train(batch):
batch_data = [x[0] for x in batch]
batch_target = [x[1] for x in batch]
return torch.cat(batch_data, dim=0), batch_target
def bbox_collate_test(batch):
batch_data = [x[0] for x in batch]
batch_target = [x[1] for x in batch]
return batch_data, batch_target
def get_foreground(img, bboxes, patch_size):
img_patches = list()
if len(img.shape) == 3:
for i in range(len(bboxes)):
x_min, x_max = int(np.ceil(bboxes[i][0])), int(np.ceil(bboxes[i][2]))  # np.int was removed in NumPy 1.24
y_min, y_max = int(np.ceil(bboxes[i][1])), int(np.ceil(bboxes[i][3]))
cur_patch = img[:, y_min:y_max, x_min:x_max]
cur_patch = cv2.resize(np.transpose(cur_patch, [1, 2, 0]), (patch_size, patch_size))
img_patches.append(np.transpose(cur_patch, [2, 0, 1]))
img_patches = np.array(img_patches)
elif len(img.shape) == 4:
for i in range(len(bboxes)):
x_min, x_max = int(np.ceil(bboxes[i][0])), int(np.ceil(bboxes[i][2]))  # np.int was removed in NumPy 1.24
y_min, y_max = int(np.ceil(bboxes[i][1])), int(np.ceil(bboxes[i][3]))
cur_patch_set = img[:, :, y_min:y_max, x_min:x_max]
tmp_set = list()
for j in range(img.shape[0]):
cur_patch = cur_patch_set[j]
cur_patch = cv2.resize(np.transpose(cur_patch, [1, 2, 0]), (patch_size, patch_size))
tmp_set.append(np.transpose(cur_patch, [2, 0, 1]))
cur_cube = np.array(tmp_set)
img_patches.append(cur_cube)
img_patches = np.array(img_patches)
return img_patches
def unified_dataset_interface(dataset_name, dir, mode='train', context_frame_num=0, border_mode='elastic', file_format=None, all_bboxes=None, patch_size=32):
if file_format is None:
if dataset_name in ['UCSDped1', 'UCSDped2']:
file_format = '.tif'
elif dataset_name in ['avenue', 'ShanghaiTech']:
file_format = '.jpg'
else:
raise NotImplementedError
if dataset_name in ['UCSDped1', 'UCSDped2']:
dataset = ped_dataset(dir=dir, context_frame_num=context_frame_num, mode=mode, border_mode=border_mode, all_bboxes=all_bboxes, patch_size=patch_size, file_format=file_format)
elif dataset_name == 'avenue':
dataset = avenue_dataset(dir=dir, context_frame_num=context_frame_num, mode=mode, border_mode=border_mode, all_bboxes=all_bboxes, patch_size=patch_size, file_format=file_format)
elif dataset_name == 'ShanghaiTech':
dataset = shanghaiTech_dataset(dir=dir, context_frame_num=context_frame_num, mode=mode, border_mode=border_mode, all_bboxes=all_bboxes, patch_size=patch_size, file_format=file_format)
else:
raise NotImplementedError
return dataset
class patch_to_train_dataset(Dataset):
def __init__(self, data, transform=transform):
self.data = data
self.transform = transform
def __len__(self):
return self.data.shape[0]
def __getitem__(self, indice):
if self.transform is not None:
return self.transform(self.data[indice])
else:
return self.data[indice]
class cube_to_train_dataset_ssl(Dataset):
def __init__(self, data, labels=None, transform=transform):
self.data = data # N,l,h,w,c
if labels is not None:
self.labels = labels
else:
self.labels = None
self.transform = transform
def __len__(self):
return self.data.shape[0]
def __getitem__(self, indice):
cur_data = self.data[indice]
if self.transform is not None:
cur_data2return = []
for idx in range(cur_data.shape[0]):
cur_data2return.append(self.transform(cur_data[idx])) # h,w,c -> c,h,w + ->[0,1]
cur_data2return = torch.stack(cur_data2return, 0) # l,c,h,w
else:
cur_data2return = cur_data
if self.labels is not None:
cur_label = self.labels[indice]
return cur_data2return, cur_label
else:
return cur_data2return
class cube_to_train_dataset(Dataset):
def __init__(self, data, target=None, transform=transform):
if len(data.shape) == 4:
data = data[:, np.newaxis, :, :, :]
if target is not None:
if len(target.shape) == 4:
target = target[:, np.newaxis, :, :, :]
self.data = data
self.target = target
self.transform = transform
def __len__(self):
return self.data.shape[0]
def __getitem__(self, indice):
if self.target is None:
cur_data = self.data[indice]
cur_train_data = cur_data[:-1]
cur_target = cur_data[-1]
cur_train_data = np.transpose(cur_train_data, [1, 2, 0, 3])
cur_train_data = np.reshape(cur_train_data, (cur_train_data.shape[0], cur_train_data.shape[1], -1))
if self.transform is not None:
return self.transform(cur_train_data), self.transform(cur_target)
else:
return cur_train_data, cur_target
else:
cur_data = self.data[indice]
cur_train_data = cur_data
cur_target = self.target[indice]
cur_target2 = cur_data.copy()
cur_train_data = np.transpose(cur_train_data, [1, 2, 0, 3])
cur_train_data = np.reshape(cur_train_data, (cur_train_data.shape[0], cur_train_data.shape[1], -1))
cur_target = np.transpose(cur_target, [1, 2, 0, 3])
cur_target = np.reshape(cur_target, (cur_target.shape[0], cur_target.shape[1], -1))
cur_target2 = np.transpose(cur_target2, [1, 2, 0, 3])
cur_target2 = np.reshape(cur_target2, (cur_target2.shape[0], cur_target2.shape[1], -1))
if self.transform is not None:
return self.transform(cur_train_data), self.transform(cur_target), self.transform(cur_target2)
else:
return cur_train_data, cur_target, cur_target2
class ped_dataset(Dataset):
'''
Loading dataset for UCSD Ped1/Ped2 (the directory name decides which)
'''
def __init__(self, dir, mode='train', context_frame_num=0, border_mode='elastic', file_format='.tif', all_bboxes=None, patch_size=32):
'''
:param dir: The directory to load the UCSD Ped1/Ped2 dataset
mode: train/test dataset
'''
self.dir = dir
self.mode = mode
self.videos = OrderedDict()
self.all_frame_addr = list()
self.frame_video_idx = list()
self.tot_frame_num = 0
self.context_frame_num = context_frame_num
self.border_mode = border_mode
self.file_format = file_format
self.all_bboxes = all_bboxes
self.patch_size = patch_size
self.return_gt = False
if mode == 'test':
self.all_gt_addr = list()
self.gts = OrderedDict()
if self.dir[-1] == '1':
self.h = 158
self.w = 238
else:
self.h = 240
self.w = 360
self.dataset_init()
def __len__(self):
return self.tot_frame_num
def dataset_init(self):
if self.mode == 'train':
data_dir = os.path.join(self.dir, 'Train')
elif self.mode == 'test':
data_dir = os.path.join(self.dir, 'Test')
else:
raise NotImplementedError
if self.mode == 'train':
video_dir_list = glob.glob(os.path.join(data_dir, '*'))
idx = 1
for video in sorted(video_dir_list):
video_name = video.split('/')[-1]
if 'Train' in video_name:
self.videos[video_name] = {}
self.videos[video_name]['path'] = video
self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*'+self.file_format))
self.videos[video_name]['frame'].sort()
self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])
self.frame_video_idx += [idx] * self.videos[video_name]['length']
idx += 1
# merge different frames of different videos into one list
for _, cont in self.videos.items():
self.all_frame_addr += cont['frame']
self.tot_frame_num = len(self.all_frame_addr)
elif self.mode == 'test':
dir_list = glob.glob(os.path.join(data_dir, '*'))
video_dir_list = []
gt_dir_list = []
for dir in sorted(dir_list):
if '_gt' in dir:
gt_dir_list.append(dir)
self.return_gt = True
else:
name = dir.split('/')[-1]
if 'Test' in name:
video_dir_list.append(dir)
# load frames for test
idx = 1
for video in sorted(video_dir_list):
video_name = video.split('/')[-1]
self.videos[video_name] = {}
self.videos[video_name]['path'] = video
self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*'+self.file_format))
self.videos[video_name]['frame'].sort()
self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])
self.frame_video_idx += [idx] * self.videos[video_name]['length']
idx += 1
# merge different frames of different videos into one list
for _, cont in self.videos.items():
self.all_frame_addr += cont['frame']
self.tot_frame_num = len(self.all_frame_addr)
# load ground truth of frames
if self.return_gt:
for gt in sorted(gt_dir_list):
gt_name = gt.split('/')[-1]
self.gts[gt_name] = {}
self.gts[gt_name]['gt_frame'] = glob.glob(os.path.join(gt, '*.bmp'))
self.gts[gt_name]['gt_frame'].sort()
# merge different frames of different videos into one list
for _, cont in self.gts.items():
self.all_gt_addr += cont['gt_frame']
else:
raise NotImplementedError
def context_range(self, indice):
if self.border_mode == 'elastic':
# check head and tail
if indice - self.context_frame_num < 0:
indice = self.context_frame_num
elif indice + self.context_frame_num > self.tot_frame_num - 1:
indice = self.tot_frame_num - 1 - self.context_frame_num
start_idx = indice - self.context_frame_num
end_idx = indice + self.context_frame_num
need_context_num = 2 * self.context_frame_num + 1
elif self.border_mode == 'predict':
if indice - self.context_frame_num < 0:
start_idx = 0
else:
start_idx = indice - self.context_frame_num
end_idx = indice
need_context_num = self.context_frame_num + 1
else:
# check head and tail
if indice - self.context_frame_num < 0:
start_idx = 0
else:
start_idx = indice - self.context_frame_num
if indice + self.context_frame_num > self.tot_frame_num - 1:
end_idx = self.tot_frame_num - 1
else:
end_idx = indice + self.context_frame_num
need_context_num = 2 * self.context_frame_num + 1
center_idx = self.frame_video_idx[indice]
video_idx = self.frame_video_idx[start_idx:end_idx + 1]
pad = need_context_num - len(video_idx)
if pad > 0:
if start_idx == 0:
video_idx = [video_idx[0]] * pad + video_idx
else:
video_idx = video_idx + [video_idx[-1]] * pad
tmp = np.array(video_idx) - center_idx
offset = tmp.sum()
if tmp[0] != 0 and tmp[-1] != 0: # extreme condition that is not likely to happen
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
if pad == 0 and offset == 0: # all frames are from the same video
idx = [x for x in range(start_idx, end_idx+1)]
return idx
else:
if self.border_mode == 'elastic':
idx = [x for x in range(start_idx - offset, end_idx - offset + 1)]
return idx
elif self.border_mode == 'predict':
if pad > 0 and np.abs(offset) > 0:
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * np.maximum(np.abs(offset), pad) + idx
return idx
else:
if pad > 0 and np.abs(offset) > 0:
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
if offset > 0:
idx = [x for x in range(start_idx, end_idx - offset + 1)]
idx = idx + [idx[-1]] * np.abs(offset)
return idx
elif offset < 0:
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * np.abs(offset) + idx
return idx
if pad > 0:
if start_idx == 0:
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * pad + idx
return idx
else:
idx = [x for x in range(start_idx, end_idx - offset + 1)]
idx = idx + [idx[-1]] * pad
return idx
def __getitem__(self, indice):
if self.mode == 'train':
if self.context_frame_num == 0:
img_batch = np.transpose(get_inputs(self.all_frame_addr[indice]), [2, 0, 1])
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
else:
frame_range = self.context_range(indice=indice)
img_batch = []
for idx in frame_range:
cur_img = np.transpose(get_inputs(self.all_frame_addr[idx]), [2, 0, 1])
img_batch.append(cur_img)
img_batch = np.array(img_batch)
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
return img_batch, torch.zeros(1) # to unify the interface
elif self.mode == 'test':
if self.context_frame_num == 0:
img_batch = np.transpose(get_inputs(self.all_frame_addr[indice]), [2, 0, 1])
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
if self.return_gt:
gt_batch = cv2.imread(self.all_gt_addr[indice], cv2.IMREAD_GRAYSCALE)
gt_batch = torch.from_numpy(gt_batch)
else:
frame_range = self.context_range(indice=indice)
img_batch = []
for idx in frame_range:
cur_img = np.transpose(get_inputs(self.all_frame_addr[idx]), [2, 0, 1])
img_batch.append(cur_img)
img_batch = np.array(img_batch)
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
if self.return_gt:
gt_batch = cv2.imread(self.all_gt_addr[indice], cv2.IMREAD_GRAYSCALE)
gt_batch = torch.from_numpy(gt_batch)
if self.return_gt:
return img_batch, gt_batch
else:
return img_batch, torch.zeros(1) # to unify the interface
else:
raise NotImplementedError
class avenue_dataset(Dataset):
'''
Loading dataset for Avenue
'''
def __init__(self, dir, mode='train', context_frame_num=0, border_mode='elastic', file_format='.jpg', all_bboxes=None, patch_size=32):
'''
:param dir: The directory to load Avenue dataset
mode: train/test dataset
'''
self.dir = dir
self.mode = mode
self.videos = OrderedDict()
self.all_frame_addr = list()
self.frame_video_idx = list()
self.tot_frame_num = 0
self.context_frame_num = context_frame_num
self.border_mode = border_mode
self.file_format = file_format
self.all_bboxes = all_bboxes
self.patch_size = patch_size
self.return_gt = False
if mode == 'test':
self.all_gt = list()
self.dataset_init()
def __len__(self):
return self.tot_frame_num
def dataset_init(self):
if self.mode == 'train':
data_dir = os.path.join(self.dir, 'training', 'frames')
elif self.mode == 'test':
data_dir = os.path.join(self.dir, 'testing', 'frames')
gt_dir = os.path.join(self.dir, 'ground_truth_demo', 'testing_label_mask')
if os.path.exists(gt_dir):
self.return_gt = True
else:
raise NotImplementedError
if self.mode == 'train':
video_dir_list = glob.glob(os.path.join(data_dir, '*'))
idx = 1
for video in sorted(video_dir_list):
video_name = video.split('/')[-1]
self.videos[video_name] = {}
self.videos[video_name]['path'] = video
self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*'+self.file_format))
self.videos[video_name]['frame'].sort()
self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])
self.frame_video_idx += [idx] * self.videos[video_name]['length']
idx += 1
# merge different frames of different videos into one list
for _, cont in self.videos.items():
self.all_frame_addr += cont['frame']
self.tot_frame_num = len(self.all_frame_addr)
elif self.mode == 'test':
video_dir_list = glob.glob(os.path.join(data_dir, '*'))
idx = 1
for video in sorted(video_dir_list):
video_name = video.split('/')[-1]
self.videos[video_name] = {}
self.videos[video_name]['path'] = video
self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*'+self.file_format))
self.videos[video_name]['frame'].sort()
self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])
self.frame_video_idx += [idx] * self.videos[video_name]['length']
idx += 1
# merge different frames of different videos into one list
for _, cont in self.videos.items():
self.all_frame_addr += cont['frame']
self.tot_frame_num = len(self.all_frame_addr)
# set address of ground truth of frames
if self.return_gt:
self.all_gt = [sio.loadmat(os.path.join(gt_dir, str(x + 1)+'_label.mat'))['volLabel'] for x in range(len(self.videos))]
self.all_gt = np.concatenate(self.all_gt, axis=1)
else:
raise NotImplementedError
def context_range(self, indice):
if self.border_mode == 'elastic':
# check head and tail
if indice - self.context_frame_num < 0:
indice = self.context_frame_num
elif indice + self.context_frame_num > self.tot_frame_num - 1:
indice = self.tot_frame_num - 1 - self.context_frame_num
start_idx = indice - self.context_frame_num
end_idx = indice + self.context_frame_num
need_context_num = 2 * self.context_frame_num + 1
elif self.border_mode == 'predict':
if indice - self.context_frame_num < 0:
start_idx = 0
else:
start_idx = indice - self.context_frame_num
end_idx = indice
need_context_num = self.context_frame_num + 1
else:
# check head and tail
if indice - self.context_frame_num < 0:
start_idx = 0
else:
start_idx = indice - self.context_frame_num
if indice + self.context_frame_num > self.tot_frame_num - 1:
end_idx = self.tot_frame_num - 1
else:
end_idx = indice + self.context_frame_num
need_context_num = 2 * self.context_frame_num + 1
center_idx = self.frame_video_idx[indice]
video_idx = self.frame_video_idx[start_idx:end_idx + 1]
pad = need_context_num - len(video_idx)
if pad > 0:
if start_idx == 0:
video_idx = [video_idx[0]] * pad + video_idx
else:
video_idx = video_idx + [video_idx[-1]] * pad
tmp = np.array(video_idx) - center_idx
offset = tmp.sum()
if tmp[0] != 0 and tmp[-1] != 0: # extreme condition that is not likely to happen
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
if pad == 0 and offset == 0: # all frames are from the same video
idx = [x for x in range(start_idx, end_idx+1)]
return idx
else:
if self.border_mode == 'elastic':
idx = [x for x in range(start_idx - offset, end_idx - offset + 1)]
return idx
elif self.border_mode == 'predict':
if pad > 0 and np.abs(offset) > 0:
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * np.maximum(np.abs(offset), pad) + idx
return idx
else:
if pad > 0 and np.abs(offset) > 0:
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
if offset > 0:
idx = [x for x in range(start_idx, end_idx - offset + 1)]
idx = idx + [idx[-1]] * np.abs(offset)
return idx
elif offset < 0:
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * np.abs(offset) + idx
return idx
if pad > 0:
if start_idx == 0:
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * pad + idx
return idx
else:
idx = [x for x in range(start_idx, end_idx - offset + 1)]
idx = idx + [idx[-1]] * pad
return idx
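    # Illustrative note (added): with border_mode='elastic' and
    # context_frame_num=2, a head index such as indice=0 is shifted inward so
    # that a full window exists, yielding the frame indices [0, 1, 2, 3, 4]
    # (assuming the first video holds at least 5 frames).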
def __getitem__(self, indice):
if self.mode == 'train':
if self.context_frame_num == 0:
img_batch = np.transpose(get_inputs(self.all_frame_addr[indice]), [2, 0, 1])
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
else:
frame_range = self.context_range(indice=indice)
img_batch = []
for idx in frame_range:
cur_img = np.transpose(get_inputs(self.all_frame_addr[idx]), [2, 0, 1])
img_batch.append(cur_img)
img_batch = np.array(img_batch)
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
return img_batch, torch.zeros(1) # to unify the interface
elif self.mode == 'test':
if self.context_frame_num == 0:
img_batch = np.transpose(get_inputs(self.all_frame_addr[indice]), [2, 0, 1])
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
if self.return_gt:
gt_batch = self.all_gt[0, indice]
gt_batch = torch.from_numpy(gt_batch)
else:
frame_range = self.context_range(indice=indice)
img_batch = []
for idx in frame_range:
cur_img = np.transpose(get_inputs(self.all_frame_addr[idx]), [2, 0, 1])
img_batch.append(cur_img)
img_batch = np.array(img_batch)
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
if self.return_gt:
gt_batch = self.all_gt[0, indice]
gt_batch = torch.from_numpy(gt_batch)
if self.return_gt:
return img_batch, gt_batch
else:
return img_batch, torch.zeros(1)
else:
raise NotImplementedError
class shanghaiTech_dataset(Dataset):
'''
Loading dataset for ShanghaiTech
'''
def __init__(self, dir, mode='train', context_frame_num=0, border_mode='elastic', file_format='.jpg', all_bboxes=None, patch_size=32):
        '''
        :param dir: The directory to load the ShanghaiTech dataset
        :param mode: 'train' or 'test' split
        '''
self.dir = dir
self.mode = mode
self.videos = OrderedDict()
self.all_frame_addr = list()
self.frame_video_idx = list()
self.tot_frame_num = 0
self.context_frame_num = context_frame_num
self.border_mode = border_mode
self.file_format = file_format
self.all_bboxes = all_bboxes
self.patch_size = patch_size
self.return_gt = False
self.save_scene_idx = list()
self.scene_idx = list()
self.scene_num = 0
if mode == 'test':
self.all_gt = list()
self.dataset_init()
def __len__(self):
return self.tot_frame_num
def dataset_init(self):
if self.mode == 'train':
data_dir = os.path.join(self.dir, 'training', 'videosFrame')
elif self.mode == 'test':
data_dir = os.path.join(self.dir, 'testing', 'frames')
gt_dir = os.path.join(self.dir, 'testing', 'test_frame_mask')
if os.path.exists(gt_dir):
self.return_gt = True
else:
raise NotImplementedError
if self.mode == 'train':
video_dir_list = glob.glob(os.path.join(data_dir, '*'))
idx = 1
for video in sorted(video_dir_list):
video_name = video.split('/')[-1]
self.videos[video_name] = {}
self.videos[video_name]['path'] = video
self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*'+self.file_format))
self.videos[video_name]['frame'].sort()
self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])
self.frame_video_idx += [idx] * self.videos[video_name]['length']
idx += 1
self.save_scene_idx += [int(video_name[:2])] * len(self.videos[video_name]['frame']) # frame data are saved by save_scene_idx
self.scene_idx += [1] * len(self.videos[video_name]['frame']) # frames are processed by scene idx
self.scene_num = len(set(self.scene_idx))
# merge different frames of different videos into one list
for _, cont in self.videos.items():
self.all_frame_addr += cont['frame']
self.tot_frame_num = len(self.all_frame_addr)
elif self.mode == 'test':
idx = 1
video_dir_list = glob.glob(os.path.join(data_dir, '*'))
for video in sorted(video_dir_list):
video_name = video.split('/')[-1]
self.videos[video_name] = {}
self.videos[video_name]['path'] = video
self.videos[video_name]['frame'] = glob.glob(os.path.join(video, '*'+self.file_format))
self.videos[video_name]['frame'].sort()
self.videos[video_name]['length'] = len(self.videos[video_name]['frame'])
self.frame_video_idx += [idx] * self.videos[video_name]['length']
idx += 1
self.save_scene_idx += [int(video_name[:2])] * len(self.videos[video_name]['frame'])
self.scene_idx += [1] * len(self.videos[video_name]['frame'])
self.scene_num = len(set(self.scene_idx))
# merge different frames of different videos into one list
for _, cont in self.videos.items():
self.all_frame_addr += cont['frame']
self.tot_frame_num = len(self.all_frame_addr)
# load ground truth of frames
if self.return_gt:
gt_dir_list = glob.glob(os.path.join(gt_dir, '*'))
for gt in sorted(gt_dir_list):
self.all_gt.append(np.load(gt))
# merge different frames of different videos into one list, only support frame gt now due to memory issue
self.all_gt = np.concatenate(self.all_gt, axis=0)
else:
raise NotImplementedError
def context_range(self, indice):
if self.border_mode == 'elastic':
# check head and tail
if indice - self.context_frame_num < 0:
indice = self.context_frame_num
elif indice + self.context_frame_num > self.tot_frame_num - 1:
indice = self.tot_frame_num - 1 - self.context_frame_num
start_idx = indice - self.context_frame_num
end_idx = indice + self.context_frame_num
need_context_num = 2 * self.context_frame_num + 1
elif self.border_mode == 'predict':
if indice - self.context_frame_num < 0:
start_idx = 0
else:
start_idx = indice - self.context_frame_num
end_idx = indice
need_context_num = self.context_frame_num + 1
else:
# check head and tail
if indice - self.context_frame_num < 0:
start_idx = 0
else:
start_idx = indice - self.context_frame_num
if indice + self.context_frame_num > self.tot_frame_num - 1:
end_idx = self.tot_frame_num - 1
else:
end_idx = indice + self.context_frame_num
need_context_num = 2 * self.context_frame_num + 1
center_idx = self.frame_video_idx[indice]
video_idx = self.frame_video_idx[start_idx:end_idx + 1]
pad = need_context_num - len(video_idx)
if pad > 0:
if start_idx == 0:
video_idx = [video_idx[0]] * pad + video_idx
else:
video_idx = video_idx + [video_idx[-1]] * pad
tmp = np.array(video_idx) - center_idx
offset = tmp.sum()
if tmp[0] != 0 and tmp[-1] != 0: # extreme condition that is not likely to happen
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
if pad == 0 and offset == 0: # all frames are from the same video
idx = [x for x in range(start_idx, end_idx+1)]
return idx
else:
if self.border_mode == 'elastic':
idx = [x for x in range(start_idx - offset, end_idx - offset + 1)]
return idx
elif self.border_mode == 'predict':
if pad > 0 and np.abs(offset) > 0:
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * np.maximum(np.abs(offset), pad) + idx
return idx
else:
if pad > 0 and np.abs(offset) > 0:
print('The video is too short or the context frame number is too large!')
raise NotImplementedError
if offset > 0:
idx = [x for x in range(start_idx, end_idx - offset + 1)]
idx = idx + [idx[-1]] * np.abs(offset)
return idx
elif offset < 0:
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * np.abs(offset) + idx
return idx
if pad > 0:
if start_idx == 0:
idx = [x for x in range(start_idx - offset, end_idx + 1)]
idx = [idx[0]] * pad + idx
return idx
else:
idx = [x for x in range(start_idx, end_idx - offset + 1)]
idx = idx + [idx[-1]] * pad
return idx
def __getitem__(self, indice):
if self.mode == 'train':
if self.context_frame_num == 0:
img_batch = np.transpose(get_inputs(self.all_frame_addr[indice]), [2, 0, 1])
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
else:
frame_range = self.context_range(indice=indice)
img_batch = []
for idx in frame_range:
cur_img = np.transpose(get_inputs(self.all_frame_addr[idx]), [2, 0, 1])
img_batch.append(cur_img)
img_batch = np.array(img_batch)
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
return img_batch, torch.zeros(1) # to unify the interface
elif self.mode == 'test':
if self.context_frame_num == 0:
img_batch = np.transpose(get_inputs(self.all_frame_addr[indice]), [2, 0, 1])
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
if self.return_gt:
gt_batch = np.array([self.all_gt[indice]])
gt_batch = torch.from_numpy(gt_batch)
else:
frame_range = self.context_range(indice=indice)
img_batch = []
for idx in frame_range:
cur_img = np.transpose(get_inputs(self.all_frame_addr[idx]), [2, 0, 1])
img_batch.append(cur_img)
img_batch = np.array(img_batch)
if self.all_bboxes is not None:
img_batch = get_foreground(img=img_batch, bboxes=self.all_bboxes[indice], patch_size=self.patch_size)
img_batch = torch.from_numpy(img_batch)
if self.return_gt:
gt_batch = np.array([self.all_gt[indice]])
gt_batch = torch.from_numpy(gt_batch)
if self.return_gt:
return img_batch, gt_batch
else:
return img_batch, torch.zeros(1) # to unify the interface
else:
raise NotImplementedError
| 39,197 | 43.291525 | 191 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/utils.py
|
import numpy as np
from sklearn.metrics import roc_curve, precision_recall_curve, auc
def save_roc_pr_curve_data(scores, labels, file_path, verbose=True):
scores = scores.flatten()
labels = labels.flatten()
scores_pos = scores[labels == 1]
scores_neg = scores[labels != 1]
truth = np.concatenate((np.zeros_like(scores_neg), np.ones_like(scores_pos)))
preds = np.concatenate((scores_neg, scores_pos))
fpr, tpr, roc_thresholds = roc_curve(truth, preds)
roc_auc = auc(fpr, tpr)
# calculate EER
# fnr = 1 - tpr
# eer1 = fpr[np.nanargmin(np.absolute(fnr-fpr))]
# eer2 = fnr[np.nanargmin(np.absolute(fnr-fpr))]
#
# # pr curve where "normal" is the positive class
# precision_norm, recall_norm, pr_thresholds_norm = precision_recall_curve(truth, preds)
# pr_auc_norm = auc(recall_norm, precision_norm)
#
# # pr curve where "anomaly" is the positive class
# precision_anom, recall_anom, pr_thresholds_anom = precision_recall_curve(truth, -preds, pos_label=0)
# pr_auc_anom = auc(recall_anom, precision_anom)
    if verbose:
print('AUC@ROC is {}'.format(roc_auc))#, 'EER1 is {}'.format(eer1), 'EER2 is {}'.format(eer2))
# np.savez_compressed(file_path,
# preds=preds, truth=truth,
# fpr=fpr, tpr=tpr, roc_thresholds=roc_thresholds, roc_auc=roc_auc,
# precision_norm=precision_norm, recall_norm=recall_norm,
# pr_thresholds_norm=pr_thresholds_norm, pr_auc_norm=pr_auc_norm,
# precision_anom=precision_anom, recall_anom=recall_anom,
# pr_thresholds_anom=pr_thresholds_anom, pr_auc_anom=pr_auc_anom)
return roc_auc
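if __name__ == '__main__':
    # Hedged usage sketch (added for illustration, not part of the original
    # file): score 1000 synthetic frames. The file_path argument is unused by
    # the active code path (the np.savez_compressed call above is commented
    # out), so any placeholder string works here.
    rng = np.random.default_rng(0)
    toy_scores = rng.random(1000)
    toy_labels = (toy_scores > 0.8).astype(np.int64) # toy ground truth
    save_roc_pr_curve_data(toy_scores, toy_labels, 'unused.npz')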
| 1,772 | 40.232558 | 106 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/module.py
|
import torch
import torch.nn as nn
import copy
from module_utils import *
import torch.nn.functional as F
from matplotlib import pyplot as plt
####################################################################################
######################### definition for encoder #################################
####################################################################################
class ConvTransformerEncoder(nn.Module):
def __init__(self, num_layers=5, model_depth=128, num_heads=4,
with_residual=True, with_pos=True, pos_kind='sine'):
super(ConvTransformerEncoder, self).__init__()
self.encoderlayer = ConvTransformerEncoderLayer(model_depth, num_heads, with_residual=with_residual,
with_pos=with_pos)
self.num_layers = num_layers
self.depth_perhead = model_depth//num_heads
self.encoder = self.__get_clones(self.encoderlayer, self.num_layers)
self.positionnet = PositionalEmbeddingLearned(int(model_depth/num_heads))
self.pos_kind = pos_kind
def __get_clones(self, module, n):
return nn.ModuleList([copy.deepcopy(module) for i in range(n)])
def forward(self, input_tensor):
out = input_tensor
if self.pos_kind == 'sine':
b, l, c, h, w = input_tensor.shape
pos = positional_encoding(l, self.depth_perhead, h, w)
elif self.pos_kind == 'learned':
pos = self.positionnet(input_tensor.shape[1:])
for layer in self.encoder:
out = layer(out, pos)
return out
class ConvTransformerEncoderLayer(nn.Module): # work as a bridge to handle multi-head
def __init__(self, model_depth=128, num_heads=4, with_residual=True, with_pos=True):
super(ConvTransformerEncoderLayer, self).__init__()
self.depth = model_depth
self.depth_perhead = int(model_depth/num_heads)
self.with_residual = with_residual
self.attention_heads = self.__get_clones(ConvTransformerEncoderLayerOneHead(self.depth_perhead,
with_pos=with_pos), num_heads)
self.feedforward = FeedForwardNet(self.depth)
self.GN1 = nn.GroupNorm(num_groups=4, num_channels=model_depth)
def __get_clones(self, module, n):
return nn.ModuleList([copy.deepcopy(module) for i in range(n)])
def forward(self, input_tensor, pos_encoding):
heads_out = []
i = 0
for head in self.attention_heads:
heads_out.append(head(input_tensor[:, :, i*self.depth_perhead:(i+1)*self.depth_perhead, :, :], pos_encoding))
i += 1
if self.with_residual:
att_out = torch.cat(heads_out, dim=2) + input_tensor # b,l,c,h,w
b,l,c,h,w = att_out.shape
att_out = torch.reshape(att_out, (-1,c,h,w))
out = self.feedforward(att_out) + att_out
else:
att_out = torch.cat(heads_out, dim=2)
b, l, c, h, w = att_out.shape
att_out = torch.reshape(att_out, (-1, c, h, w))
out = self.feedforward(att_out)
out = self.GN1(out)
out = torch.reshape(out, (b, l, c, h, w))
return out
class ConvTransformerEncoderLayerOneHead(nn.Module):
def __init__(self, head_depth=32, with_pos=True):
super(ConvTransformerEncoderLayerOneHead, self).__init__()
self.depth_perhead = head_depth
self.q_featuremap = QNet(self.depth_perhead)
self.k_v_featuremap = KVNet(self.depth_perhead)
self.attentionmap = AttentionNet(self.depth_perhead * 2)
self.feedforward = FeedForwardNet(self.depth_perhead)
self.with_pos = with_pos
def forward(self, input_tensor, pos_encoding):
batch, length, channel, height, width = input_tensor.shape
input_tensor = torch.reshape(input_tensor, (batch*length, channel, height, width)) # b*l,c,h,w
q_feature = self.q_featuremap(input_tensor)
k_feature = v_feature = self.k_v_featuremap(input_tensor)
q_feature = torch.reshape(q_feature, (batch, length, channel, height, width)) # b,l,c,h,w
k_feature = torch.reshape(k_feature, (batch, length, channel, height, width)) # b,l,c,h,w
v_feature = torch.reshape(v_feature, (batch, length, channel, height, width)) # b,l,c,h,w
if self.with_pos:
q_feature = (q_feature + pos_encoding)
k_feature = (k_feature + pos_encoding)
        # without positional encodings, q/k features are used as-is
# convolutional self-attention part
q_feature = q_feature.unsqueeze(dim=2).repeat(1, 1, length, 1, 1, 1) # b,l,l,c,h,w
k_feature = k_feature.unsqueeze(dim=1).repeat(1, length, 1, 1, 1, 1) # b,l,l,c,h,w
v_feature = v_feature.unsqueeze(dim=1).repeat(1, length, 1, 1, 1, 1) # b,l,l,c,h,w
q_k_concat = torch.cat((q_feature, k_feature), dim=3) # b,l,l,2c,h,w
dim0, dim1, dim2, dim3, dim4, dim5 = q_k_concat.shape
q_k_concat = torch.reshape(q_k_concat, (dim0 * dim1 * dim2, dim3, dim4, dim5))
attention_map = self.attentionmap(q_k_concat)
attention_map = torch.reshape(attention_map, (dim0, dim1, dim2, 1, dim4, dim5))
attention_map = nn.Softmax(dim=2)(attention_map) # b,l,l,1,h,w
attentioned_v_Feature = attention_map * v_feature # b,l,l,c,h,w
attentioned_v_Feature = torch.sum(attentioned_v_Feature, dim=2) # b,l,c,h,w
return attentioned_v_Feature
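if __name__ == '__main__':
    # Hedged smoke test (added for illustration; requires a CUDA device, since
    # the positional encodings in module_utils are created on the GPU):
    # encode 2 clips of 5 frames with 128-channel 16x16 feature maps.
    encoder = ConvTransformerEncoder(num_layers=2, model_depth=128,
                                     num_heads=4, pos_kind='sine').cuda()
    clips = torch.randn(2, 5, 128, 16, 16).cuda()
    out = encoder(clips)
    print(out.shape) # expected: torch.Size([2, 5, 128, 16, 16])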
| 5,573 | 43.951613 | 121 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/resnet_pytorch.py
|
'''Resnet for cifar dataset.
Ported from
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import math
__all__ = ['resnet']
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, depth, num_classes=1000, block_name='BasicBlock', in_channels=3):
super(ResNet, self).__init__()
# Model type specifies number of layers for CIFAR-10 model
if block_name.lower() == 'basicblock':
assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
n = (depth - 2) // 6
block = BasicBlock
elif block_name.lower() == 'bottleneck':
assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
n = (depth - 2) // 9
block = Bottleneck
else:
            raise ValueError('block_name should be BasicBlock or Bottleneck')
self.inplanes = 16
self.conv1 = nn.Conv2d(in_channels, 16, kernel_size=3, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, n)
self.layer2 = self._make_layer(block, 32, n, stride=2)
self.layer3 = self._make_layer(block, 64, n, stride=2)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x) # 32x32
x = self.layer1(x) # 32x32
x = self.layer2(x) # 16x16
x = self.layer3(x) # 8x8
x = self.avgpool(x)
feat = x.view(x.size(0), -1)
x = self.fc(feat)
return x, feat
def resnet(**kwargs):
"""
Constructs a ResNet model.
"""
return ResNet(**kwargs)
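if __name__ == '__main__':
    # Hedged usage sketch (added for illustration, not in the original file):
    # build a CIFAR-style ResNet-20 (depth = 6n + 2 with BasicBlock) and run a
    # dummy forward pass on a 32x32 input.
    import torch
    net = resnet(depth=20, num_classes=10, block_name='BasicBlock', in_channels=3)
    logits, feat = net(torch.randn(2, 3, 32, 32))
    print(logits.shape, feat.shape) # torch.Size([2, 10]) torch.Size([2, 64])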
| 5,088 | 29.842424 | 116 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/module_utils.py
|
import torch
import torch.nn as nn
from torch.nn import init
import math
import copy
import numpy as np
from skimage.metrics import peak_signal_noise_ratio # replaces the removed skimage.measure.compare_psnr
class QNet(nn.Module):
def __init__(self, depth=32):
super(QNet, self).__init__()
self.conv0 = nn.Sequential(
nn.Conv2d(in_channels=depth, out_channels=depth, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, input_tensor):
q_feature = self.conv0(input_tensor)
return q_feature
class KVNet(nn.Module):
def __init__(self, depth=32):
super(KVNet, self).__init__()
self.conv0 = nn.Sequential(
nn.Conv2d(in_channels=depth, out_channels=depth, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, input_tensor):
k_v_feature = self.conv0(input_tensor)
return k_v_feature
class FeedForwardNet(nn.Module):
def __init__(self, depth=128):
super(FeedForwardNet, self).__init__()
self.conv0 = nn.Sequential(
nn.Conv2d(in_channels=depth, out_channels=depth, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, input_tensor):
out = self.conv0(input_tensor)
return out
class AttentionNet(nn.Module):
def __init__(self, depth=64):
super(AttentionNet, self).__init__()
self.conv0 = nn.Sequential(
nn.Conv2d(in_channels=depth, out_channels=1, kernel_size=3, stride=1, padding=1, bias=True),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, input_tensor):
out = self.conv0(input_tensor)
return out
def _get_clones(module, n):
return nn.ModuleList([copy.deepcopy(module) for i in range(n)])
def weights_init_xavier(m):
classname = m.__class__.__name__
    if classname.find('Conv2d') != -1:
        init.xavier_normal_(m.weight.data) # in-place variant; init.xavier_normal is deprecated
    if classname.find('ConvTranspose2d') != -1:
        init.xavier_normal_(m.weight.data)
def cal_psnr(img1, img2):
    img1_np = np.array(img1)
    img2_np = np.array(img2)
    return peak_signal_noise_ratio(img1_np, img2_np)
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2*(i // 2))/ np.float32(d_model))
return pos * angle_rates
def positional_encoding(position, d_model, h=128, w=226):
angle_rads = get_angles(np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
sines = np.sin(angle_rads[:, 0::2])
cones = np.cos(angle_rads[:, 1::2])
pos_encoding = np.concatenate([sines, cones], axis=-1).astype(np.float32)
pos_embedding = torch.from_numpy(0.5*pos_encoding)
pos = pos_embedding.unsqueeze(2).repeat(1, 1, h * w).reshape(position, d_model, h, w).cuda()
return pos
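# Illustrative note (added): positional_encoding builds the standard
# transformer sinusoids, angle(pos, i) = pos / 10000^(2*(i//2)/d_model), takes
# sin of the even-indexed terms and cos of the odd-indexed ones, concatenates
# them along the channel axis, scales by 0.5 and broadcasts over the h x w
# grid. E.g. positional_encoding(3, 64, 8, 8) returns a CUDA tensor of shape
# (3, 64, 8, 8) that is added head-wise to the query/key feature maps.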
class PositionalEmbeddingLearned(nn.Module):
def __init__(self, embedding_depth=128):
super(PositionalEmbeddingLearned, self).__init__()
self.depth = embedding_depth
self.positional_embedding = nn.Embedding(10, self.depth).cuda()
def forward(self, shape):
b, c, h, w = shape
        index = torch.arange(b).cuda()
position = self.positional_embedding(index) # 5 * 64
position = position.unsqueeze(2).repeat(1, 1, h * w).reshape(b, self.depth, h, w)
return position
def get_model_name(cfg):
if cfg.w_res:
s_res = 'w_res-'
else:
s_res = 'wo_res-'
if cfg.w_pos:
s_pos = 'w_pos-'
s_pos_kind = cfg.pos_kind
else:
s_pos = 'wo_pos-'
s_pos_kind = 'none'
s_num_heads = f'{cfg.n_heads}heads-'
s_num_layers = f'{cfg.n_layers}layers-'
s_num_dec_frames = f'dec_{cfg.dec_frames}-'
s_model_type = '-inter' if cfg.model_type == 0 else '-extra'
model_kind = s_num_heads + s_num_layers + s_num_dec_frames + s_res + s_pos + s_pos_kind + s_model_type
return model_kind
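# Illustrative note (added): get_model_name concatenates the config flags into
# a compact run tag such as '4heads-3layers-dec_1-w_res-w_pos-learned-inter'.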
if __name__ == '__main__':
x = positional_encoding(3, 64)
print('debug')
| 4,119 | 30.212121 | 108 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/train.py
|
import numpy as np
import os
from torch.utils.data import DataLoader
from vad_datasets import unified_dataset_interface, cube_to_train_dataset
from vad_datasets import bbox_collate, img_tensor2numpy, img_batch_tensor2numpy, frame_size
from helper.misc import AverageMeter
import torch
from state_model import ConvTransformer_recon_correct
import torch.optim as optim
import torch.nn as nn
import argparse
import os
import sys
pyfile_name = os.path.basename(sys.argv[0]).split(".")[0]
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected')
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset', default='avenue', type=str)
parser.add_argument('-n_l', '--num_layers', default=3, type=int)
parser.add_argument('-n_h', '--num_heads', default=4, type=int)
parser.add_argument('-pe', '--positional_encoding', default='learned', type=str)
parser.add_argument('-e', '--epochs', default=20, type=int)
parser.add_argument('-b', '--batch_size', default=128, type=int)
parser.add_argument('-l', '--temporal_length', default=3, type=int)
parser.add_argument('-lam_r', '--lambda_raw', default=1, type=float)
parser.add_argument('-lam_o', '--lambda_of', default=1, type=float)
parser.add_argument('-train_b', '--train_bbox_saved', default=True, type=str2bool)
parser.add_argument('-train_f', '--train_foreground_saved', default=True, type=str2bool)
parser.add_argument('-f', '--use_flow', default=True, type=str2bool)
parser.add_argument('-bd', '--border_mode', default='elastic', type=str)
args = parser.parse_args()
def calc_block_idx(x_min, x_max, y_min, y_max, h_step, w_step, mode):
all_blocks = list()
center = np.array([(y_min + y_max) / 2, (x_min + x_max) / 2])
all_blocks.append(center + center)
if mode > 1:
all_blocks.append(np.array([y_min, center[1]]) + center)
all_blocks.append(np.array([y_max, center[1]]) + center)
all_blocks.append(np.array([center[0], x_min]) + center)
all_blocks.append(np.array([center[0], x_max]) + center)
if mode >= 9:
all_blocks.append(np.array([y_min, x_min]) + center)
all_blocks.append(np.array([y_max, x_max]) + center)
all_blocks.append(np.array([y_max, x_min]) + center)
all_blocks.append(np.array([y_min, x_max]) + center)
all_blocks = np.array(all_blocks) / 2
h_block_idxes = all_blocks[:, 0] / h_step
w_block_idxes = all_blocks[:, 1] / w_step
    h_block_idxes, w_block_idxes = list(h_block_idxes.astype(int)), list(w_block_idxes.astype(int))
# delete repeated elements
all_blocks = set([x for x in zip(h_block_idxes, w_block_idxes)])
all_blocks = [x for x in all_blocks]
return all_blocks
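# Hedged usage sketch (added): with the single-block grid configured below
# (h_block = w_block = 1, so h_step and w_step span the whole frame), every
# bbox collapses to block (0, 0):
# >>> calc_block_idx(10, 50, 20, 60, h_step=240, w_step=360, mode=1)
# [(0, 0)]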
# /*------------------------------------overall parameter setting------------------------------------------*/
dataset_name = args.dataset
raw_dataset_dir = 'raw_datasets'
foreground_extraction_mode = 'obj_det_with_motion'
data_root_dir = 'data'
modality = 'raw2flow'
mode = 'train'
method = 'SelfComplete'
num_layers = args.num_layers
num_heads = args.num_heads
pe = args.positional_encoding
context_frame_num = args.temporal_length
context_of_num = args.temporal_length
patch_size = 32
h_block = 1
w_block = 1
train_block_mode = 1
bbox_saved = args.train_bbox_saved
foreground_saved = args.train_foreground_saved
motionThr = 0
# /*------------------------------------------foreground extraction----------------------------------------------*/
config_file = './obj_det_config/cascade_rcnn_r101_fpn_1x.py'
checkpoint_file = './obj_det_checkpoints/cascade_rcnn_r101_fpn_1x_20181129-d64ebac7.pth'
# set dataset for foreground extraction
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join(raw_dataset_dir, dataset_name),
context_frame_num=1, mode=mode, border_mode='hard')
if not bbox_saved:
# build the model from a config file and a checkpoint file
from fore_det.inference import init_detector
from fore_det.obj_det_with_motion import imshow_bboxes, getObBboxes, getFgBboxes, delCoverBboxes
from fore_det.simple_patch import get_patch_loc
model = init_detector(config_file, checkpoint_file, device='cuda:0')
collate_func = bbox_collate('train')
dataset_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1,
collate_fn=collate_func.collate)
all_bboxes = list()
for idx in range(dataset.__len__()):
batch, _ = dataset.__getitem__(idx)
print('Extracting bboxes of {}-th frame'.format(idx + 1))
cur_img = img_tensor2numpy(batch[1])
if foreground_extraction_mode == 'obj_det_with_motion':
# A coarse detection of bboxes by pretrained object detector
ob_bboxes = getObBboxes(cur_img, model, dataset_name)
ob_bboxes = delCoverBboxes(ob_bboxes, dataset_name)
# further foreground detection by motion
fg_bboxes = getFgBboxes(cur_img, img_batch_tensor2numpy(batch), ob_bboxes, dataset_name, verbose=False)
if fg_bboxes.shape[0] > 0:
cur_bboxes = np.concatenate((ob_bboxes, fg_bboxes), axis=0)
else:
cur_bboxes = ob_bboxes
elif foreground_extraction_mode == 'obj_det':
# A coarse detection of bboxes by pretrained object detector
ob_bboxes = getObBboxes(cur_img, model, dataset_name)
cur_bboxes = delCoverBboxes(ob_bboxes, dataset_name)
elif foreground_extraction_mode == 'simple_patch':
patch_num_list = [(3, 4), (6, 8)]
cur_bboxes = list()
for h_num, w_num in patch_num_list:
cur_bboxes.append(get_patch_loc(frame_size[dataset_name][0], frame_size[dataset_name][1], h_num, w_num))
cur_bboxes = np.concatenate(cur_bboxes, axis=0)
else:
raise NotImplementedError
# imshow_bboxes(cur_img, cur_bboxes)
all_bboxes.append(cur_bboxes)
np.save(os.path.join(dataset.dir, 'bboxes_train_{}.npy'.format(foreground_extraction_mode)), all_bboxes)
print('bboxes for training data saved!')
else:
all_bboxes = np.load(os.path.join(dataset.dir, 'bboxes_train_{}.npy'.format(foreground_extraction_mode)),
allow_pickle=True)
print('bboxes for training data loaded!')
# /------------------------- extract foreground using extracted bboxes---------------------------------------/
border_mode = args.border_mode
if not foreground_saved:
if modality == 'raw_datasets':
file_format = frame_size[dataset_name][2]
elif modality == 'raw2flow':
file_format1 = frame_size[dataset_name][2]
file_format2 = '.npy'
else:
file_format = '.npy'
if modality == 'raw2flow':
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join('raw_datasets', dataset_name),
context_frame_num=context_frame_num, mode=mode, border_mode=border_mode,
all_bboxes=all_bboxes, patch_size=patch_size, file_format=file_format1)
dataset2 = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join('optical_flow', dataset_name),
context_frame_num=context_of_num, mode=mode, border_mode=border_mode,
all_bboxes=all_bboxes, patch_size=patch_size, file_format=file_format2)
else:
dataset = unified_dataset_interface(dataset_name=dataset_name, dir=os.path.join(modality, dataset_name),
context_frame_num=context_frame_num, mode=mode, border_mode=border_mode,
all_bboxes=all_bboxes, patch_size=patch_size, file_format=file_format)
if dataset_name == 'ShanghaiTech':
foreground_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in range(dataset.scene_num)]
if modality == 'raw2flow':
foreground_set2 = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in
range(dataset.scene_num)]
else:
foreground_set = [[[] for ww in range(w_block)] for hh in range(h_block)]
if modality == 'raw2flow':
foreground_set2 = [[[] for ww in range(w_block)] for hh in range(h_block)]
h_step, w_step = frame_size[dataset_name][0] / h_block, frame_size[dataset_name][1] / w_block
dataset_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1,
collate_fn=bbox_collate(mode=mode).collate)
if dataset_name == 'ShanghaiTech' and modality == 'raw2flow':
randIdx = np.random.permutation(dataset.__len__())
cout = 0
segIdx = 0
saveSegNum = 40000
for iidx in range(dataset.__len__()):
if dataset_name == 'ShanghaiTech' and modality == 'raw2flow':
idx = randIdx[iidx]
cout += 1
else:
idx = iidx
batch, _ = dataset.__getitem__(idx)
if modality == 'raw2flow':
batch2, _ = dataset2.__getitem__(idx)
if dataset_name == 'ShanghaiTech':
print(
'Extracting foreground in {}-th batch, {} in total, scene: {}'.format(iidx + 1, dataset.__len__() // 1,
dataset.scene_idx[idx]))
else:
print('Extracting foreground in {}-th batch, {} in total'.format(iidx + 1, dataset.__len__() // 1))
cur_bboxes = all_bboxes[idx]
if len(cur_bboxes) > 0:
batch = img_batch_tensor2numpy(batch)
if modality == 'raw2flow':
batch2 = img_batch_tensor2numpy(batch2)
if modality == 'optical_flow':
if len(batch.shape) == 4:
mag = np.sum(np.sum(np.sum(batch ** 2, axis=3), axis=2), axis=1)
else:
mag = np.mean(np.sum(np.sum(np.sum(batch ** 2, axis=4), axis=3), axis=2), axis=1)
elif modality == 'raw2flow':
if len(batch2.shape) == 4:
mag = np.sum(np.sum(np.sum(batch2 ** 2, axis=3), axis=2), axis=1)
else:
mag = np.mean(np.sum(np.sum(np.sum(batch2 ** 2, axis=4), axis=3), axis=2), axis=1)
else:
mag = np.ones(batch.shape[0]) * 10000
for idx_bbox in range(cur_bboxes.shape[0]):
if mag[idx_bbox] > motionThr:
all_blocks = calc_block_idx(cur_bboxes[idx_bbox, 0], cur_bboxes[idx_bbox, 2],
cur_bboxes[idx_bbox, 1], cur_bboxes[idx_bbox, 3], h_step, w_step,
mode=train_block_mode)
for (h_block_idx, w_block_idx) in all_blocks:
if dataset_name == 'ShanghaiTech':
foreground_set[dataset.scene_idx[idx] - 1][h_block_idx][w_block_idx].append(batch[idx_bbox])
if modality == 'raw2flow':
foreground_set2[dataset.scene_idx[idx] - 1][h_block_idx][w_block_idx].append(
batch2[idx_bbox])
else:
foreground_set[h_block_idx][w_block_idx].append(batch[idx_bbox])
if modality == 'raw2flow':
foreground_set2[h_block_idx][w_block_idx].append(batch2[idx_bbox])
if dataset_name == 'ShanghaiTech' and modality == 'raw2flow':
if cout == saveSegNum:
foreground_set = [
[[np.array(foreground_set[ss][hh][ww]) for ww in range(w_block)] for hh in range(h_block)] for ss in
range(dataset.scene_num)]
foreground_set2 = [
[[np.array(foreground_set2[ss][hh][ww]) for ww in range(w_block)] for hh in range(h_block)] for ss
in range(dataset.scene_num)]
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-raw.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)), foreground_set)
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)), foreground_set2)
del foreground_set, foreground_set2
cout = 0
segIdx += 1
foreground_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in
range(dataset.scene_num)]
foreground_set2 = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in
range(dataset.scene_num)]
if dataset_name == 'ShanghaiTech':
if modality != 'raw2flow':
foreground_set = [[[np.array(foreground_set[ss][hh][ww]) for ww in range(w_block)] for hh in range(h_block)]
for ss in range(dataset.scene_num)]
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}.npy'.format(foreground_extraction_mode)),
foreground_set)
else:
if dataset.__len__() % saveSegNum != 0:
foreground_set = [
[[np.array(foreground_set[ss][hh][ww]) for ww in range(w_block)] for hh in range(h_block)]
for ss in range(dataset.scene_num)]
foreground_set2 = [
[[np.array(foreground_set2[ss][hh][ww]) for ww in range(w_block)] for hh in range(h_block)] for ss
in
range(dataset.scene_num)]
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-raw.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)), foreground_set)
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)), foreground_set2)
else:
if modality == 'raw2flow':
foreground_set = [[np.array(foreground_set[hh][ww]) for ww in range(w_block)] for hh in range(h_block)]
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_{}_border_{}-raw.npy'.format(foreground_extraction_mode, context_frame_num, border_mode)),
foreground_set)
foreground_set2 = [[np.array(foreground_set2[hh][ww]) for ww in range(w_block)] for hh in range(h_block)]
np.save(os.path.join(data_root_dir, modality, dataset_name + '_' + 'foreground_train_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), foreground_set2)
else:
foreground_set = [[np.array(foreground_set[hh][ww]) for ww in range(w_block)] for hh in range(h_block)]
np.save(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_{}_border_{}.npy'.format(foreground_extraction_mode, context_frame_num, border_mode)),
foreground_set)
print('foreground for training data saved!')
else:
if dataset_name != 'ShanghaiTech':
if modality == 'raw2flow':
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_{}_border_{}-raw.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
foreground_set2 = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
else:
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_{}_border_{}.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
print('foreground for training data loaded!')
else:
if modality != 'raw2flow':
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_{}_border_{}.npy'.format(
foreground_extraction_mode, context_frame_num, border_mode)), allow_pickle=True)
# /*------------------------------------------Normal event modeling----------------------------------------------*/
if method == 'SelfComplete':
loss_func = nn.MSELoss()
epochs = args.epochs
batch_size = args.batch_size
useFlow = args.use_flow
if border_mode == 'predict':
tot_frame_num = context_frame_num + 1
tot_of_num = context_of_num + 1
else:
tot_frame_num = 2 * context_frame_num + 1
tot_of_num = 2 * context_of_num + 1
rawRange = 10
if rawRange >= tot_frame_num: # if rawRange is out of the range, use all frames
rawRange = None
padding = False
lambda_raw = args.lambda_raw
lambda_of = args.lambda_of
assert modality == 'raw2flow'
if dataset_name == 'ShanghaiTech':
model_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in
range(frame_size[dataset_name][-1])]
raw_training_scores_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in
range(frame_size[dataset_name][-1])]
of_training_scores_set = [[[[] for ww in range(w_block)] for hh in range(h_block)] for ss in
range(frame_size[dataset_name][-1])]
else:
model_set = [[[] for ww in range(len(foreground_set[hh]))] for hh in range(len(foreground_set))]
raw_training_scores_set = [[[] for ww in range(len(foreground_set[hh]))] for hh in range(len(foreground_set))]
of_training_scores_set = [[[] for ww in range(len(foreground_set[hh]))] for hh in range(len(foreground_set))]
# Prepare training data in current block
if dataset_name == 'ShanghaiTech':
saveSegNum = 40000
    totSegNum = int(np.ceil(dataset.__len__() / saveSegNum))
for s_idx in range(len(model_set)):
for h_idx in range(len(model_set[s_idx])):
for w_idx in range(len(model_set[s_idx][h_idx])):
raw_losses = AverageMeter()
of_losses = AverageMeter()
cur_model = torch.nn.DataParallel(ConvTransformer_recon_correct(
tot_raw_num = tot_frame_num, nums_hidden = [32, 64, 128], num_layers=num_layers,
num_dec_frames=1, num_heads=num_heads, with_residual=True,
with_pos=True, pos_kind=pe, mode=0, use_flow=useFlow)).cuda()
optimizer = optim.Adam(cur_model.parameters(), eps=1e-7, weight_decay=0.000)
cur_model.train()
for epoch in range(epochs):
for segIdx in range(totSegNum):
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-raw.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)))
foreground_set2 = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)))
cur_training_data = foreground_set[s_idx][h_idx][w_idx]
cur_training_data2 = foreground_set2[s_idx][h_idx][w_idx]
cur_dataset = cube_to_train_dataset(cur_training_data, target=cur_training_data2)
cur_dataloader = DataLoader(dataset=cur_dataset, batch_size=batch_size, shuffle=True)
for idx, (inputs, of_targets_all, _) in enumerate(cur_dataloader):
inputs = inputs.cuda().type(torch.cuda.FloatTensor)
of_targets_all = of_targets_all.cuda().type(torch.cuda.FloatTensor)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
loss_raw = loss_func(raw_targets.detach(), raw_outputs)
if useFlow:
loss_of = loss_func(of_targets.detach(), of_outputs)
if useFlow:
loss = lambda_raw * loss_raw + lambda_of * loss_of
else:
loss = loss_raw
raw_losses.update(loss_raw.item(), inputs.size(0))
if useFlow:
of_losses.update(loss_of.item(), inputs.size(0))
else:
of_losses.update(0., inputs.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
if idx % 5 == 0:
print(
'Block: ({}, {}), epoch {}, seg {}, batch {} of {}, raw loss: {}, of loss: {}'.format(
h_idx, w_idx, epoch, segIdx, idx, cur_dataset.__len__() // batch_size,
raw_losses.avg,
of_losses.avg))
model_set[s_idx][h_idx][w_idx].append(cur_model.state_dict())
# /*-- A forward pass to store the training scores of optical flow and raw datasets respectively*/
for segIdx in range(totSegNum):
foreground_set = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-raw.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)))
foreground_set2 = np.load(os.path.join(data_root_dir, modality,
dataset_name + '_' + 'foreground_train_{}_seg_{}_{}_border_{}-flow.npy'.format(
foreground_extraction_mode, segIdx, context_frame_num, border_mode)))
cur_training_data = foreground_set[s_idx][h_idx][w_idx]
cur_training_data2 = foreground_set2[s_idx][h_idx][w_idx]
cur_dataset = cube_to_train_dataset(cur_training_data, target=cur_training_data2)
forward_dataloader = DataLoader(dataset=cur_dataset, batch_size=batch_size//4, shuffle=False)
                        score_func = nn.MSELoss(reduction='none') # per-element loss; reduce=False is deprecated
cur_model.eval()
for idx, (inputs, of_targets_all, _) in enumerate(forward_dataloader):
inputs = inputs.cuda().type(torch.cuda.FloatTensor)
of_targets_all = of_targets_all.cuda().type(torch.cuda.FloatTensor)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
raw_scores = score_func(raw_targets, raw_outputs).cpu().data.numpy()
raw_scores = np.sum(np.sum(np.sum(np.sum(raw_scores, axis=4), axis=3), axis=2), axis=1) # mse
raw_training_scores_set[s_idx][h_idx][w_idx].append(raw_scores)
if useFlow:
of_scores = score_func(of_targets, of_outputs).cpu().data.numpy()
of_scores = np.sum(np.sum(np.sum(np.sum(of_scores, axis=4), axis=3), axis=2), axis=1) # mse
of_training_scores_set[s_idx][h_idx][w_idx].append(of_scores)
raw_training_scores_set[s_idx][h_idx][w_idx] = np.concatenate(
raw_training_scores_set[s_idx][h_idx][w_idx], axis=0)
if useFlow:
of_training_scores_set[s_idx][h_idx][w_idx] = np.concatenate(
of_training_scores_set[s_idx][h_idx][w_idx], axis=0)
del cur_model, raw_losses, of_losses
torch.save(raw_training_scores_set, os.path.join(data_root_dir, modality,
dataset_name + '_' + 'raw_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe,
epochs, lambda_raw, lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
torch.save(of_training_scores_set, os.path.join(data_root_dir, modality,
dataset_name + '_' + 'of_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe,
epochs, lambda_raw, lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
else:
raw_losses = AverageMeter()
of_losses = AverageMeter()
torch.autograd.set_detect_anomaly(True)
for h_idx in range(len(foreground_set)):
for w_idx in range(len(foreground_set[h_idx])):
cur_training_data = foreground_set[h_idx][w_idx]
if len(cur_training_data) > 1: # num > 1 for data parallel
cur_training_data2 = foreground_set2[h_idx][w_idx]
cur_dataset = cube_to_train_dataset(cur_training_data, target=cur_training_data2)
cur_dataloader = DataLoader(dataset=cur_dataset, batch_size=batch_size, shuffle=True)
cur_model = torch.nn.DataParallel(ConvTransformer_recon_correct(
tot_raw_num = tot_frame_num, nums_hidden = [32, 64, 128], num_layers=num_layers,
num_dec_frames=1, num_heads=num_heads, with_residual=True,
with_pos=True, pos_kind=pe, mode=0, use_flow=useFlow)).cuda()
                    # both branches previously created identical optimizers, so one suffices
                    optimizer = optim.Adam(cur_model.parameters(), eps=1e-7, weight_decay=0.0)
cur_model.train()
for epoch in range(epochs):
for idx, (inputs, of_targets_all, _) in enumerate(cur_dataloader):
inputs = inputs.cuda().type(torch.cuda.FloatTensor)
# print(torch.max(inputs))
of_targets_all = of_targets_all.cuda().type(torch.cuda.FloatTensor)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
loss_raw = loss_func(raw_targets.detach(), raw_outputs)
if useFlow:
loss_of = loss_func(of_targets.detach(), of_outputs)
if useFlow:
loss = lambda_raw * loss_raw + lambda_of * loss_of
else:
loss = loss_raw
raw_losses.update(loss_raw.item(), inputs.size(0))
if useFlow:
of_losses.update(loss_of.item(), inputs.size(0))
else:
of_losses.update(0., inputs.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
                            if idx % 5 == 0:
print(
'Block: ({}, {}), epoch {}, batch {} of {}, raw loss: {}, of loss: {}'.format(h_idx,
w_idx,
epoch,
idx,
cur_dataset.__len__() // batch_size,
raw_losses.avg,
of_losses.avg))
model_set[h_idx][w_idx].append(cur_model.state_dict())
# /*-- A forward pass to store the training scores of optical flow and raw datasets respectively*/
forward_dataloader = DataLoader(dataset=cur_dataset, batch_size=batch_size//4, shuffle=False)
                score_func = nn.MSELoss(reduction='none') # per-element loss; reduce=False is deprecated
cur_model.eval()
for idx, (inputs, of_targets_all, _) in enumerate(forward_dataloader):
inputs = inputs.cuda().type(torch.cuda.FloatTensor)
of_targets_all = of_targets_all.cuda().type(torch.cuda.FloatTensor)
of_outputs, raw_outputs, of_targets, raw_targets = cur_model(inputs, of_targets_all)
raw_scores = score_func(raw_targets, raw_outputs).cpu().data.numpy()
raw_scores = np.sum(np.sum(np.sum(np.sum(raw_scores, axis=4), axis=3), axis=2), axis=1) # mse
raw_training_scores_set[h_idx][w_idx].append(raw_scores)
if useFlow:
of_scores = score_func(of_targets, of_outputs).cpu().data.numpy()
of_scores = np.sum(np.sum(np.sum(np.sum(of_scores, axis=4), axis=3), axis=2), axis=1) # mse
of_training_scores_set[h_idx][w_idx].append(of_scores)
raw_training_scores_set[h_idx][w_idx] = np.concatenate(raw_training_scores_set[h_idx][w_idx], axis=0)
if useFlow:
of_training_scores_set[h_idx][w_idx] = np.concatenate(of_training_scores_set[h_idx][w_idx],
axis=0)
torch.save(raw_training_scores_set, os.path.join(data_root_dir, modality,
dataset_name + '_' + 'raw_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, lambda_raw, lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
torch.save(of_training_scores_set, os.path.join(data_root_dir, modality,
dataset_name + '_' + 'of_training_scores_border_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, lambda_raw, lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
print('training scores saved')
torch.save(model_set, os.path.join(data_root_dir, modality,
dataset_name + '_' + 'model_{}_head_{}_layer_{}_length_{}_pe_{}_epoch_{}_lambda_{}_{}'.format(
border_mode, num_heads, num_layers, context_frame_num, pe, epochs, lambda_raw, lambda_of) + '_' + 'pyname_{}.npy'.format(pyfile_name)))
print('Training of {} for dataset: {} has completed!'.format(method, dataset_name))
else:
raise NotImplementedError
| 34,551 | 57.86201 | 197 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/fore_det/inference.py
|
import warnings
import matplotlib.pyplot as plt
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
from mmcv.image import imread, imwrite
import cv2
def imshow_bboxes(img,
bboxes,
bbox_color=(0, 255, 0),
thickness=1,
show=True,
win_name='',
wait_time=0,
out_file=None):
"""Draw bboxes on an image.
Args:
img (str or ndarray): The image to be displayed.
        bboxes (ndarray): Bounding boxes (optionally with scores), shaped (n, 4) or (n, 5).
bbox_color (RGB value): Color of bbox lines.
thickness (int): Thickness of lines.
show (bool): Whether to show the image.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
out_file (str or None): The filename to write the image.
"""
assert bboxes.ndim == 2
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
img = imread(img)
for bbox in bboxes:
left_top = (bbox[0], bbox[1])
right_bottom = (bbox[2], bbox[3])
cv2.rectangle(img, left_top, right_bottom, bbox_color, thickness)
if show:
        cv2.imshow(win_name, img)
cv2.waitKey(wait_time)
if out_file is not None:
imwrite(img, out_file)
def init_detector(config, checkpoint=None, device='cuda:0'):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
'but got {}'.format(type(config)))
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage(object):
def __call__(self, results):
if isinstance(results['img'], str):
results['filename'] = results['img']
else:
results['filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_detector(model, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
If imgs is a str, a generator will be returned, otherwise return the
detection results directly.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
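# Hedged usage note (added): inference_detector takes a single BGR image
# (file path or ndarray) and returns per-class detection arrays, e.g.
# >>> result = inference_detector(model, 'frame_0001.jpg') # hypothetical path
# >>> bboxes = np.vstack(result) # (n, 5) array: x1, y1, x2, y2, score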
# TODO: merge this method with the one in BaseDetector
def show_result(img,
result,
class_names,
score_thr=0.3,
wait_time=0,
show=True,
out_file=None):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
wait_time (int): Value of waitKey param.
show (bool, optional): Whether to show the image with opencv or not.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
Returns:
np.ndarray or None: If neither `show` nor `out_file` is specified, the
visualized image is returned, otherwise None is returned.
"""
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
img = img.copy()
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
mmcv.imshow_det_bboxes(
img,
bboxes,
labels,
class_names=class_names,
score_thr=score_thr,
show=show,
wait_time=wait_time,
out_file=out_file)
# if not (show or out_file):
# return img
return bboxes
def show_result_pyplot(img,
result,
class_names,
score_thr=0.3,
fig_size=(15, 10)):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
fig_size (tuple): Figure size of the pyplot figure.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
"""
img = show_result(
img, result, class_names, score_thr=score_thr, show=False)
plt.figure(figsize=fig_size)
plt.imshow(mmcv.bgr2rgb(img))
| 7,224 | 33.241706 | 79 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/fore_det/obj_det_with_motion.py
|
import mmcv
from mmcv.image import imread, imwrite
import cv2
from fore_det.inference import inference_detector, init_detector, show_result
import numpy as np
from sklearn import preprocessing
import os
from torch.utils.data import Dataset, DataLoader
from vad_datasets import ped_dataset, avenue_dataset, shanghaiTech_dataset
from configparser import ConfigParser
import time
cp = ConfigParser()
cp.read("config.cfg")
def getObBboxes(img, model, dataset_name):
if dataset_name == 'UCSDped2':
score_thr = 0.5
min_area_thr = 10 * 10
elif dataset_name == 'avenue':
score_thr = 0.25
min_area_thr = 40 * 40
elif dataset_name == 'ShanghaiTech':
score_thr = 0.5
min_area_thr = 8 * 8
else:
raise NotImplementedError
result = inference_detector(model, img)
#bboxes = show_result(img, result, model.CLASSES, score_thr)
bbox_result = result
bboxes = np.vstack(bbox_result)
scores = bboxes[:, -1]
inds = scores > score_thr
bboxes = bboxes[inds, :]
x1 = bboxes[:, 0]
y1 = bboxes[:, 1]
x2 = bboxes[:, 2]
y2 = bboxes[:, 3]
bbox_areas = (y2 - y1 + 1) * (x2 - x1 + 1)
return bboxes[bbox_areas >= min_area_thr, :4]
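# Hedged usage sketch (added): given an initialized detector and a BGR frame,
# getObBboxes keeps only confident, sufficiently large boxes:
# >>> model = init_detector(config_file, checkpoint_file, device='cuda:0')
# >>> boxes = getObBboxes(frame_bgr, model, 'avenue') # (n, 4): x1, y1, x2, y2
# Here frame_bgr, config_file and checkpoint_file are placeholders.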
def delCoverBboxes(bboxes, dataset_name):
if dataset_name == 'UCSDped2':
cover_thr = 0.6
elif dataset_name == 'avenue':
cover_thr = 0.6
elif dataset_name == 'ShanghaiTech':
cover_thr = 0.65
else:
raise NotImplementedError
assert bboxes.ndim == 2
assert bboxes.shape[1] == 4
x1 = bboxes[:,0]
y1 = bboxes[:,1]
x2 = bboxes[:,2]
y2 = bboxes[:,3]
bbox_areas = (y2-y1+1) * (x2-x1+1)
    sort_idx = bbox_areas.argsort()  # indices of bboxes sorted in ascending order by area
keep_idx = []
for i in range(sort_idx.size):
#Calculate the point coordinates of the intersection
x11 = np.maximum(x1[sort_idx[i]], x1[sort_idx[i+1:]])
y11 = np.maximum(y1[sort_idx[i]], y1[sort_idx[i+1:]])
x22 = np.minimum(x2[sort_idx[i]], x2[sort_idx[i+1:]])
y22 = np.minimum(y2[sort_idx[i]], y2[sort_idx[i+1:]])
#Calculate the intersection area
w = np.maximum(0, x22-x11+1)
h = np.maximum(0, y22-y11+1)
overlaps = w * h
ratios = overlaps / bbox_areas[sort_idx[i]]
num = ratios[ratios > cover_thr]
if num.size == 0:
keep_idx.append(sort_idx[i])
return bboxes[keep_idx]
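# Sanity-check sketch (added): a box fully covered by a larger one is
# suppressed once its overlap ratio exceeds cover_thr.
#
#   boxes = np.array([[0, 0, 100, 100],    # large box, kept
#                     [10, 10, 20, 20]])   # fully covered, dropped
#   delCoverBboxes(boxes, 'UCSDped2')      # -> array([[0, 0, 100, 100]])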
def imshow_bboxes(img,
bboxes,
bbox_color=(255,255,255),
thickness=1,
show=True,
win_name='',
wait_time=0,
out_file=None):
"""Draw bboxes on an image.
Args:
img (str or ndarray): The image to be displayed.
bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4).
bbox_color (RGB value): Color of bbox lines.
thickness (int): Thickness of lines.
show (bool): Whether to show the image.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
out_file (str or None): The filename to write the image.
"""
assert bboxes.ndim == 2
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
for bbox in bboxes:
bbox_int = bbox.astype(np.int32)
left_top = (bbox_int[0], bbox_int[1])
right_bottom = (bbox_int[2], bbox_int[3])
img = cv2.rectangle(
img, left_top, right_bottom, bbox_color, thickness)
if show:
cv2.imshow(win_name, img)
cv2.waitKey(wait_time)
if out_file is not None:
imwrite(img, out_file)
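# Example call (added sketch): draw the detections and save to disk
# instead of opening a window.
#
#   imshow_bboxes(frame, ob_bboxes, show=False, out_file='boxes.png')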
def getFgBboxes(cur_img, img_batch, bboxes, dataset_name, verbose=False):
if dataset_name == 'UCSDped2':
area_thr = 10 * 10
binary_thr = 18
extend = 2
gauss_mask_size = 3
elif dataset_name == 'avenue':
area_thr = 40 * 40
binary_thr = 18
extend = 2
gauss_mask_size = 5
elif dataset_name == 'ShanghaiTech':
area_thr = 8 * 8
binary_thr = 15
extend = 2
gauss_mask_size = 5
else:
raise NotImplementedError
sum_grad = 0
for i in range(img_batch.shape[0]-1):
img1 = img_batch[i,:,:,:]
img2 = img_batch[i+1,:,:,:]
img1 = cv2.GaussianBlur(img1, (gauss_mask_size, gauss_mask_size), 0)
img2 = cv2.GaussianBlur(img2, (gauss_mask_size, gauss_mask_size), 0)
grad = cv2.absdiff(img1, img2)
sum_grad = grad + sum_grad
sum_grad = cv2.threshold(sum_grad, binary_thr, 255, cv2.THRESH_BINARY)[1]
if verbose is True:
cv2.imshow('grad', sum_grad)
cv2.waitKey(0)
for bbox in bboxes:
bbox_int = bbox.astype(np.int32)
extend_y1 = np.maximum(0, bbox_int[1]-extend)
extend_y2 = np.minimum(bbox_int[3]+extend, sum_grad.shape[0])
extend_x1 = np.maximum(0, bbox_int[0]-extend)
extend_x2 = np.minimum(bbox_int[2]+extend, sum_grad.shape[1])
sum_grad[extend_y1:extend_y2+1, extend_x1:extend_x2+1] = 0
if verbose is True:
cv2.imshow('del_ob_bboxes', sum_grad)
cv2.waitKey(0)
sum_grad = cv2.cvtColor(sum_grad, cv2.COLOR_BGR2GRAY)
contours, hierarchy = cv2.findContours(sum_grad, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
fg_bboxes = []
for c in contours:
x,y,w,h = cv2.boundingRect(c)
sum_grad = cv2.rectangle(sum_grad, (x,y), (x+w,y+h), 255, 1)
area = (w+1) * (h+1)
if area > area_thr and w / h < 10 and h / w < 10:
extend_x1 = np.maximum(0, x-extend)
extend_y1 = np.maximum(0, y-extend)
extend_x2 = np.minimum(x+w+extend, sum_grad.shape[1])
extend_y2 = np.minimum(y+h+extend, sum_grad.shape[0])
fg_bboxes.append([extend_x1, extend_y1, extend_x2, extend_y2])
cur_img=cv2.UMat(cur_img).get()
cur_img = cv2.rectangle(cur_img, (extend_x1,extend_y1), (extend_x2,extend_y2), (0,255,0), 1)
if verbose is True:
cv2.imshow('all_fg_bboxes', sum_grad)
cv2.waitKey(0)
cv2.imshow('filter', cur_img)
cv2.waitKey(0)
return np.array(fg_bboxes)
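# Usage sketch (added; variables are hypothetical): img_batch is a short
# clip of consecutive frames around the current frame, and bboxes are the
# object detections for that frame. The returned boxes cover moving
# regions that the detector missed.
#
#   fg_bboxes = getFgBboxes(cur_img, img_batch, ob_bboxes, 'avenue')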
def getBatch(data_folder, X_dataset, context_frame_num, idx, mode, file_format):
dataset = X_dataset(dir=data_folder, context_frame_num=context_frame_num, mode=mode, border_mode='hard', file_format=file_format)
print(dataset.tot_frame_num)
batch, _ = dataset.__getitem__(idx)
start_idx, end_idx = dataset.context_range(idx)
return batch, start_idx, end_idx
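# Usage sketch (added; the folder path and arguments are hypothetical):
#
#   batch, start_idx, end_idx = getBatch('data/avenue/testing/frames',
#                                        avenue_dataset, context_frame_num=4,
#                                        idx=0, mode='test', file_format='.jpg')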
| 6,865 | 29.927928 | 133 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/fore_det/simple_patch.py
|
import numpy as np
import itertools
def get_patch_loc(h, w, h_num, w_num):
h_step = h / h_num
w_step = w / w_num
y_min_list = np.linspace(0, h-1, h_num, endpoint=False)
x_min_list = np.linspace(0, w-1, w_num, endpoint=False)
patch_loc = list()
for x_min, y_min in itertools.product(tuple(x_min_list), tuple(y_min_list)):
x_max = np.minimum(x_min + w_step, w-1)
y_max = np.minimum(y_min + h_step, h-1)
patch_loc.append(np.array([x_min, y_min, x_max, y_max]))
patch_loc = np.array(patch_loc)
return patch_loc
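# Example (added sketch): split a 256x256 frame into a 2x2 patch grid.
#
#   get_patch_loc(256, 256, 2, 2)
#   # -> 4 rows of [x_min, y_min, x_max, y_max], each patch ~128x128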
| 566 | 30.5 | 80 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/fore_det/__init__.py
| 0 | 0 | 0 |
py
|
|
MRMGA4VAD
|
MRMGA4VAD-main/obj_det_config/cascade_rcnn_r101_fpn_1x.py
|
# model settings
model = dict(
type='CascadeRCNN',
num_stages=3,
pretrained='torchvision://resnet101',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
])
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100),
keep_all_stages=False)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/cascade_rcnn_r101_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 7,380 | 30.408511 | 78 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/helper/misc.py
|
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import errno
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
__all__ = ['get_mean_and_std', 'init_params', 'mkdir_p', 'AverageMeter']
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:  # truth-testing a multi-element bias tensor raises at runtime
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
def mkdir_p(path):
'''make dir if not exist'''
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class AverageMeter(object):
"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
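# Typical usage (added sketch; `loader`, `net` and `criterion` are
# hypothetical):
#
#   losses = AverageMeter()
#   for inputs, targets in loader:
#       loss = criterion(net(inputs), targets)
#       losses.update(loss.item(), inputs.size(0))
#   print(losses.avg)  # running average weighted by batch size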
| 2,218 | 28.197368 | 110 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/helper/visualization_helper.py
|
import numpy as np
import cv2
from flowlib import flow_to_image
import os
def visualize_score(score_map, big_number):
lower_bound = -1 * big_number
upper_bound = big_number
all_values = np.reshape(score_map, (-1, ))
all_values = all_values[all_values > lower_bound]
all_values = all_values[all_values < upper_bound]
max_val = all_values.max()
min_val = all_values.min()
visual_map = (score_map - min_val) / (max_val - min_val)
visual_map[score_map == lower_bound] = 0
visual_map[score_map == upper_bound] = 1
visual_map *= 255
visual_map = visual_map.astype(np.uint8)
return visual_map
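# Example (added sketch): values equal to +/- big_number are treated as
# sentinels and pinned to 255/0; the rest are min-max scaled to uint8.
#
#   smap = np.full((4, 4), 1e9)
#   smap[1:3, 1:3] = [[0.2, 0.4], [0.6, 0.8]]
#   visualize_score(smap, 1e9)  # sentinel cells -> 255, inner cells 0..255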
def visualize_img(img):
if img.shape[2] == 2:
cv2.imshow('Optical flow', flow_to_image(img))
else:
cv2.imshow('Image', img)
cv2.waitKey(0)
def visualize_batch(batch):
if len(batch.shape) == 4:
if batch.shape[3] == 2:
batch = [flow_to_image(batch[i]) for i in range(batch.shape[0])]
cv2.imshow('Optical flow set', np.hstack(batch))
else:
batch = [batch[i] for i in range(batch.shape[0])]
cv2.imshow('Image sets', np.hstack(batch))
cv2.waitKey(0)
else:
if batch.shape[4] == 2:
batch = [np.hstack([flow_to_image(batch[j][i]) for i in range(batch[j].shape[0])]) for j in range(batch.shape[0])]
cv2.imshow('Optical flow set', np.vstack(batch))
else:
batch = [np.hstack([batch[j][i] for i in range(batch[j].shape[0])]) for j in range(batch.shape[0])]
cv2.imshow('Image sets', np.vstack(batch))
cv2.waitKey(0)
def visualize_pair(batch_1, batch_2):
if len(batch_1.shape) == 4:
if batch_1.shape[3] == 2:
batch_1 = [flow_to_image(batch_1[i]) for i in range(batch_1.shape[0])]
else:
batch_1 = [batch_1[i] for i in range(batch_1.shape[0])]
if batch_2.shape[3] == 2:
batch_2 = [flow_to_image(batch_2[i]) for i in range(batch_2.shape[0])]
else:
batch_2 = [batch_2[i] for i in range(batch_2.shape[0])]
# batch_1=cv2.cvtColor(np.float32(batch_1), cv2.COLOR_RGB2BGR)
# batch_2=cv2.cvtColor(np.float32(batch_2), cv2.COLOR_RGB2BGR)
# batch_1 = np.array(batch_1)[...,::-1]
# batch_2 = np.array(batch_2)[...,::-1]
cv2.namedWindow('Pair comparison', cv2.WINDOW_NORMAL)
cv2.imshow('Pair comparison', np.vstack([np.hstack(batch_1), np.hstack(batch_2)]))
cv2.waitKey(0)
else:
if batch_1.shape[4] == 2:
batch_1 = [flow_to_image(batch_1[-1][i]) for i in range(batch_1[-1].shape[0])]
else:
batch_1 = [batch_1[-1][i] for i in range(batch_1[-1].shape[0])]
if batch_2.shape[4] == 2:
batch_2 = [flow_to_image(batch_2[-1][i]) for i in range(batch_2[-1].shape[0])]
else:
batch_2 = [batch_2[-1][i] for i in range(batch_2[-1].shape[0])]
cv2.namedWindow('Pair comparison', cv2.WINDOW_NORMAL)
cv2.imshow('Pair comparison', np.vstack([np.hstack(batch_1), np.hstack(batch_2)]))
cv2.waitKey(0)
def visualize_recon(batch_1, batch_2, frame_idx, obj_id, dataset_name, save_dir):
if len(batch_1.shape) == 4:
# print(batch_1.dtype)
if batch_1.shape[3] == 2:
batchshow_1 = [flow_to_image(batch_1[j]) for j in range(batch_1.shape[0])]
else:
# batchshow_1 = [cv2.cvtColor(batch_1[j], cv2.COLOR_BGR2GRAY)
# for j in range(batch_1.shape[0])]
batchshow_1 = [batch_1[j] for j in range(batch_1.shape[0])]
# batch_1 = [batch_1[j] for j in range(batch_1.shape[0])]
if batch_2.shape[3] == 2:
batchshow_2 = [flow_to_image(batch_2[j]) for j in range(batch_2.shape[0])]
else:
# batchshow_2 = [cv2.cvtColor(batch_2[j], cv2.COLOR_BGR2GRAY)
# for j in range(batch_2.shape[0])]
batchshow_2 = [batch_2[j] for j in range(batch_2.shape[0])]
if batch_1.shape[3]==3:
batchtmp_1 = [cv2.normalize(batch_1[i], None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) for i in
range(batch_1.shape[0])]
batchtmp_2 = [cv2.normalize(batch_2[i], None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) for i in
range(batch_2.shape[0])]
error_rgb = [cv2.absdiff(batchtmp_1[i], batchtmp_2[i]) for i in range(batch_1.shape[0])]
error_gray = [cv2.cvtColor(error_rgb[i], cv2.COLOR_BGR2GRAY) for i in range(batch_1.shape[0])]
heatmap = [cv2.applyColorMap(error_gray[i], cv2.COLORMAP_JET) for i in range(batch_1.shape[0])]
# print(heatmap)
# batch_2 = np.array(batch_2)[...,::-1]
# cv2.namedWindow('Pair comparison', cv2.WINDOW_NORMAL)
if batch_1.shape[3]==3:
raw = np.vstack([np.hstack(batchshow_1), np.hstack(batchshow_2)])
# print(raw.shape)
error = np.vstack([np.hstack(heatmap)])
# cv2.imshow('Pair comparison', raw)
raw = raw*255
raw = raw.astype('uint8')
cv2.imwrite(os.path.join(save_dir, dataset_name, 'raw_{}_{}.png'.format(frame_idx, obj_id)),raw)
# cv2.imshow('error', error)
cv2.imwrite(os.path.join(save_dir, dataset_name, 'error_{}_{}.png'.format(frame_idx, obj_id)), error)
else:
error_rgb = [cv2.absdiff(batchshow_1[i], batchshow_2[i]) for i in range(batch_1.shape[0])]
error_gray = [cv2.cvtColor(error_rgb[i], cv2.COLOR_BGR2GRAY) for i in range(batch_1.shape[0])]
heatmap = [cv2.applyColorMap(error_gray[i], cv2.COLORMAP_JET) for i in range(batch_1.shape[0])]
error = np.vstack([np.hstack(heatmap)])
# cv2.imshow('Pair comparison', np.vstack([np.hstack(batchshow_1), np.hstack(batchshow_2)]))
# cv2.imwrite(os.path.join(save_dir, dataset_name, 'flow_{}_{}.png'.format(frame_idx, obj_id)), np.vstack([np.hstack(batchshow_1), np.hstack(batchshow_2)]))
cv2.imwrite(os.path.join(save_dir, dataset_name, 'flowerror_{}_{}.png'.format(frame_idx, obj_id)), error)
# cv2.waitKey(0)
# input("Press Enter to continue...")
else:
if batch_1.shape[4] == 2:
batch_1 = [flow_to_image(batch_1[-1][i]) for i in range(batch_1[-1].shape[0])]
else:
batch_1 = [batch_1[-1][i] for i in range(batch_1[-1].shape[0])]
if batch_2.shape[4] == 2:
batch_2 = [flow_to_image(batch_2[-1][i]) for i in range(batch_2[-1].shape[0])]
else:
batch_2 = [batch_2[-1][i] for i in range(batch_2[-1].shape[0])]
cv2.namedWindow('Pair comparison', cv2.WINDOW_NORMAL)
cv2.imshow('Pair comparison', np.vstack([np.hstack(batch_1), np.hstack(batch_2)]))
cv2.waitKey(0)
def visualize_pair_map(batch_1, batch_2):
if len(batch_1.shape) == 4:
if batch_1.shape[3] == 2:
batch_show_1 = [flow_to_image(batch_1[i]) for i in range(batch_1.shape[0])]
else:
batch_show_1 = [batch_1[i] for i in range(batch_1.shape[0])]
if batch_2.shape[3] == 2:
batch_show_2 = [flow_to_image(batch_2[i]) for i in range(batch_2.shape[0])]
else:
batch_show_2 = [batch_2[i] for i in range(batch_2.shape[0])]
cv2.namedWindow('Pair comparison', cv2.WINDOW_NORMAL)
if batch_1.shape[3] == 3 or batch_1.shape[3] == 1: # RGB or GRAYSCALE
batchtmp_1 = [cv2.normalize(batch_1[i], None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) for i in range(batch_1.shape[0])]
batchtmp_2 = [cv2.normalize(batch_2[i], None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) for i in range(batch_1.shape[0])]
if batch_1.shape[3] == 1:
error_gray = [cv2.absdiff(batchtmp_1[i], batchtmp_2[i]) for i in range(batch_1.shape[0])]
elif batch_1.shape[3] == 3:
error_rgb = [cv2.absdiff(batchtmp_1[i], batchtmp_2[i]) for i in range(batch_1.shape[0])]
error_gray = [cv2.cvtColor(error_rgb[i], cv2.COLOR_BGR2GRAY) for i in range(batch_1.shape[0])]
heatmap = [cv2.applyColorMap(error_gray[i], cv2.COLORMAP_JET) for i in range(batch_1.shape[0])]
if batch_1.shape[3] == 3: # RGB
cv2.namedWindow('Pair comparison AP', cv2.WINDOW_NORMAL)
cv2.imshow('Pair comparison AP', np.vstack([np.hstack(batch_show_1), np.hstack(batch_show_2), np.hstack(heatmap)]))
else: # GRAYSCALE
cv2.imshow('Pair comparison AP', np.vstack([np.hstack(batch_show_1), np.hstack(batch_show_2)])) # GRAYSCALE
cv2.imshow('Error AP', np.vstack([np.hstack(heatmap)])) # different color space: RGB
else:
cv2.imshow('Pair comparison OF', np.vstack([np.hstack(batch_show_1), np.hstack(batch_show_2)]))
cv2.waitKey(0)
else:
if batch_1.shape[4] == 2:
batch_1 = [flow_to_image(batch_1[-1][i]) for i in range(batch_1[-1].shape[0])]
else:
batch_1 = [batch_1[-1][i] for i in range(batch_1[-1].shape[0])]
if batch_2.shape[4] == 2:
batch_2 = [flow_to_image(batch_2[-1][i]) for i in range(batch_2[-1].shape[0])]
else:
batch_2 = [batch_2[-1][i] for i in range(batch_2[-1].shape[0])]
cv2.imshow('Pair comparison', np.vstack([np.hstack(batch_1), np.hstack(batch_2)]))
cv2.waitKey(0)
| 9,475 | 47.101523 | 168 |
py
|
MRMGA4VAD
|
MRMGA4VAD-main/helper/eval_accuracy.py
|
from __future__ import print_function, absolute_import
import numpy as np
__all__ = ['accuracy']
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)  # .view fails on non-contiguous slices
res.append(correct_k.mul_(100.0 / batch_size))
return res
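# Usage sketch (added; logits/labels are synthetic and torch is assumed
# to be available to the caller):
#
#   output = torch.randn(32, 10)          # (batch, num_classes) scores
#   target = torch.randint(0, 10, (32,))
#   top1, top5 = accuracy(output, target, topk=(1, 5))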
def simple_accuracy(output, target):
output = output.numpy()
target = np.squeeze(target.numpy())
res = np.argmax(output, axis=1)
correct = np.sum(res == target)
return correct / target.shape[0]
def binary_accuracy(output, target):
output = np.squeeze(output.data.cpu().numpy())
target = np.squeeze(target.cpu().numpy())
res = np.zeros((output.shape[0],))
res[output > 0.5] = 1.
correct = np.sum(res == target)
return correct / target.shape[0]
| 1,034 | 26.972973 | 64 |
py
|
edge-approx
|
edge-approx-main/src/main/py/opt.py
|
#!/usr/bin/env python
import numpy as np
import ast
import sys
import math
import time
import random
import warnings
from scipy import optimize
if len(sys.argv) < 2:
    print("Expected Input Data")
    sys.exit(1)
warnings.simplefilter("ignore")
random.seed(0x12345678)
MI = []
PRED = []
MEAN = []
VAR = []
COSTS = []
V_BOUND = []
OBS = []
EXP_COND_V = []
NUM_STREAMS = 0
DIM = 0
C_BOUND = sum(OBS) / 10
lineNumber = 0
for line in sys.argv[1].split("\n"):
if lineNumber == 0:
NUM_STREAMS = int(line)
DIM = NUM_STREAMS * 2
elif lineNumber == 1:
C_BOUND = float(line)
elif lineNumber == 2:
OBS = ast.literal_eval(line)
elif lineNumber == 3:
PRED = ast.literal_eval(line)
elif lineNumber == 4:
MI = ast.literal_eval(line)
elif lineNumber == 5:
COSTS = ast.literal_eval(line)
elif lineNumber == 6:
MEAN = ast.literal_eval(line)
elif lineNumber == 7:
VAR = ast.literal_eval(line)
elif lineNumber == 8:
V_BOUND = np.array(ast.literal_eval(line))
elif lineNumber == 9:
EXP_COND_V = np.array(ast.literal_eval(line))
lineNumber += 1
if np.sum(np.array(OBS)) <= C_BOUND:
    np.set_printoptions(suppress=True)
    print(np.array2string(np.array(OBS + ([0] * len(OBS))), separator=', '))
    sys.exit(0)
if C_BOUND <= NUM_STREAMS:
    np.set_printoptions(suppress=True)
    print(np.array2string(np.array(([1] * NUM_STREAMS) + ([0] * NUM_STREAMS)), separator=', '))
    sys.exit(0)
NP_VAR = np.array(VAR)
PENALTY = 0.0
MI = np.ones(NUM_STREAMS)
W = np.ones(NUM_STREAMS) / (np.ones(NUM_STREAMS) + np.abs(np.array(MEAN)))
ERR = np.maximum(np.array([0.001] * NUM_STREAMS), W * (NP_VAR + PENALTY))
def obj(x):
val = 0.0
for i in range(NUM_STREAMS):
val = val + (ERR[i] / (x[i] + (MI[i] * x[i+NUM_STREAMS])))
return val
def jacobian(x):
jac = np.zeros(DIM)
for i in range(NUM_STREAMS):
a = ERR[i]
b = 1.0
denom = (x[i] + b*x[i + NUM_STREAMS])**2
jac[i] = -a / denom
jac[i + NUM_STREAMS] = -(a*b) / denom
return jac
H_EVAL_CNT = 0
def hessian(x):
hess = np.zeros((DIM, DIM))
for i in range(DIM):
streamIdx = i % NUM_STREAMS
        a = ERR[streamIdx]  # index by stream (ERR has NUM_STREAMS entries), not by flat dimension
b = 1
for j in range(i, DIM):
denom = (x[streamIdx] + b*x[streamIdx + NUM_STREAMS])**3
if(i == j):
if i < NUM_STREAMS:
hess[i][j] = (2 * a) / denom
else:
hess[i][j] = (2 * a * b * b) / denom
elif j == (i + NUM_STREAMS):
value = (2 * a * b) / denom
hess[i][j] = value
hess[j][i] = value
#hess = hess + (0.00001 * np.identity(DIM))
return hess
def prop(samplesAllowed):
prop = np.zeros(NUM_STREAMS)
adjAllowance = samplesAllowed - NUM_STREAMS
total = 0.0
for i in range(NUM_STREAMS):
total += OBS[i]
for i in range(NUM_STREAMS):
prop[i] = OBS[i] / total
return np.ones(NUM_STREAMS) + prop * adjAllowance
def ney2(samplesAllowed):
prop = np.zeros(NUM_STREAMS)
adjAllowance = samplesAllowed - NUM_STREAMS
denom = 0.0
for i in range(NUM_STREAMS):
denom += OBS[i] * math.sqrt(VAR[i])
for i in range(NUM_STREAMS):
prop[i] = (OBS[i] * math.sqrt(VAR[i])) / denom
return np.ones(NUM_STREAMS) + prop * adjAllowance
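# Allocation notes (added): prop() splits the sample budget in proportion
# to the observed stream sizes OBS, while ney2() follows a Neyman-style
# rule that additionally weights each stream by the square root of its
# variance, so noisier streams receive more samples. Both reserve one
# sample per stream before distributing the remainder.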
cons = []
#
# COST
#
A3 = np.hstack((np.ones(NUM_STREAMS),
np.zeros(NUM_STREAMS)))
def c3(x):
cost = 0.0
for i in range(NUM_STREAMS):
if x[i + NUM_STREAMS] > 0.5:
cost += 1.0
cost += np.matmul(x, np.hstack((COSTS, np.zeros(NUM_STREAMS))))
return C_BOUND - cost
cons.append({"type": "eq", "fun": c3})
A4 = np.zeros((NUM_STREAMS, 2*NUM_STREAMS),dtype=float)
for i in range(A4.shape[0]):
A4[i][PRED[i]] = 1.0
A4[i][NUM_STREAMS + i] = -1.01
cons.append({"type": "ineq", "fun": lambda x: np.matmul(A4, x) })
#
# Lower bound on real + sim
#
A7 = np.hstack((np.identity(NUM_STREAMS, dtype = float),
np.identity(NUM_STREAMS, dtype = float)))
b7 = np.ones(NUM_STREAMS) + 0.01
cons.append({"type": "ineq", "fun": lambda x: np.matmul(A7, x) - b7 })
#
# Variation Bound on simulated values
#
vs = VAR - EXP_COND_V
n_one = np.ones(NUM_STREAMS) * -1.0
offs = 0.5 * np.hstack((np.zeros(NUM_STREAMS),
np.ones(NUM_STREAMS)))
A5 = np.zeros((NUM_STREAMS, 2*NUM_STREAMS))
for i in range(NUM_STREAMS):
A5[i][i] = -V_BOUND[i]
A5[i][NUM_STREAMS + i] = (VAR[i] - vs[i] - V_BOUND[i])
cons.append({"type": "ineq", "fun": lambda x: n_one * np.matmul(A5, x + offs)})
#
# Starting location
#
start = np.hstack((prop(C_BOUND),
np.zeros(NUM_STREAMS)))
unique, counts = np.unique(PRED, return_counts=True)
list1, list2 = zip(*sorted(zip(counts, unique)))
top2 = list2[len(list2)-2:]
for i in range(NUM_STREAMS):
sidx = i + NUM_STREAMS
if PRED[i] in top2:
start[sidx] = start[PRED[i]]
else:
start[sidx] = 0
#
# Bounds on the dimensions
#
bnds = []
for i in range(NUM_STREAMS):
bnds.append((0, OBS[i]))
for i in range(NUM_STREAMS):
bnds.append((0, OBS[PRED[i]]))
bnds = tuple(bnds)
st = time.time()
result = optimize.minimize(obj, start,
method="SLSQP",
jac = jacobian,
hess = hessian,
options={'xtol': 1e-1, 'gtol': 1e-02, 'maxiter':200 },
bounds=bnds,
constraints=cons)
et = time.time()
if not result.success and result.status != 9 and result.status != 0:
    pass
    # print("FAILED")
    # print(result.status)
    # print(result.message)
np.set_printoptions(suppress=True)
print(np.array2string(result.x.clip(0.0), separator=', '))
| 5,898 | 22.501992 | 94 |
py
|
gecco-2022
|
gecco-2022-main/AND-XOR/plotMOO2.py
|
## plot stuff after loading everything from the pickled files for MOO
import time, array, random, copy, math
import numpy as np
from deap import algorithms, base, benchmarks, tools, creator
import matplotlib.pyplot as plt
import seaborn
import pandas as pd
import random
import pickle
from os.path import exists
import os
from ConfigPlot import ConfigPlot_DiffStiffness2, ConfigPlot_DiffStiffness3
from MD_functions import MD_VibrSP_ConstV_Yfixed_DiffK, FIRE_YFixed_ConstV_DiffK, MD_VibrSP_ConstV_Yfixed_DiffK2
from MD_functions import MD_VibrSP_ConstV_Yfixed_DiffK_Freqs, MD_VibrSP_ConstV_Yfixed_DiffK2_Freqs
from DynamicalMatrix import DM_mass_DiffK_Yfixed
from joblib import Parallel, delayed
import multiprocessing
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
def evaluate(indices):
#%% Initial Configuration
m1 = 1
m2 = 10
k1 = 1.
k2 = 10.
n_col = 6
n_row = 5
N = n_col*n_row
Nt_fire = 1e6
dt_ratio = 40
Nt_SD = 1e5
Nt_MD = 1e5
dphi_index = -1
dphi = 10**dphi_index
d0 = 0.1
d_ratio = 1.1
Lx = d0*n_col
Ly = (n_row-1)*np.sqrt(3)/2*d0+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
phi0 = N*np.pi*d0**2/4/(Lx*Ly)
d_ini = d0*np.sqrt(1+dphi/phi0)
D = np.zeros(N)+d_ini
#D = np.zeros(N)+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
for i_row in range(1, n_row+1):
for i_col in range(1, n_col+1):
ind = (i_row-1)*n_col+i_col-1
if i_row%2 == 1:
x0[ind] = (i_col-1)*d0
else:
x0[ind] = (i_col-1)*d0+0.5*d0
y0[ind] = (i_row-1)*np.sqrt(3)/2*d0
y0 = y0+0.5*d0
mass = np.zeros(N) + 1
k_list = np.array([k1, k2, k1 * k2 / (k1 + k2)])
k_type = indices #np.zeros(N, dtype=np.int8)
#k_type[indices] = 1
# Steepest Descent to get energy minimum
#x_ini, y_ini, p_now = MD_YFixed_ConstV_SP_SD_DiffK(Nt_SD, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
x_ini, y_ini, p_now = FIRE_YFixed_ConstV_DiffK(Nt_fire, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
# skip the steepest descent for now to save time
#x_ini = x0
#y_ini = y0
# calculating the bandgap - no need to do this in this problem
#w, v = DM_mass_DiffK_Yfixed(N, x_ini, y_ini, D, mass, Lx, 0.0, Ly, k_list, k_type)
#w = np.real(w)
#v = np.real(v)
#freq = np.sqrt(np.absolute(w))
#ind_sort = np.argsort(freq)
#freq = freq[ind_sort]
#v = v[:, ind_sort]
#ind = freq > 1e-4
#eigen_freq = freq[ind]
#eigen_mode = v[:, ind]
#w_delta = eigen_freq[1:] - eigen_freq[0:-1]
#index = np.argmax(w_delta)
#F_low_exp = eigen_freq[index]
#F_high_exp = eigen_freq[index+1]
#print("specs:")
#print(F_low_exp)
#print(F_high_exp)
#print(max(w_delta))
# specify the input ports and the output port
SP_scheme = 0
digit_in = SP_scheme//2
digit_out = SP_scheme-2*digit_in
ind_in1 = int((n_col+1)/2)+digit_in - 1
ind_in2 = ind_in1 + 2
ind_out = int(N-int((n_col+1)/2)+digit_out)
ind_fix = int((n_row+1)/2)*n_col-int((n_col+1)/2)
B = 1
    Nt = 1e4  # it was 1e5 before, reduced to run faster
    # we are designing an AND gate at this frequency
Freq_Vibr = 7
# case 1, input [1, 1]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out1 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain1 = out1/(inp1+inp2)
# case 2, input [1, 0]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 0
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out2 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain2 = out2/(inp1+inp2)
# case 3, input [0, 1]
Amp_Vibr1 = 0
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out3 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain3 = out3/(inp1+inp2)
andness = 2*gain1/(gain2+gain3)
    # we are designing an XOR gate at this frequency
Freq_Vibr = 10
# case 1, input [1, 1]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out1 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain1 = out1/(inp1+inp2)
# case 2, input [1, 0]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 0
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out2 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain2 = out2/(inp1+inp2)
# case 3, input [0, 1]
Amp_Vibr1 = 0
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out3 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain3 = out3/(inp1+inp2)
XOR = (gain2+gain3)/(2*gain1)
print("done eval", flush=True)
return andness, XOR
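# The gain computations above repeatedly interpolate an FFT curve at the
# driving frequency. A small helper capturing that pattern (added sketch,
# not called by the original script):
def fft_at_freq(freq_fft, fft_vals, freq_vibr):
    # first FFT bin above the driving frequency
    index_ = np.where(freq_fft > freq_vibr)[0][0]
    # linear interpolation between the neighboring bins
    frac = (freq_vibr - freq_fft[index_ - 1]) / (freq_fft[index_] - freq_fft[index_ - 1])
    return fft_vals[index_ - 1] + (fft_vals[index_] - fft_vals[index_ - 1]) * frac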
def showPacking(indices):
k1 = 1.
k2 = 10.
n_col = 6
n_row = 5
N = n_col*n_row
m1=1
m2=10
dphi_index = -1
dphi = 10**dphi_index
d0 = 0.1
Lx = d0*n_col
Ly = (n_row-1)*np.sqrt(3)/2*d0+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
phi0 = N*np.pi*d0**2/4/(Lx*Ly)
d_ini = d0*np.sqrt(1+dphi/phi0)
D = np.zeros(N)+d_ini
#D = np.zeros(N)+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
for i_row in range(1, n_row+1):
for i_col in range(1, n_col+1):
ind = (i_row-1)*n_col+i_col-1
if i_row%2 == 1:
x0[ind] = (i_col-1)*d0
else:
x0[ind] = (i_col-1)*d0+0.5*d0
y0[ind] = (i_row-1)*np.sqrt(3)/2*d0
y0 = y0+0.5*d0
mass = np.zeros(N) + 1
k_list = np.array([k1, k2, k1 * k2 / (k1 + k2)])
k_type = indices #np.zeros(N, dtype=np.int8)
#k_type[indices] = 1
# specify the input ports and the output port
SP_scheme = 0
digit_in = SP_scheme//2
digit_out = SP_scheme-2*digit_in
ind_in1 = int((n_col+1)/2)+digit_in - 1
ind_in2 = ind_in1 + 2
ind_out = int(N-int((n_col+1)/2)+digit_out)
ind_fix = int((n_row+1)/2)*n_col-int((n_col+1)/2)
# show packing
ConfigPlot_DiffStiffness3(N, x0, y0, D, [Lx,Ly], k_type, 0, '/Users/atoosa/Desktop/results/packing.pdf', ind_in1, ind_in2, ind_out)
def plotInOut_and(indices):
#%% Initial Configuration
k1 = 1.
k2 = 10.
m1 = 1
m2 = 10
n_col = 6
n_row = 5
N = n_col*n_row
Nt_fire = 1e6
dt_ratio = 40
Nt_SD = 1e5
Nt_MD = 1e5
dphi_index = -1
dphi = 10**dphi_index
d0 = 0.1
d_ratio = 1.1
Lx = d0*n_col
Ly = (n_row-1)*np.sqrt(3)/2*d0+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
phi0 = N*np.pi*d0**2/4/(Lx*Ly)
d_ini = d0*np.sqrt(1+dphi/phi0)
D = np.zeros(N)+d_ini
#D = np.zeros(N)+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
for i_row in range(1, n_row+1):
for i_col in range(1, n_col+1):
ind = (i_row-1)*n_col+i_col-1
if i_row%2 == 1:
x0[ind] = (i_col-1)*d0
else:
x0[ind] = (i_col-1)*d0+0.5*d0
y0[ind] = (i_row-1)*np.sqrt(3)/2*d0
y0 = y0+0.5*d0
mass = np.zeros(N) + 1
k_list = np.array([k1, k2, k1 * k2 / (k1 + k2)])
k_type = indices #np.zeros(N, dtype=np.int8)
#k_type[indices] = 1
# Steepest Descent to get energy minimum
#x_ini, y_ini, p_now = MD_YFixed_ConstV_SP_SD_DiffK(Nt_SD, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
x_ini, y_ini, p_now = FIRE_YFixed_ConstV_DiffK(Nt_fire, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
# skip the steepest descent for now to save time
#x_ini = x0
#y_ini = y0
# calculating the bandgap - no need to do this in this problem
w, v = DM_mass_DiffK_Yfixed(N, x_ini, y_ini, D, mass, Lx, 0.0, Ly, k_list, k_type)
w = np.real(w)
v = np.real(v)
freq = np.sqrt(np.absolute(w))
ind_sort = np.argsort(freq)
freq = freq[ind_sort]
v = v[:, ind_sort]
ind = freq > 1e-4
eigen_freq = freq[ind]
eigen_mode = v[:, ind]
w_delta = eigen_freq[1:] - eigen_freq[0:-1]
index = np.argmax(w_delta)
F_low_exp = eigen_freq[index]
F_high_exp = eigen_freq[index+1]
plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.scatter(np.arange(0, len(eigen_freq)), eigen_freq, marker='x', color='blue')
plt.xlabel(r"Index $(k)$", fontsize=16)
plt.ylabel(r"Frequency $(\omega)$", fontsize=16)
plt.title("Frequency Spectrum", fontsize=16, fontweight="bold")
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
props = dict(facecolor='green', alpha=0.1)
myText = r'$\omega_{low}=$'+"{:.2f}".format(F_low_exp)+"\n"+r'$\omega_{high}=$'+"{:.2f}".format(F_high_exp)+"\n"+r'$\Delta \omega=$'+"{:.2f}".format(max(w_delta))
#plt.text(0.78, 0.15, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16, bbox=props)
plt.text(0.2, 0.8, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16, bbox=props)
plt.hlines(y=7, xmin=0, xmax=50, linewidth=1, linestyle='dashdot', color='limegreen', alpha=0.9)
plt.hlines(y=10, xmin=0, xmax=50, linewidth=1, linestyle='dotted', color='brown', alpha=0.9)
    plt.text(51, 5, r'$\omega=7$', fontsize=12, color='limegreen', alpha=0.9)
    plt.text(51, 12, r'$\omega=10$', fontsize=12, color='brown', alpha=0.9)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tight_layout()
plt.show()
print("specs:")
print(F_low_exp)
print(F_high_exp)
print(max(w_delta))
# specify the input ports and the output port
SP_scheme = 0
digit_in = SP_scheme//2
digit_out = SP_scheme-2*digit_in
ind_in1 = int((n_col+1)/2)+digit_in - 1
ind_in2 = ind_in1 + 2
ind_out = int(N-int((n_col+1)/2)+digit_out)
ind_fix = int((n_row+1)/2)*n_col-int((n_col+1)/2)
B = 1
    Nt = 1e4  # it was 1e5 before, reduced to run faster
    # we are designing an AND gate at this frequency
Freq_Vibr = 7
# case 0, input [0, 0]
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.hlines(y=0, xmin=0, xmax=30, color='green', label='Input1', linestyle='dotted')
plt.hlines(y=0, xmin=0, xmax=30, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.hlines(y=0, xmin=0, xmax=30, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 00", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.ylim(0, 0.005)
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.hlines(y=0, xmin=0, xmax=10000, color='green', label='Input1', linestyle='solid')
plt.hlines(y=0, xmin=0, xmax=10000, color='blue', label='Input2', linestyle='dotted')
plt.hlines(y=0, xmin=0, xmax=10000, color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 00", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.ylim(-0.0100, 0.0100)
plt.tight_layout()
plt.show()
# case 1, input [1, 1]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out1 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain1 = out1/(inp1+inp2)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 11", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
myText = 'Gain='+"{:.3f}".format(gain1)
plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 11", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# case 2, input [1, 0]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 0
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out2 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain2 = out2/(inp1+inp2)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 10", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
myText = 'Gain='+"{:.3f}".format(gain2)
plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 10", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# case 3, input [0, 1]
Amp_Vibr1 = 0
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out3 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain3 = out3/(inp1+inp2)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 01", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
myText = 'Gain='+"{:.3f}".format(gain3)
plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 01", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
print("gain1:")
print(gain1)
print("gain2:")
print(gain2)
print("gain3:")
print(gain3)
andness = 2*gain1/(gain2+gain3)
return andness
def plotInOut_xor(indices):
#%% Initial Configuration
k1 = 1.
k2 = 10.
m1 = 1
m2 = 10
n_col = 6
n_row = 5
N = n_col*n_row
Nt_fire = 1e6
dt_ratio = 40
Nt_SD = 1e5
Nt_MD = 1e5
dphi_index = -1
dphi = 10**dphi_index
d0 = 0.1
d_ratio = 1.1
Lx = d0*n_col
Ly = (n_row-1)*np.sqrt(3)/2*d0+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
phi0 = N*np.pi*d0**2/4/(Lx*Ly)
d_ini = d0*np.sqrt(1+dphi/phi0)
D = np.zeros(N)+d_ini
#D = np.zeros(N)+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
for i_row in range(1, n_row+1):
for i_col in range(1, n_col+1):
ind = (i_row-1)*n_col+i_col-1
if i_row%2 == 1:
x0[ind] = (i_col-1)*d0
else:
x0[ind] = (i_col-1)*d0+0.5*d0
y0[ind] = (i_row-1)*np.sqrt(3)/2*d0
y0 = y0+0.5*d0
mass = np.zeros(N) + 1
k_list = np.array([k1, k2, k1 * k2 / (k1 + k2)])
k_type = indices #np.zeros(N, dtype=np.int8)
#k_type[indices] = 1
# Steepest Descent to get energy minimum
#x_ini, y_ini, p_now = MD_YFixed_ConstV_SP_SD_DiffK(Nt_SD, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
x_ini, y_ini, p_now = FIRE_YFixed_ConstV_DiffK(Nt_fire, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
# skip the steepest descent for now to save time
#x_ini = x0
#y_ini = y0
# calculating the bandgap - no need to do this in this problem
w, v = DM_mass_DiffK_Yfixed(N, x_ini, y_ini, D, mass, Lx, 0.0, Ly, k_list, k_type)
w = np.real(w)
v = np.real(v)
freq = np.sqrt(np.absolute(w))
ind_sort = np.argsort(freq)
freq = freq[ind_sort]
v = v[:, ind_sort]
ind = freq > 1e-4
eigen_freq = freq[ind]
eigen_mode = v[:, ind]
w_delta = eigen_freq[1:] - eigen_freq[0:-1]
index = np.argmax(w_delta)
F_low_exp = eigen_freq[index]
F_high_exp = eigen_freq[index+1]
plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.scatter(np.arange(0, len(eigen_freq)), eigen_freq, marker='x', color='blue')
plt.xlabel(r"Index $(k)$", fontsize=16)
plt.ylabel(r"Frequency $(\omega)$", fontsize=16)
plt.title("Frequency Spectrum", fontsize=16, fontweight="bold")
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
props = dict(facecolor='green', alpha=0.1)
myText = r'$\omega_{low}=$'+"{:.2f}".format(F_low_exp)+"\n"+r'$\omega_{high}=$'+"{:.2f}".format(F_high_exp)+"\n"+r'$\Delta \omega=$'+"{:.2f}".format(max(w_delta))
plt.text(0.78, 0.15, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16, bbox=props)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tight_layout()
plt.show()
print("specs:")
print(F_low_exp)
print(F_high_exp)
print(max(w_delta))
# specify the input ports and the output port
SP_scheme = 0
digit_in = SP_scheme//2
digit_out = SP_scheme-2*digit_in
ind_in1 = int((n_col+1)/2)+digit_in - 1
ind_in2 = ind_in1 + 2
ind_out = int(N-int((n_col+1)/2)+digit_out)
ind_fix = int((n_row+1)/2)*n_col-int((n_col+1)/2)
B = 1
    Nt = 1e4  # it was 1e5 before, reduced to run faster
    # we are designing an XOR gate at this frequency
Freq_Vibr = 10
# case 1, input [1, 1]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out1 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain1 = out1/(inp1+inp2)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 11", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
myText = 'Gain='+"{:.3f}".format(gain1)
plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 11", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# case 2, input [1, 0]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 0
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out2 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain2 = out2/(inp1+inp2)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 10", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
myText = 'Gain='+"{:.3f}".format(gain2)
plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 10", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# case 3, input [0, 1]
Amp_Vibr1 = 0
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out3 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain3 = out3/(inp1+inp2)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 01", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
myText = 'Gain='+"{:.3f}".format(gain3)
plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 01", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
print("gain1:")
print(gain1)
print("gain2:")
print(gain2)
print("gain3:")
print(gain3)
XOR = (gain2+gain3)/(2*gain1)
return XOR
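# The two-point interpolation repeated above for every port and input case
# can be expressed with numpy directly. A hypothetical helper (a sketch,
# not used by the original code; assumes freq_fft is sorted ascending, as
# FFT frequency bins are):
def interp_at_freq(freq_fft, fft_vals, Freq_Vibr):
    # np.interp linearly interpolates the spectrum at the driving
    # frequency, matching the manual index_-1/index_ arithmetic above
    return np.interp(Freq_Vibr, freq_fft, fft_vals)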
def plotInOut_adder(indices):
#%% Initial Configuration
k1 = 1.
k2 = 10.
m1 = 1
m2 = 10
n_col = 6
n_row = 5
N = n_col*n_row
Nt_fire = 1e6
dt_ratio = 40
Nt_SD = 1e5
Nt_MD = 1e5
dphi_index = -1
dphi = 10**dphi_index
d0 = 0.1
d_ratio = 1.1
Lx = d0*n_col
Ly = (n_row-1)*np.sqrt(3)/2*d0+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
phi0 = N*np.pi*d0**2/4/(Lx*Ly)
d_ini = d0*np.sqrt(1+dphi/phi0)
D = np.zeros(N)+d_ini
#D = np.zeros(N)+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
for i_row in range(1, n_row+1):
for i_col in range(1, n_col+1):
ind = (i_row-1)*n_col+i_col-1
if i_row%2 == 1:
x0[ind] = (i_col-1)*d0
else:
x0[ind] = (i_col-1)*d0+0.5*d0
y0[ind] = (i_row-1)*np.sqrt(3)/2*d0
y0 = y0+0.5*d0
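# odd and even rows are offset by half a diameter, so this loop lays out a
# triangular lattice of N = n_col*n_row particles; the final half-diameter
# shift lifts the bottom row off the fixed wall at y = 0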
mass = np.zeros(N) + 1
k_list = np.array([k1, k2, k1 * k2 / (k1 + k2)])
k_type = indices #np.zeros(N, dtype=np.int8)
#k_type[indices] = 1
# Steepest Descent to get energy minimum
#x_ini, y_ini, p_now = MD_YFixed_ConstV_SP_SD_DiffK(Nt_SD, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
x_ini, y_ini, p_now = FIRE_YFixed_ConstV_DiffK(Nt_fire, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
# skip the steepest descent for now to save time
#x_ini = x0
#y_ini = y0
# calculate the eigenfrequency spectrum to locate the band gap (plotted below)
w, v = DM_mass_DiffK_Yfixed(N, x_ini, y_ini, D, mass, Lx, 0.0, Ly, k_list, k_type)
w = np.real(w)
v = np.real(v)
freq = np.sqrt(np.absolute(w))
ind_sort = np.argsort(freq)
freq = freq[ind_sort]
v = v[:, ind_sort]
ind = freq > 1e-4
eigen_freq = freq[ind]
eigen_mode = v[:, ind]
w_delta = eigen_freq[1:] - eigen_freq[0:-1]
index = np.argmax(w_delta)
F_low_exp = eigen_freq[index]
F_high_exp = eigen_freq[index+1]
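# F_low_exp and F_high_exp bracket the largest jump between consecutive
# eigenfrequencies, i.e. the lower and upper edges of the widest band gap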
plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.scatter(np.arange(0, len(eigen_freq)), eigen_freq, marker='x', color='blue')
plt.xlabel(r"Index $(k)$", fontsize=16)
plt.ylabel(r"Frequency $(\omega)$", fontsize=16)
plt.title("Frequency Spectrum", fontsize=16, fontweight="bold")
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
props = dict(facecolor='green', alpha=0.1)
myText = r'$\omega_{low}=$'+"{:.2f}".format(F_low_exp)+"\n"+r'$\omega_{high}=$'+"{:.2f}".format(F_high_exp)+"\n"+r'$\Delta \omega=$'+"{:.2f}".format(max(w_delta))
#plt.text(0.78, 0.15, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16, bbox=props)
plt.text(0.2, 0.8, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16, bbox=props)
plt.hlines(y=7, xmin=0, xmax=50, linewidth=1, linestyle='dashdot', color='limegreen', alpha=0.9)
plt.hlines(y=10, xmin=0, xmax=50, linewidth=1, linestyle='dotted', color='brown', alpha=0.9)
plt.text(51, 5, r'$\omega=7$', fontsize=12, color='limegreen', alpha=0.9)
plt.text(51, 12, r'$\omega=10$', fontsize=12, color='brown', alpha=0.9)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tight_layout()
plt.show()
print("specs:")
print(F_low_exp)
print(F_high_exp)
print(max(w_delta))
# specify the input ports and the output port
SP_scheme = 0
digit_in = SP_scheme//2
digit_out = SP_scheme-2*digit_in
ind_in1 = int((n_col+1)/2)+digit_in - 1
ind_in2 = ind_in1 + 2
ind_out = int(N-int((n_col+1)/2)+digit_out)
ind_fix = int((n_row+1)/2)*n_col-int((n_col+1)/2)
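# worked example: with n_col = 6, n_row = 5 (N = 30) and SP_scheme = 0,
# these formulas give ind_in1 = 2, ind_in2 = 4 (bottom row) and
# ind_out = 27 (top row); ind_fix = 15 is computed but not used below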
B = 1
Nt = 1e4 # reduced from 1e5 to shorten the runs
# the AND gate is probed at Freq_Vibr1 = 7 and the XOR gate at Freq_Vibr2 = 10
Freq_Vibr1 = 7
Freq_Vibr2 = 10
# case 1, input [1, 1]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 11", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
#myText = 'Gain='+"{:.3f}".format(gain1)
#plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 11", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# case 2, input [1, 0]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 0
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 10", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
#myText = 'Gain='+"{:.3f}".format(gain2)
#plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 10", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# case 3, input [0, 1]
Amp_Vibr1 = 0
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 01", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
#myText = 'Gain='+"{:.3f}".format(gain3)
#plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 01", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
return 0
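# A hypothetical helper (a sketch, not part of the original pipeline) that
# condenses the gain-to-objective formulas used throughout this file:
def gate_metrics(gain11, gain10, gain01):
    # AND-ness rewards a strong output only when both inputs are driven;
    # XOR-ness is its reciprocal and rewards the single-input cases
    andness = 2 * gain11 / (gain10 + gain01)
    xorness = (gain10 + gain01) / (2 * gain11)
    return andness, xorness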
#cleaning up the data files
#try:
# os.remove("indices.pickle")
#except OSError:
# pass
#try:
# os.remove("outputs.pickle")
#except OSError:
# pass
# deap setup:
random.seed(a=42)
creator.create("FitnessMax", base.Fitness, weights=(1.0, 1.0))
creator.create("Individual", list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, 30)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", evaluate)
toolbox.register("mate", tools.cxOnePoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selNSGA2)
# parallelization?
#toolbox.register("map", futures.map)
stats = tools.Statistics()
stats.register("pop", copy.deepcopy)
toolbox.pop_size = 50
toolbox.max_gen = 250
toolbox.mut_prob = 0.8
logbook = tools.Logbook()
logbook.header = ["gen", "evals"] + stats.fields
hof = tools.HallOfFame(1, similar=np.array_equal) #can change the size
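# The pickles loaded below were presumably produced by an NSGA-II run along
# these lines (a sketch only: it assumes `from deap import algorithms` and
# mu = lambda = pop_size with cxpb = 1 - mut_prob, none of which is shown here):
def run_evolution():
    pop = toolbox.population(n=toolbox.pop_size)
    return algorithms.eaMuPlusLambda(
        pop, toolbox, mu=toolbox.pop_size, lambda_=toolbox.pop_size,
        cxpb=1 - toolbox.mut_prob, mutpb=toolbox.mut_prob,
        ngen=toolbox.max_gen, stats=stats, halloffame=hof, verbose=False)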
# load the results from the files
res = pickle.load(open('results.pickle', 'rb'))
hof = pickle.load(open('hofs.pickle', 'rb'))
log = pickle.load(open('logs.pickle', 'rb'))
# plot fronts
plt.figure(figsize=(4,4))
num_cores = multiprocessing.cpu_count()
for i in range(len(log)):
if i==0:
fronts = tools.emo.sortLogNondominated(log.select('pop')[i], len(log.select('pop')[i]))
for a,inds in enumerate(fronts):
indices = []
for ind in inds:
indices.append(np.array(ind))
outputs = Parallel(n_jobs=num_cores)(delayed(evaluate)(ind) for ind in indices)
print(outputs)
print(indices)
pickle.dump(indices, open('indices0.pickle', 'wb'))
pickle.dump(outputs, open('outputs0.pickle', 'wb'))
for point in outputs:
plt.scatter(x=point[0], y=point[1], color='blue', marker='o', alpha=0.4)
plt.scatter(x=point[0], y=point[1], color='blue', marker='o', alpha=0.4, label='Gen = 0')  # labeled copy for the legend
elif i==toolbox.max_gen/2:
fronts = tools.emo.sortLogNondominated(log.select('pop')[i], len(log.select('pop')[i]))
for a,inds in enumerate(fronts):
indices = []
for ind in inds:
indices.append(np.array(ind))
outputs = Parallel(n_jobs=num_cores)(delayed(evaluate)(ind) for ind in indices)
print(outputs)
print(indices)
pickle.dump(indices, open('indices1.pickle', 'wb'))
pickle.dump(outputs, open('outputs1.pickle', 'wb'))
for point in outputs:
plt.scatter(x=point[0], y=point[1], color='green', marker='o', alpha=0.4)
plt.scatter(x=point[0], y=point[1], color='green', marker='o', alpha=0.4, label='Gen = 125')  # labeled copy for the legend
elif i==toolbox.max_gen:
fronts = tools.emo.sortLogNondominated(log.select('pop')[i], len(log.select('pop')[i]))
for a,inds in enumerate(fronts):
indices = []
for ind in inds:
indices.append(np.array(ind))
outputs = Parallel(n_jobs=num_cores)(delayed(evaluate)(ind) for ind in indices)
print(outputs)
print(indices)
pickle.dump(indices, open('indices2.pickle', 'wb'))
pickle.dump(outputs, open('outputs2.pickle', 'wb'))
for point in outputs:
plt.scatter(x=point[0], y=point[1], color='red', marker='o', alpha=0.4)
plt.scatter(x=point[0], y=point[1], color='red', marker='o', alpha=0.4, label='Gen = 250')  # labeled copy for the legend
plt.xlabel(r'$f_1(\mathbf{x})$ = AND-ness', fontsize=16)
plt.ylabel(r'$f_2(\mathbf{x})$ = XOR-ness', fontsize=16)
plt.title("Pareto Front", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# evaluate and plot an individual
#showPacking([0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0])
#plotInOut_and([0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0])
#plotInOut_xor([0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0])
#plotInOut_adder([0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0])
| 51,781 | 39.172227 | 267 |
py
|
gecco-2022
|
gecco-2022-main/AND-XOR/front.py
|
## plot the MOO (multi-objective optimization) results after loading everything from the pickled files
import time, array, random, copy, math
import numpy as np
from deap import algorithms, base, benchmarks, tools, creator
import matplotlib.pyplot as plt
import seaborn
import pandas as pd
import random
import pickle
from os.path import exists
import os
from ConfigPlot import ConfigPlot_DiffStiffness2, ConfigPlot_DiffStiffness3
from MD_functions import MD_VibrSP_ConstV_Yfixed_DiffK, FIRE_YFixed_ConstV_DiffK, MD_VibrSP_ConstV_Yfixed_DiffK2
from MD_functions import MD_VibrSP_ConstV_Yfixed_DiffK_Freqs, MD_VibrSP_ConstV_Yfixed_DiffK2_Freqs
from DynamicalMatrix import DM_mass_DiffK_Yfixed
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
def evaluate(indices):
#%% Initial Configuration
m1 = 1
m2 = 10
k1 = 1.
k2 = 10.
n_col = 6
n_row = 5
N = n_col*n_row
Nt_fire = 1e6
dt_ratio = 40
Nt_SD = 1e5
Nt_MD = 1e5
dphi_index = -1
dphi = 10**dphi_index
d0 = 0.1
d_ratio = 1.1
Lx = d0*n_col
Ly = (n_row-1)*np.sqrt(3)/2*d0+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
phi0 = N*np.pi*d0**2/4/(Lx*Ly)
d_ini = d0*np.sqrt(1+dphi/phi0)
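# phi0 is the packing fraction at diameter d0; inflating the diameters to
# d_ini = d0*sqrt(1 + dphi/phi0) raises the packing fraction to exactly
# phi0 + dphi (dphi = 0.1 here), i.e. a slightly overcompressed packing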
D = np.zeros(N)+d_ini
#D = np.zeros(N)+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
for i_row in range(1, n_row+1):
for i_col in range(1, n_col+1):
ind = (i_row-1)*n_col+i_col-1
if i_row%2 == 1:
x0[ind] = (i_col-1)*d0
else:
x0[ind] = (i_col-1)*d0+0.5*d0
y0[ind] = (i_row-1)*np.sqrt(3)/2*d0
y0 = y0+0.5*d0
mass = np.zeros(N) + 1
k_list = np.array([k1, k2, k1 * k2 / (k1 + k2)])
k_type = indices #np.zeros(N, dtype=np.int8)
#k_type[indices] = 1
# Steepest Descent to get energy minimum
#x_ini, y_ini, p_now = MD_YFixed_ConstV_SP_SD_DiffK(Nt_SD, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
x_ini, y_ini, p_now = FIRE_YFixed_ConstV_DiffK(Nt_fire, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
# skip the steepest descent for now to save time
#x_ini = x0
#y_ini = y0
# calculating the bandgap - no need to do this in this problem
#w, v = DM_mass_DiffK_Yfixed(N, x_ini, y_ini, D, mass, Lx, 0.0, Ly, k_list, k_type)
#w = np.real(w)
#v = np.real(v)
#freq = np.sqrt(np.absolute(w))
#ind_sort = np.argsort(freq)
#freq = freq[ind_sort]
#v = v[:, ind_sort]
#ind = freq > 1e-4
#eigen_freq = freq[ind]
#eigen_mode = v[:, ind]
#w_delta = eigen_freq[1:] - eigen_freq[0:-1]
#index = np.argmax(w_delta)
#F_low_exp = eigen_freq[index]
#F_high_exp = eigen_freq[index+1]
#print("specs:")
#print(F_low_exp)
#print(F_high_exp)
#print(max(w_delta))
# specify the input ports and the output port
SP_scheme = 0
digit_in = SP_scheme//2
digit_out = SP_scheme-2*digit_in
ind_in1 = int((n_col+1)/2)+digit_in - 1
ind_in2 = ind_in1 + 2
ind_out = int(N-int((n_col+1)/2)+digit_out)
ind_fix = int((n_row+1)/2)*n_col-int((n_col+1)/2)
B = 1
Nt = 1e4 # reduced from 1e5 to shorten the runs
# we are designing an AND gate at this frequency
Freq_Vibr = 7
# case 1, input [1, 1]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out1 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain1 = out1/(inp1+inp2)
# case 2, input [1, 0]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 0
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out2 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain2 = out2/(inp1+inp2)
# case 3, input [0, 1]
Amp_Vibr1 = 0
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out3 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain3 = out3/(inp1+inp2)
andness = 2*gain1/(gain2+gain3)
# we are designing an XOR gate at this frequency
Freq_Vibr = 10
# case 1, input [1, 1]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out1 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain1 = out1/(inp1+inp2)
# case 2, input [1, 0]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 0
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out2 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain2 = out2/(inp1+inp2)
# case 3, input [0, 1]
Amp_Vibr1 = 0
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out3 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain3 = out3/(inp1+inp2)
XOR = (gain2+gain3)/(2*gain1)
print("done eval", flush=True)
return andness, XOR
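# worked example: if gain1 = 0.8 while gain2 = gain3 = 0.1 at omega = 7,
# then andness = 2*0.8/(0.1+0.1) = 8, i.e. the output responds strongly to
# input 11 only; the XOR objective at omega = 10 is the reciprocal and
# rewards the opposite behaviour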
def showPacking(indices):
k1 = 1.
k2 = 10.
n_col = 6
n_row = 5
N = n_col*n_row
m1=1
m2=10
dphi_index = -1
dphi = 10**dphi_index
d0 = 0.1
Lx = d0*n_col
Ly = (n_row-1)*np.sqrt(3)/2*d0+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
phi0 = N*np.pi*d0**2/4/(Lx*Ly)
d_ini = d0*np.sqrt(1+dphi/phi0)
D = np.zeros(N)+d_ini
#D = np.zeros(N)+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
for i_row in range(1, n_row+1):
for i_col in range(1, n_col+1):
ind = (i_row-1)*n_col+i_col-1
if i_row%2 == 1:
x0[ind] = (i_col-1)*d0
else:
x0[ind] = (i_col-1)*d0+0.5*d0
y0[ind] = (i_row-1)*np.sqrt(3)/2*d0
y0 = y0+0.5*d0
mass = np.zeros(N) + 1
k_list = np.array([k1, k2, k1 * k2 / (k1 + k2)])
k_type = indices #np.zeros(N, dtype=np.int8)
#k_type[indices] = 1
# specify the input ports and the output port
SP_scheme = 0
digit_in = SP_scheme//2
digit_out = SP_scheme-2*digit_in
ind_in1 = int((n_col+1)/2)+digit_in - 1
ind_in2 = ind_in1 + 2
ind_out = int(N-int((n_col+1)/2)+digit_out)
ind_fix = int((n_row+1)/2)*n_col-int((n_col+1)/2)
# show packing
ConfigPlot_DiffStiffness3(N, x0, y0, D, [Lx,Ly], k_type, 0, '/Users/atoosa/Desktop/results/packing.pdf', ind_in1, ind_in2, ind_out)
def plotInOut_and(indices):
#%% Initial Configuration
k1 = 1.
k2 = 10.
m1 = 1
m2 = 10
n_col = 6
n_row = 5
N = n_col*n_row
Nt_fire = 1e6
dt_ratio = 40
Nt_SD = 1e5
Nt_MD = 1e5
dphi_index = -1
dphi = 10**dphi_index
d0 = 0.1
d_ratio = 1.1
Lx = d0*n_col
Ly = (n_row-1)*np.sqrt(3)/2*d0+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
phi0 = N*np.pi*d0**2/4/(Lx*Ly)
d_ini = d0*np.sqrt(1+dphi/phi0)
D = np.zeros(N)+d_ini
#D = np.zeros(N)+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
for i_row in range(1, n_row+1):
for i_col in range(1, n_col+1):
ind = (i_row-1)*n_col+i_col-1
if i_row%2 == 1:
x0[ind] = (i_col-1)*d0
else:
x0[ind] = (i_col-1)*d0+0.5*d0
y0[ind] = (i_row-1)*np.sqrt(3)/2*d0
y0 = y0+0.5*d0
mass = np.zeros(N) + 1
k_list = np.array([k1, k2, k1 * k2 / (k1 + k2)])
k_type = indices #np.zeros(N, dtype=np.int8)
#k_type[indices] = 1
# Steepest Descent to get energy minimum
#x_ini, y_ini, p_now = MD_YFixed_ConstV_SP_SD_DiffK(Nt_SD, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
x_ini, y_ini, p_now = FIRE_YFixed_ConstV_DiffK(Nt_fire, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
# skip the steepest descent for now to save time
#x_ini = x0
#y_ini = y0
# calculate the eigenfrequency spectrum to locate the band gap (plotted below)
w, v = DM_mass_DiffK_Yfixed(N, x_ini, y_ini, D, mass, Lx, 0.0, Ly, k_list, k_type)
w = np.real(w)
v = np.real(v)
freq = np.sqrt(np.absolute(w))
ind_sort = np.argsort(freq)
freq = freq[ind_sort]
v = v[:, ind_sort]
ind = freq > 1e-4
eigen_freq = freq[ind]
eigen_mode = v[:, ind]
w_delta = eigen_freq[1:] - eigen_freq[0:-1]
index = np.argmax(w_delta)
F_low_exp = eigen_freq[index]
F_high_exp = eigen_freq[index+1]
plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.scatter(np.arange(0, len(eigen_freq)), eigen_freq, marker='x', color='blue')
plt.xlabel(r"Index $(k)$", fontsize=16)
plt.ylabel(r"Frequency $(\omega)$", fontsize=16)
plt.title("Frequency Spectrum", fontsize=16, fontweight="bold")
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
props = dict(facecolor='green', alpha=0.1)
myText = r'$\omega_{low}=$'+"{:.2f}".format(F_low_exp)+"\n"+r'$\omega_{high}=$'+"{:.2f}".format(F_high_exp)+"\n"+r'$\Delta \omega=$'+"{:.2f}".format(max(w_delta))
#plt.text(0.78, 0.15, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16, bbox=props)
plt.text(0.2, 0.8, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16, bbox=props)
plt.hlines(y=7, xmin=0, xmax=50, linewidth=1, linestyle='dashdot', color='limegreen', alpha=0.9)
plt.hlines(y=10, xmin=0, xmax=50, linewidth=1, linestyle='dotted', color='brown', alpha=0.9)
plt.text(51, 5, r'$\omega=7$', fontsize=12, color='limegreen', alpha=0.9)
plt.text(51, 12, r'$\omega=10$', fontsize=12, color='brown', alpha=0.9)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tight_layout()
plt.show()
print("specs:")
print(F_low_exp)
print(F_high_exp)
print(max(w_delta))
# specify the input ports and the output port
SP_scheme = 0
digit_in = SP_scheme//2
digit_out = SP_scheme-2*digit_in
ind_in1 = int((n_col+1)/2)+digit_in - 1
ind_in2 = ind_in1 + 2
ind_out = int(N-int((n_col+1)/2)+digit_out)
ind_fix = int((n_row+1)/2)*n_col-int((n_col+1)/2)
B = 1
Nt = 1e4 # reduced from 1e5 to shorten the runs
# we are designing an AND gate at this frequency
Freq_Vibr = 7
# case 0, input [0, 0]
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.hlines(y=0, xmin=0, xmax=30, color='green', label='Input1', linestyle='dotted')
plt.hlines(y=0, xmin=0, xmax=30, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.hlines(y=0, xmin=0, xmax=30, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 00", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.ylim(0, 0.005)
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.hlines(y=0, xmin=0, xmax=10000, color='green', label='Input1', linestyle='solid')
plt.hlines(y=0, xmin=0, xmax=10000, color='blue', label='Input2', linestyle='dotted')
plt.hlines(y=0, xmin=0, xmax=10000, color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 00", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.ylim(-0.0100, 0.0100)
plt.tight_layout()
plt.show()
# case 1, input [1, 1]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out1 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain1 = out1/(inp1+inp2)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 11", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
myText = 'Gain='+"{:.3f}".format(gain1)
plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 11", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# case 2, input [1, 0]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 0
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out2 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain2 = out2/(inp1+inp2)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 10", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
myText = 'Gain='+"{:.3f}".format(gain2)
plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 10", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# case 3, input [0, 1]
Amp_Vibr1 = 0
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out3 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain3 = out3/(inp1+inp2)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 01", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
myText = 'Gain='+"{:.3f}".format(gain3)
plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 01", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
print("gain1:")
print(gain1)
print("gain2:")
print(gain2)
print("gain3:")
print(gain3)
andness = 2*gain1/(gain2+gain3)
return andness
def plotInOut_xor(indices):
#%% Initial Configuration
k1 = 1.
k2 = 10.
m1 = 1
m2 = 10
n_col = 6
n_row = 5
N = n_col*n_row
Nt_fire = 1e6
dt_ratio = 40
Nt_SD = 1e5
Nt_MD = 1e5
dphi_index = -1
dphi = 10**dphi_index
d0 = 0.1
d_ratio = 1.1
Lx = d0*n_col
Ly = (n_row-1)*np.sqrt(3)/2*d0+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
phi0 = N*np.pi*d0**2/4/(Lx*Ly)
d_ini = d0*np.sqrt(1+dphi/phi0)
D = np.zeros(N)+d_ini
#D = np.zeros(N)+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
for i_row in range(1, n_row+1):
for i_col in range(1, n_col+1):
ind = (i_row-1)*n_col+i_col-1
if i_row%2 == 1:
x0[ind] = (i_col-1)*d0
else:
x0[ind] = (i_col-1)*d0+0.5*d0
y0[ind] = (i_row-1)*np.sqrt(3)/2*d0
y0 = y0+0.5*d0
mass = np.zeros(N) + 1
k_list = np.array([k1, k2, k1 * k2 / (k1 + k2)])
k_type = indices #np.zeros(N, dtype=np.int8)
#k_type[indices] = 1
# Steepest Descent to get energy minimum
#x_ini, y_ini, p_now = MD_YFixed_ConstV_SP_SD_DiffK(Nt_SD, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
x_ini, y_ini, p_now = FIRE_YFixed_ConstV_DiffK(Nt_fire, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
# skip the steepest descent for now to save time
#x_ini = x0
#y_ini = y0
# calculate the eigenfrequency spectrum to locate the band gap (plotted below)
w, v = DM_mass_DiffK_Yfixed(N, x_ini, y_ini, D, mass, Lx, 0.0, Ly, k_list, k_type)
w = np.real(w)
v = np.real(v)
freq = np.sqrt(np.absolute(w))
ind_sort = np.argsort(freq)
freq = freq[ind_sort]
v = v[:, ind_sort]
ind = freq > 1e-4
eigen_freq = freq[ind]
eigen_mode = v[:, ind]
w_delta = eigen_freq[1:] - eigen_freq[0:-1]
index = np.argmax(w_delta)
F_low_exp = eigen_freq[index]
F_high_exp = eigen_freq[index+1]
plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.scatter(np.arange(0, len(eigen_freq)), eigen_freq, marker='x', color='blue')
plt.xlabel(r"Index $(k)$", fontsize=16)
plt.ylabel(r"Frequency $(\omega)$", fontsize=16)
plt.title("Frequency Spectrum", fontsize=16, fontweight="bold")
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
props = dict(facecolor='green', alpha=0.1)
myText = r'$\omega_{low}=$'+"{:.2f}".format(F_low_exp)+"\n"+r'$\omega_{high}=$'+"{:.2f}".format(F_high_exp)+"\n"+r'$\Delta \omega=$'+"{:.2f}".format(max(w_delta))
plt.text(0.78, 0.15, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16, bbox=props)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tight_layout()
plt.show()
print("specs:")
print(F_low_exp)
print(F_high_exp)
print(max(w_delta))
# specify the input ports and the output port
SP_scheme = 0
digit_in = SP_scheme//2
digit_out = SP_scheme-2*digit_in
ind_in1 = int((n_col+1)/2)+digit_in - 1
ind_in2 = ind_in1 + 2
ind_out = int(N-int((n_col+1)/2)+digit_out)
ind_fix = int((n_row+1)/2)*n_col-int((n_col+1)/2)
B = 1
Nt = 1e4 # reduced from 1e5 to shorten the runs
# we are designing an XOR gate at this frequency
Freq_Vibr = 10
# case 1, input [1, 1]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out1 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain1 = out1/(inp1+inp2)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 11", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
myText = 'Gain='+"{:.3f}".format(gain1)
plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 11", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# case 2, input [1, 0]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 0
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out2 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain2 = out2/(inp1+inp2)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 10", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
myText = 'Gain='+"{:.3f}".format(gain2)
plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 10", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# case 3, input [0, 1]
Amp_Vibr1 = 0
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
ind = np.where(freq_fft>Freq_Vibr)
index_=ind[0][0]
# fft of the output port at the driving frequency
out3 = fft_x_out[index_-1] + (fft_x_out[index_]-fft_x_out[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input1 at driving frequency
inp1 = fft_in1[index_-1] + (fft_in1[index_]-fft_in1[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
# fft of input2 at driving frequency
inp2 = fft_in2[index_-1] + (fft_in2[index_]-fft_in2[index_-1])*((Freq_Vibr-freq_fft[index_-1])/(freq_fft[index_]-freq_fft[index_-1]))
gain3 = out3/(inp1+inp2)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 01", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
myText = 'Gain='+"{:.3f}".format(gain3)
plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 01", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
print("gain1:")
print(gain1)
print("gain2:")
print(gain2)
print("gain3:")
print(gain3)
XOR = (gain2+gain3)/(2*gain1)
return XOR
def plotInOut_adder(indices):
#%% Initial Configuration
k1 = 1.
k2 = 10.
m1 = 1
m2 = 10
n_col = 6
n_row = 5
N = n_col*n_row
Nt_fire = 1e6
dt_ratio = 40
Nt_SD = 1e5
Nt_MD = 1e5
dphi_index = -1
dphi = 10**dphi_index
d0 = 0.1
d_ratio = 1.1
Lx = d0*n_col
Ly = (n_row-1)*np.sqrt(3)/2*d0+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
phi0 = N*np.pi*d0**2/4/(Lx*Ly)
d_ini = d0*np.sqrt(1+dphi/phi0)
D = np.zeros(N)+d_ini
#D = np.zeros(N)+d0
x0 = np.zeros(N)
y0 = np.zeros(N)
for i_row in range(1, n_row+1):
for i_col in range(1, n_col+1):
ind = (i_row-1)*n_col+i_col-1
if i_row%2 == 1:
x0[ind] = (i_col-1)*d0
else:
x0[ind] = (i_col-1)*d0+0.5*d0
y0[ind] = (i_row-1)*np.sqrt(3)/2*d0
y0 = y0+0.5*d0
mass = np.zeros(N) + 1
k_list = np.array([k1, k2, k1 * k2 / (k1 + k2)])
k_type = indices #np.zeros(N, dtype=np.int8)
#k_type[indices] = 1
# Steepest Descent to get energy minimum
#x_ini, y_ini, p_now = MD_YFixed_ConstV_SP_SD_DiffK(Nt_SD, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
x_ini, y_ini, p_now = FIRE_YFixed_ConstV_DiffK(Nt_fire, N, x0, y0, D, mass, Lx, Ly, k_list, k_type)
# skip the steepest descent for now to save time
#x_ini = x0
#y_ini = y0
# calculate the eigenfrequency spectrum to locate the band gap (plotted below)
w, v = DM_mass_DiffK_Yfixed(N, x_ini, y_ini, D, mass, Lx, 0.0, Ly, k_list, k_type)
w = np.real(w)
v = np.real(v)
freq = np.sqrt(np.absolute(w))
ind_sort = np.argsort(freq)
freq = freq[ind_sort]
v = v[:, ind_sort]
ind = freq > 1e-4
eigen_freq = freq[ind]
eigen_mode = v[:, ind]
w_delta = eigen_freq[1:] - eigen_freq[0:-1]
index = np.argmax(w_delta)
F_low_exp = eigen_freq[index]
F_high_exp = eigen_freq[index+1]
plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.scatter(np.arange(0, len(eigen_freq)), eigen_freq, marker='x', color='blue')
plt.xlabel(r"Index $(k)$", fontsize=16)
plt.ylabel(r"Frequency $(\omega)$", fontsize=16)
plt.title("Frequency Spectrum", fontsize=16, fontweight="bold")
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
props = dict(facecolor='green', alpha=0.1)
myText = r'$\omega_{low}=$'+"{:.2f}".format(F_low_exp)+"\n"+r'$\omega_{high}=$'+"{:.2f}".format(F_high_exp)+"\n"+r'$\Delta \omega=$'+"{:.2f}".format(max(w_delta))
#plt.text(0.78, 0.15, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16, bbox=props)
plt.text(0.2, 0.8, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16, bbox=props)
plt.hlines(y=7, xmin=0, xmax=50, linewidth=1, linestyle='dashdot', color='limegreen', alpha=0.9)
plt.hlines(y=10, xmin=0, xmax=50, linewidth=1, linestyle='dotted', color='brown', alpha=0.9)
plt.text(51, 5, r'$\omega=7$', fontsize=12, color='limegreen', alpha=0.9)
plt.text(51, 12, r'$\omega=10$', fontsize=12, color='brown', alpha=0.9)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tight_layout()
plt.show()
print("specs:")
print(F_low_exp)
print(F_high_exp)
print(max(w_delta))
# specify the input ports and the output port
SP_scheme = 0
digit_in = SP_scheme//2
digit_out = SP_scheme-2*digit_in
ind_in1 = int((n_col+1)/2)+digit_in - 1
ind_in2 = ind_in1 + 2
ind_out = int(N-int((n_col+1)/2)+digit_out)
ind_fix = int((n_row+1)/2)*n_col-int((n_col+1)/2)
B = 1
Nt = 1e4 # reduced from 1e5 to shorten the runs
# the AND gate is probed at Freq_Vibr1 = 7 and the XOR gate at Freq_Vibr2 = 10
Freq_Vibr1 = 7
Freq_Vibr2 = 10
# case 1, input [1, 1]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 1e-2
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 11", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
#myText = 'Gain='+"{:.3f}".format(gain1)
#plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 11", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# case 2, input [1, 0]
Amp_Vibr1 = 1e-2
Amp_Vibr2 = 0
# changed the resonator to one in MD_functions file and vibrations in x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 10", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
#myText = 'Gain='+"{:.3f}".format(gain2)
#plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 10", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
# case 3, input [0, 1]
Amp_Vibr1 = 0
Amp_Vibr2 = 1e-2
# resonator changed to the one in the MD_functions file; vibrations are along the x direction
freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, mean_cont, nt_rec, Ek_now, Ep_now, cont_now = MD_VibrSP_ConstV_Yfixed_DiffK_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(freq_fft, fft_in1, color='green', label='Input1', linestyle='dotted')
plt.plot(freq_fft, fft_in2, color='blue', label='Input2', linestyle=(0, (3, 5, 1, 5)))
plt.plot(freq_fft, fft_x_out, color='red', label='Output', linestyle='dashed')
plt.xlabel("Frequency", fontsize=16)
plt.ylabel("Amplitude of FFT", fontsize=16)
plt.title("Logic Gate Response - input = 01", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
#plt.axvline(x=Freq_Vibr, color='purple', linestyle='solid', alpha=0.5)
#myText = 'Gain='+"{:.3f}".format(gain3)
#plt.text(0.5, 0.9, myText, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
x_in1, x_in2, x_out = MD_VibrSP_ConstV_Yfixed_DiffK2_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D, mass, [Lx, Ly], Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out)
fig = plt.figure(figsize=(6.4,4.8))
ax = plt.axes()
plt.plot(x_in1, color='green', label='Input1', linestyle='solid')
plt.plot(x_in2, color='blue', label='Input2', linestyle='dotted')
plt.plot(x_out-np.mean(x_out, axis=0), color='red', label='Output', linestyle='solid')
plt.xlabel("Time Steps", fontsize=16)
plt.ylabel("Displacement", fontsize=16)
plt.title("Logic Gate Response - input = 01", fontsize=16)
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc='upper right', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.show()
return 0
creator.create("FitnessMax", base.Fitness, weights=(1.0, 1.0))
creator.create("Individual", list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, 30)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", evaluate)
toolbox.register("mate", tools.cxOnePoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selNSGA2)
indices = pickle.load(open('indices.pickle', 'rb'))
outputs = pickle.load(open('outputs.pickle', 'rb'))
ploting = []
# plot the pareto front again without annotation
plt.figure(figsize=(4,4))
for i in indices:
ind = np.array(i)
showPacking(ind)
print(outputs)
| 48,107 | 39.156928 | 267 |
py
|
gecco-2022
|
gecco-2022-main/AND-XOR/MD_functions.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 13 13:09:27 2017
@author: Hightoutou
"""
import numpy as np
import time
#from numba import jit
from FFT_functions import FFT_Fup, FFT_vCorr
from plot_functions import Line_multi, Line_yy, Line_single
from ConfigPlot import ConfigPlot_YFixed_rec
import matplotlib.pyplot as plt
#import IPython.core.debugger
#dbg = IPython.core.debugger.Pdb()
#@jit
def force_YFixed(Fx, Fy, N, x, y, D, Lx, y_bot, y_up):
Fup = 0
Fbot = 0
Ep = 0
cont = 0
cont_up = 0
p_now = 0
for nn in np.arange(N):
d_up = y_up-y[nn]
d_bot = y[nn]-y_bot
r_now = 0.5*D[nn]
if d_up<r_now:
F = -(1-d_up/r_now)/(r_now)
Fup -= F
Fy[nn] += F
Ep += (1/2)*(1-d_up/r_now)**2
cont_up += 1
cont += 1
#dbg.set_trace()
if d_bot<r_now:
F = -(1-d_bot/r_now)/(r_now)
Fbot += F
Fy[nn] -= F
Ep += (1/2)*(1-d_bot/r_now)**2
cont += 1
for mm in np.arange(nn+1, N):
dy = y[mm]-y[nn]
Dmn = 0.5*(D[mm]+D[nn])
if abs(dy) < Dmn:
dx = x[mm]-x[nn]
dx = dx-round(dx/Lx)*Lx
dmn = np.sqrt(dx**2+dy**2)
if dmn < Dmn:
F = -(1-dmn/Dmn)/Dmn/dmn
Fx[nn] += F*dx
Fx[mm] -= F*dx
Fy[nn] += F*dy
Fy[mm] -= F*dy
Ep += (1/2)*(1-dmn/Dmn)**2
cont += 1
p_now += (-F)*(dx**2+dy**2)
return Fx, Fy, Fup, Fbot, Ep, cont, p_now, cont_up
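# --- Added note: the contacts above are one-sided linear (harmonic) springs,
# V(d) = 0.5*(1 - d/Dmn)**2 for overlap d < Dmn, so the repulsive force
# magnitude is -dV/dd = (1 - d/Dmn)/Dmn; the extra 1/dmn in F above only
# normalizes the (dx, dy) projection.  Standalone finite-difference check:
def _check_pair_force(d=0.8, Dmn=1.0, h=1e-7):
    V = lambda r: 0.5*(1 - r/Dmn)**2 if r < Dmn else 0.0
    f_analytic = (1 - d/Dmn)/Dmn              # repulsive force magnitude
    f_numeric = -(V(d + h) - V(d - h))/(2*h)  # central difference of -dV/dd
    assert abs(f_analytic - f_numeric) < 1e-6
    return f_analytic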
def force_YFixed_DiffK_VL(Fx, Fy, N, x, y, D, Lx, y_bot, y_up, k_list, k_type, VL_list, VL_counter):
Fup = 0
Fbot = 0
Ep = 0
cont = 0
cont_up = 0
p_now = 0
for nn in np.arange(N):
d_up = y_up - y[nn]
d_bot = y[nn] - y_bot
r_now = 0.5 * D[nn]
if d_up < r_now:
F = -k_list[k_type[nn]] * (1 - d_up / r_now) / (r_now)
Fup -= F
Fy[nn] += F
Ep += 0.5 * k_list[k_type[nn]] * (1 - d_up / r_now)**2
cont_up += 1
cont += 1
#dbg.set_trace()
if d_bot < r_now:
F = -k_list[k_type[nn]] * (1 - d_bot / r_now) / (r_now)
Fbot += F
Fy[nn] -= F
Ep += 0.5 * k_list[k_type[nn]] * (1 - d_bot / r_now)**2
cont += 1
for vl_idx in np.arange(VL_counter):
nn = VL_list[vl_idx][0]
mm = VL_list[vl_idx][1]
dy = y[mm] - y[nn]
Dmn = 0.5 * (D[mm] + D[nn])
if abs(dy) < Dmn:
dx = x[mm] - x[nn]
dx = dx - round(dx / Lx) * Lx
if abs(dx) < Dmn:
dmn = np.sqrt(dx**2 + dy**2)
if dmn < Dmn:
k = k_list[(k_type[nn] ^ k_type[mm]) + np.maximum(k_type[nn], k_type[mm])]
F = -k * (1 - dmn / Dmn) / Dmn / dmn
Fx[nn] += F * dx
Fx[mm] -= F * dx
Fy[nn] += F * dy
Fy[mm] -= F * dy
Ep += 0.5 * k * (1 - dmn / Dmn)**2
cont += 1
p_now += (-F) * (dx**2 + dy**2)
return Fx, Fy, Fup, Fbot, Ep, cont, p_now, cont_up
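# --- Added helper (unused by the simulation): reproduces the k_list index
# arithmetic above for binary particle types a, b in {0, 1}:
#   (0,0) -> (0^0)+max(0,0) = 0   both type-0 contacts use k_list[0]
#   (1,1) -> (1^1)+max(1,1) = 1   both type-1 contacts use k_list[1]
#   (0,1) -> (0^1)+max(0,1) = 2   mixed contacts use k_list[2]
def _pair_k_index(a, b):
    return (a ^ b) + max(a, b)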
def force_YFixed_upDS(Fx, Fy, N, x, y, D, Lx, y_bot, y_up, ind_up):
Fup = 0
Fbot = 0
Ep = 0
cont = 0
cont_up = 0
p_now = 0
for nn in np.arange(N):
d_up = y_up-y[nn]
d_bot = y[nn]-y_bot
r_now = 0.5*D[nn]
if ind_up[nn] == 1:
F = -(1-d_up/r_now)/(r_now)
Fup -= F
Fy[nn] += F
Ep += (1/2)*(1-d_up/r_now)**2
#dbg.set_trace()
if d_up<r_now:
cont_up = cont_up+1
cont += 1
if d_bot<r_now:
F = -(1-d_bot/r_now)/(r_now)
Fbot += F
Fy[nn] -= F
Ep += (1/2)*(1-d_bot/r_now)**2
cont += 1
for mm in np.arange(nn+1, N):
dy = y[mm]-y[nn]
Dmn = 0.5*(D[mm]+D[nn])
if abs(dy) < Dmn:
dx = x[mm]-x[nn]
dx = dx-round(dx/Lx)*Lx
dmn = np.sqrt(dx**2+dy**2)
if dmn < Dmn:
F = -(1-dmn/Dmn)/Dmn/dmn
Fx[nn] += F*dx
Fx[mm] -= F*dx
Fy[nn] += F*dy
Fy[mm] -= F*dy
Ep += (1/2)*(1-dmn/Dmn)**2
cont += 1
p_now += (-F)*(dx**2+dy**2)
return Fx, Fy, Fup, Fbot, Ep, cont, p_now, cont_up
#@jit
def force_Regular(Fx, Fy, N, x, y, D, Lx, Ly):
Ep = 0
cont = 0
p_now = 0
for nn in np.arange(N):
for mm in np.arange(nn+1, N):
dy = y[mm]-y[nn]
dy = dy-round(dy/Ly)*Ly
Dmn = 0.5*(D[mm]+D[nn])
if abs(dy) < Dmn:
dx = x[mm]-x[nn]
dx = dx-round(dx/Lx)*Lx
dmn = np.sqrt(dx**2+dy**2)
if dmn < Dmn:
F = -(1-dmn/Dmn)/Dmn/dmn
Fx[nn] += F*dx
Fx[mm] -= F*dx
Fy[nn] += F*dy
Fy[mm] -= F*dy
Ep += (1/2)*(1-dmn/Dmn)**2
cont += 1
p_now += (-F)*(dx**2+dy**2)
return Fx, Fy, Ep, cont, p_now
def MD_UpDownFixed_SD(N, x0, y0, D0, m0, L):
dt = min(D0)/40
Nt = int(1e4)
Ep = np.zeros(Nt)
F_up = np.zeros(Nt)
F_bot = np.zeros(Nt)
F_tot = np.zeros(Nt)
Fup_now = 0
vx = np.zeros(N)
vy = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
t_start = time.time()
for nt in np.arange(Nt):
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up_now = force_YFixed(Fx, Fy, N, x, y, D0, L[0], 0, L[1])
#Fx, Fy, Ep_now, cont_now, p_now = force_Regular(Fx, Fy, N, x, y, D0, L[0], L[1])
F_up[nt] = Fup_now
F_bot[nt] = Fbot_now
Ep[nt] = Ep_now
vx = np.divide(Fx, m0)
vy = np.divide(Fy, m0)
x += vx*dt
y += vy*dt
F_tot[nt] = sum(np.absolute(Fx)+np.absolute(Fy))
t_end = time.time()
print ("time=%.3e" %(t_end-t_start))
if 1 == 0:
# Plot the amplitude of F
Line_single(range(Nt), F_tot[0:Nt], '-', 't', 'Ftot', 'log', yscale='log')
return x, y
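# --- Added note: the "_SD" loop above is overdamped steepest descent rather
# than true MD: velocities are reset to F/m every step, so positions follow
# x <- x + (F/m)*dt until the residual force vanishes.  Minimal 1D analogue
# (hypothetical names), relaxing a particle in a quadratic well:
def _sd_relax_1d(x=1.0, k=1.0, m=1.0, dt=1e-2, nsteps=10000):
    for _ in range(nsteps):
        F = -k*x          # restoring force of the well
        x += (F/m)*dt     # overdamped update, no momentum carried over
    return x              # approaches the minimum at x = 0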
def MD_VibrBot_ForceUp(N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr):
dt = min(D0)/40
Nt = int(5e4)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
F_up = np.zeros(Nt)
cont = np.zeros(Nt)
y_bot = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)+Amp_Vibr
#y_bot = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))
vx = np.zeros(N)
vy = np.zeros(N)
if 1 == 0:
y_bot = np.zeros(Nt)
vx = np.random.rand(N)
vy = np.random.rand(N)
T_rd = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
T_set = 1e-6
vx = vx*np.sqrt(N*T_set/T_rd)
vy = vy*np.sqrt(N*T_set/T_rd)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up_now = force_YFixed(Fx, Fy, N, x, y, D0, L[0], y_bot[nt], L[1])
#Fx, Fy, Ep_now, cont_now, p_now = force_Regular(Fx, Fy, N, x, y, D0, L[0], L[1])
F_up[nt] = Fup_now
Ep[nt] = Ep_now
cont[nt] = cont_now
ax = np.divide(Fx, m0);
ay = np.divide(Fy, m0);
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
freq_now, fft_now = FFT_Fup(Nt, F_up[:Nt], dt, Freq_Vibr)
# Plot the amplitude of F
if 1 == 1:
Line_yy([dt*np.arange(Nt), dt*np.arange(Nt)], [F_up[0:Nt],y_bot[0:Nt]], ['-', ':'], 't', ['$F_{up}$', '$y_{bottom}$'])
Etot = Ep[1:Nt]+Ek[1:Nt]
xdata = [dt*np.arange(Nt), dt*np.arange(Nt), dt*np.arange(Nt-1)]
ydata = [Ep[0:Nt], Ek[0:Nt], Etot]
line_spec = ['--', ':', 'r-']
Line_multi(xdata, ydata, line_spec, 't', '$E$', 'log')
print("std(Etot)=%e\n" %(np.std(Etot)))
#dt2 = 1e-3
#xx = np.arange(0, 5, dt2)
#yy = np.sin(50*xx)+np.sin(125*xx)
#print("dt=%e, w=%f\n" % (dt, Freq_Vibr))
FFT_Fup(Nt, F_up[:Nt], dt, Freq_Vibr)
#FFT_Fup(yy.size, yy, dt2, 50)
return freq_now, fft_now, np.mean(cont)
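# --- Added sketch: the paired "first/second step in Verlet integration" lines
# above are the standard velocity-Verlet scheme,
#   x(t+dt) = x(t) + v*dt + 0.5*a_old*dt**2
#   v(t+dt) = v(t) + 0.5*(a_old + a_new)*dt
# Standalone 1D harmonic-oscillator version (hypothetical names):
def _velocity_verlet_1d(x=1.0, v=0.0, k=1.0, m=1.0, dt=1e-3, nsteps=1000):
    a_old = -k*x/m
    for _ in range(nsteps):
        x = x + v*dt + 0.5*a_old*dt**2   # first step: positions
        a_new = -k*x/m                   # forces at the new positions
        v = v + 0.5*(a_old + a_new)*dt   # second step: velocities
        a_old = a_new
    return x, v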
def MD_Periodic_equi(Nt, N, x0, y0, D0, m0, L, T_set, V_em, n_em):
dt = min(D0)/40
Nt = int(Nt)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
vx_rec = np.zeros([Nt, N])
vy_rec = np.zeros([Nt, N])
vx = np.zeros(N)
vy = np.zeros(N)
for ii in np.arange(n_em):
#for ii in [60]:
ind1 = 2*np.arange(N)
ind2 = ind1+1
vx += V_em[ind1, ii]
vy += V_em[ind2, ii]
T_rd = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
vx = vx*np.sqrt(N*T_set/T_rd)
vy = vy*np.sqrt(N*T_set/T_rd)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Ep_now, cont_now, p_now = force_Regular(Fx, Fy, N, x, y, D0, L[0], L[1])
Ep[nt] = Ep_now
cont[nt] = cont_now
ax = np.divide(Fx, m0);
ay = np.divide(Fy, m0);
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
vx_rec[nt] = vx
vy_rec[nt] = vy
t_end = time.time()
print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("cont_min/cont_max=%f\n" %(CB_ratio))
#Etot = Ep[1:Nt]+Ek[1:Nt]
#xdata = [dt*np.arange(Nt), dt*np.arange(Nt), dt*np.arange(Nt-1)]
#ydata = [Ep[0:Nt], Ek[0:Nt], Etot]
#line_spec = ['--', ':', 'r-']
#Line_multi(xdata, ydata, line_spec, 't', '$E$', 'log', 'log')
freq_now, fft_now = FFT_vCorr(Nt, N, vx_rec, vy_rec, dt)
return freq_now, fft_now, np.mean(cont)
def MD_YFixed_ConstP_SD(Nt, N, x0, y0, D0, m0, L, F0_up):
dt = D0[0]/40
Nt = int(Nt)
#Nt = int(5e6)
#Nt = int(5e2)
Ep = np.zeros(Nt)
F_up = np.zeros(Nt)
F_bot = np.zeros(Nt)
F_tot = np.zeros(Nt)
Fup_now = 0
y_up = y0[N]
vx = np.zeros(N+1)
vy = np.zeros(N+1)
x = np.array(x0)
y = np.array(y0)
t_start = time.time()
for nt in np.arange(Nt):
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up_now = force_YFixed(Fx, Fy, N, x[0:N], y[0:N], D0[0:N], L[0], 0, y[N])
F_up[nt] = Fup_now+F0_up
F_bot[nt] = Fbot_now
Ep[nt] = Ep_now+(y_up-y[N])*F0_up
vx = 0.1*np.divide(np.append(Fx,0), m0)
vy = 0.1*np.divide(np.append(Fy, F_up[nt]), m0)
x += vx*dt
y += vy*dt
F_tot[nt] = sum(np.absolute(Fx)+np.absolute(Fy))
#print("nt=%d, Fup=%e, Fup_tot=%e\n" % (nt, Fup_now, F_up[nt]))
#dbg.set_trace()
t_end = time.time()
print ("F_tot=%.3e\n" %(F_tot[nt]))
print ("time=%.3e" %(t_end-t_start))
if 1 == 0:
# Plot the amplitude of F
Line_single(range(Nt), F_tot[0:Nt], '-', 't', 'Ftot', 'log', yscale='log')
#Line_single(range(Nt), -F_up[0:Nt], '-', 't', 'Fup', 'log', yscale='log')
#Line_single(range(Nt), Ep[0:Nt], '-', 't', 'Ep', 'log', yscale='linear')
return x, y, p_now
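# --- Added note: in the constant-pressure ("ConstP") routines the top wall is
# an extra degree of freedom y[N] loaded by the external force F0_up, so the
# recorded Ep adds the work term (y_up - y[N])*F0_up, i.e. an enthalpy-like
# quantity H = U + F_ext*(wall displacement).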
def MD_VibrBot_DispUp_ConstP(mark_upDS, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr, F0_up):
dt = D0[0]/40
B = 0.1 # damping coefficient
Nt = int(5e7)
#Nt = int(5e2)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
y_up0 = y_ini[N]
y_up = np.zeros(Nt)
F_up = np.zeros(Nt)
cont = np.zeros(Nt)
cont_up = np.zeros(Nt)
p = np.zeros(Nt)
y_bot = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)+Amp_Vibr
#y_bot = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))
vx = np.zeros(N+1)
vy = np.zeros(N+1)
# for test
if 1 == 0:
y_bot = np.zeros(Nt)
vx = np.random.rand(N+1)
vx[N] = 0
vy = np.random.rand(N+1)
vy[N] = 0
T_rd = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
T_set = 1e-6
vx = vx*np.sqrt(N*T_set/T_rd)
vy = vy*np.sqrt(N*T_set/T_rd)
ax_old = np.zeros(N+1)
ay_old = np.zeros(N+1)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
if mark_upDS == 1:
ind_up = np.zeros(N)
for ii in np.arange(N):
d_up = y[N]-y[ii]
r_now = 0.5*D0[ii]
if d_up<r_now:
ind_up[ii] = 1
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
y_up[nt] = y[N]
Fx = np.zeros(N)
Fy = np.zeros(N)
if mark_upDS == 0:
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up_now = force_YFixed(Fx, Fy, N, x[0:N], y[0:N], D0[0:N], L[0], y_bot[nt], y[N])
elif mark_upDS == 1:
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up_now = force_YFixed_upDS(Fx, Fy, N, x[0:N], y[0:N], D0[0:N], L[0], y_bot[nt], y[N], ind_up)
F_up[nt] = Fup_now+F0_up
Ep[nt] = Ep_now+(y_up0-y[N])*F0_up
cont[nt] = cont_now
cont_up[nt] = cont_up_now
p[nt] = p_now
Fx_all = np.append(Fx,0)-B*vx
Fy_all = np.append(Fy, F_up[nt])-B*vy
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
y_up = y_up-y_up0
freq_y, fft_y = FFT_Fup(int(Nt/2), y_up[int(Nt/2):Nt], dt, Freq_Vibr)
#freq_y, fft_y_real, fft_y_imag = FFT_Fup_RealImag(int(Nt/2), y_up[int(Nt/2):Nt], dt, Freq_Vibr)
freq_bot, fft_bot = FFT_Fup(int(Nt/2), y_bot[int(Nt/2):Nt], dt, Freq_Vibr)
# plot the energy to see when the system reaches steady state
if 1 == 0:
Etot = Ep+Ek
nt_start = int(1e3)
xdata = [range(nt_start, Nt), range(nt_start, Nt), range(Nt)]
ydata = [Ep[nt_start:Nt], Ek[nt_start:Nt], Etot]
line_spec = [':', ':', 'r-']
Line_multi(xdata, ydata, line_spec, 't', '$E$', 'linear', 'log')
# Plot the amplitude of F
if 1 == 0:
Line_yy([dt*np.arange(Nt), dt*np.arange(Nt)], [F_up[0:Nt],y_bot[0:Nt]], ['-', ':'], 't', ['$F_{up}$', '$y_{bottom}$'])
Line_yy([dt*np.arange(Nt), dt*np.arange(Nt)], [y_up[0:Nt],y_bot[0:Nt]], ['-', ':'], 't', ['$y_{up}$', '$y_{bottom}$'])
Line_single(range(Nt), p[0:Nt], '-', 't', 'p', 'log', 'linear')
Etot = Ep[1:Nt]+Ek[1:Nt]
xdata = [dt*np.arange(Nt), dt*np.arange(Nt), dt*np.arange(Nt-1)]
ydata = [Ep[0:Nt], Ek[0:Nt], Etot]
line_spec = ['--', ':', 'r-']
#Line_multi(xdata, ydata, line_spec, 't', '$E$', 'log')
print("std(Etot)=%e\n" %(np.std(Etot)))
return freq_y, fft_y, freq_bot, fft_bot, np.mean(cont), np.mean(cont_up)
#return freq_y, fft_y_real, fft_y_imag, freq_bot, fft_bot, np.mean(cont)
def MD_VibrBot_DispUp_ConstP_ConfigRec(N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr, F0_up, fn):
dt = D0[0]/40
B = 0.1 # damping coefficient
Nt = int(5e6)
nt_rec = np.linspace(Nt-5e4, Nt, 500)
#Nt = int(1e4)
#nt_rec = np.linspace(0.5*Nt, Nt, 50)
nt_rec = nt_rec.astype(int)
ind_nt = 0
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
y_up0 = y_ini[N]
y_up = np.zeros(Nt)
F_up = np.zeros(Nt)
cont = np.zeros(Nt)
cont_up = np.zeros(Nt)
p = np.zeros(Nt)
y_bot = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)+Amp_Vibr
#y_bot = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))
vx = np.zeros(N+1)
vy = np.zeros(N+1)
ax_old = np.zeros(N+1)
ay_old = np.zeros(N+1)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
if nt == nt_rec[ind_nt]:
ConfigPlot_YFixed_rec(N, x[0:N], y[0:N], D0[0:N], L[0], y[N], y_bot[nt], m0[0:N], ind_nt, fn)
ind_nt += 1
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
y_up[nt] = y[N]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up_now = force_YFixed(Fx, Fy, N, x[0:N], y[0:N], D0[0:N], L[0], y_bot[nt], y[N])
F_up[nt] = Fup_now+F0_up
Ep[nt] = Ep_now+(y_up0-y[N])*F0_up
cont[nt] = cont_now
cont_up[nt] = cont_up_now
p[nt] = p_now
Fx_all = np.append(Fx,0)-B*vx
Fy_all = np.append(Fy, F_up[nt])-B*vy
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
y_up = y_up-y_up0
freq_y, fft_y = FFT_Fup(int(Nt/2), y_up[int(Nt/2):Nt], dt, Freq_Vibr)
freq_bot, fft_bot = FFT_Fup(int(Nt/2), y_bot[int(Nt/2):Nt], dt, Freq_Vibr)
return freq_y, fft_y, freq_bot, fft_bot, np.mean(cont), np.mean(cont_up)
def MD_VibrBot_DispUp_ConstP_EkCheck(Nt, mark_upDS, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr, F0_up):
dt = D0[0]/40
B = 0.1 # damping coefficient
Nt = int(Nt)
#Nt = int(5e7)
#Nt = int(1e4)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
Ek_now = np.array(0)
Ek_up_now = np.array(0)
Ep_now = np.array(0)
Ep_up_now = np.array(0)
#nt_rec = np.linspace(0.5*Nt, Nt, 50)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ep_up = np.zeros(Nt)
Ek = np.zeros(Nt)
Ek_up = np.zeros(Nt)
y_up0 = y_ini[N]
y_up = np.zeros(Nt)
F_up = np.zeros(Nt)
cont = np.zeros(Nt)
cont_up = np.zeros(Nt)
p = np.zeros(Nt)
y_bot = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)+Amp_Vibr
vx = np.zeros(N+1)
vy = np.zeros(N+1)
ax_old = np.zeros(N+1)
ay_old = np.zeros(N+1)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
if mark_upDS == 1:
ind_up = np.zeros(N)
for ii in np.arange(N):
d_up = y[N]-y[ii]
r_now = 0.5*D0[ii]
if d_up<r_now:
ind_up[ii] = 1
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
y_up[nt] = y[N]
Fx = np.zeros(N)
Fy = np.zeros(N)
if mark_upDS == 0:
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up_now = force_YFixed(Fx, Fy, N, x[0:N], y[0:N], D0[0:N], L[0], y_bot[nt], y[N])
elif mark_upDS == 1:
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up_now = force_YFixed_upDS(Fx, Fy, N, x[0:N], y[0:N], D0[0:N], L[0], y_bot[nt], y[N], ind_up)
F_up[nt] = Fup_now+F0_up
Ep[nt] = Ep_now+(y_up0-y[N])*F0_up
Ep_up[nt] = (y_up0-y[N])*F0_up
cont[nt] = cont_now
cont_up[nt] = cont_up_now
p[nt] = p_now
Fx_all = np.append(Fx,0)-B*vx
Fy_all = np.append(Fy, F_up[nt])-B*vy
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_up[nt] = 0.5*m0[N]*(vx[N]**2+vy[N]**2)
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ek_up_now = np.append(Ek_up_now, np.mean(Ek_up[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
Ep_up_now = np.append(Ep_up_now, np.mean(Ep_up[nt_rec[ii]:nt_rec[ii+1]]))
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
y_up = y_up-np.mean(y_up)
y_up = y_up/np.mean(np.absolute(y_up))
freq_y, fft_y = FFT_Fup(int(Nt/2), y_up[int(Nt/2):Nt], dt, Freq_Vibr)
freq_bot, fft_bot = FFT_Fup(int(Nt/2), y_bot[int(Nt/2):Nt], dt, Freq_Vibr)
return freq_y, fft_y, freq_bot, fft_bot, np.mean(cont), np.mean(cont_up)
#@jit
def force_YFixed_collision_ConstP(beta, Fx, Fy, N, x, y, vx, vy, D, Lx, y_bot, v_bot, y_up):
Fup = 0
Fbot = 0
Ep = 0
cont = 0
cont_up = 0
p_now = 0
#beta = 1
for nn in np.arange(N):
d_up = y_up-y[nn]
d_bot = y[nn]-y_bot
r_now = 0.5*D[nn]
if d_up<r_now:
F = -(1-d_up/r_now)/(r_now)
Fup -= F
Fy[nn] += F
dvy = vy[N]-vy[nn]
FD = beta*dvy
#FD = np.absolute(FD)
Fy[nn] += FD
Fup -= FD
Ep += (1/2)*(1-d_up/r_now)**2
cont_up += 1
cont += 1
#dbg.set_trace()
if d_bot<r_now:
F = -(1-d_bot/r_now)/(r_now)
Fbot += F
Fy[nn] -= F
dvy = v_bot-vy[nn]
FD = beta*dvy
Fy[nn] += FD
Ep += (1/2)*(1-d_bot/r_now)**2
cont += 1
for mm in np.arange(nn+1, N):
dy = y[mm]-y[nn]
Dmn = 0.5*(D[mm]+D[nn])
if abs(dy) < Dmn:
dx = x[mm]-x[nn]
dx = dx-round(dx/Lx)*Lx
dmn = np.sqrt(dx**2+dy**2)
if dmn < Dmn:
F = -(1-dmn/Dmn)/Dmn/dmn
Fx[nn] += F*dx
Fx[mm] -= F*dx
Fy[nn] += F*dy
Fy[mm] -= F*dy
dvx = vx[mm]-vx[nn]
dvy = vy[mm]-vy[nn]
FD = beta*(dvx*dx+dvy*dy)/dmn
#FD = np.absolute(FD)
Fx[nn] += FD*dx/dmn
Fx[mm] -= FD*dx/dmn
Fy[nn] += FD*dy/dmn
Fy[mm] -= FD*dy/dmn
Ep += (1/2)*(1-dmn/Dmn)**2
cont += 1
p_now += (-F)*(dx**2+dy**2)
return Fx, Fy, Fup, Fbot, Ep, cont, p_now, cont_up
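# --- Added note: the `beta` terms above are a pair dashpot: a damping force
# FD = beta*(dv . n) along the contact normal n = (dx, dy)/dmn, applied with
# opposite signs to the two particles, so only relative normal motion loses
# energy (a simple inelastic-collision model).  Unused helper for the pair term:
def _normal_dashpot(beta, dx, dy, dvx, dvy):
    dmn = np.sqrt(dx**2 + dy**2)
    FD = beta*(dvx*dx + dvy*dy)/dmn
    return FD*dx/dmn, FD*dy/dmn   # components added to particle nn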
def MD_VibrBot_DispUp_ConstP_EkCheck_Collision(beta, Nt, mark_upDS, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr, F0_up, mark_norm):
dt = D0[0]/40
Nt = int(Nt)
#Nt = int(5e7)
#Nt = int(1e4)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
Ek_now = np.array(0)
Ek_up_now = np.array(0)
Ep_now = np.array(0)
Ep_up_now = np.array(0)
#nt_rec = np.linspace(0.5*Nt, Nt, 50)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ep_up = np.zeros(Nt)
Ek = np.zeros(Nt)
Ek_up = np.zeros(Nt)
y_up0 = y_ini[N]
y_up = np.zeros(Nt)
F_up = np.zeros(Nt)
cont = np.zeros(Nt)
cont_up = np.zeros(Nt)
p = np.zeros(Nt)
y_bot = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)+Amp_Vibr
vy_bot = Amp_Vibr*Freq_Vibr*np.cos(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)
vx = np.zeros(N+1)
vy = np.zeros(N+1)
ax_old = np.zeros(N+1)
ay_old = np.zeros(N+1)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
if mark_upDS == 1:
ind_up = np.zeros(N)
for ii in np.arange(N):
d_up = y[N]-y[ii]
r_now = 0.5*D0[ii]
if d_up<r_now:
ind_up[ii] = 1
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
y_up[nt] = y[N]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up_now = force_YFixed_collision_ConstP(beta, Fx, Fy, N, x, y, vx, vy, D0[0:N], L[0], y_bot[nt], vy_bot[nt], y[N])
F_up[nt] = Fup_now+F0_up
Ep[nt] = Ep_now+(y_up0-y[N])*F0_up
Ep_up[nt] = (y_up0-y[N])*F0_up
cont[nt] = cont_now
cont_up[nt] = cont_up_now
p[nt] = p_now
Fx_all = np.append(Fx,0)
Fy_all = np.append(Fy, F_up[nt])
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_up[nt] = 0.5*m0[N]*(vx[N]**2+vy[N]**2)
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ek_up_now = np.append(Ek_up_now, np.mean(Ek_up[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
Ep_up_now = np.append(Ep_up_now, np.mean(Ep_up[nt_rec[ii]:nt_rec[ii+1]]))
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
y_up = y_up-np.mean(y_up)
if mark_norm == 1:
y_up = y_up/np.mean(np.absolute(y_up))
freq_y, fft_y = FFT_Fup(int(Nt/2), y_up[int(Nt/2):Nt], dt, Freq_Vibr)
freq_bot, fft_bot = FFT_Fup(int(Nt/2), y_bot[int(Nt/2):Nt], dt, Freq_Vibr)
return freq_y, fft_y, fft_bot, np.mean(cont), np.mean(cont_up), nt_rec[1:], Ek_now[1:],Ek_up_now[1:],Ep_now[1:],Ep_up_now[1:]
def MD_YFixed_ConstP_Gravity_SD(N, x0, y0, D0, m0, L, F0_up):
g = 1e-5
dt = D0[0]/40
Nt = int(5e6)
#Nt = int(1e4)
Ep = np.zeros(Nt)
F_up = np.zeros(Nt)
F_bot = np.zeros(Nt)
F_tot = np.zeros(Nt)
Fup_now = 0
y_up = y0[N]
vx = np.zeros(N+1)
vy = np.zeros(N+1)
x = np.array(x0)
y = np.array(y0)
t_start = time.time()
for nt in np.arange(Nt):
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x[0:N], y[0:N], D0[0:N], L[0], 0, y[N])
Fy -= g*m0[0:N]
F_up[nt] = Fup_now+F0_up-g*m0[N]
F_bot[nt] = Fbot_now
Ep[nt] = Ep_now+(y_up-y[N])*F0_up+sum(g*np.multiply(m0, y-y0))
vx = 0.1*np.divide(np.append(Fx,0), m0)
vy = 0.1*np.divide(np.append(Fy, F_up[nt]), m0)
x += vx*dt
y += vy*dt
F_tot[nt] = sum(np.absolute(Fx)+np.absolute(Fy))
#print("nt=%d, Fup=%e, Fup_tot=%e\n" % (nt, Fup_now, F_up[nt]))
#dbg.set_trace()
t_end = time.time()
print ("F_tot=%.3e\n" %(F_tot[nt]))
print ("time=%.3e" %(t_end-t_start))
return x, y, p_now
def MD_VibrBot_DispUp_ConstP_EkCheck_Gravity(Nt, mark_upDS, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr, F0_up):
dt = D0[0]/40
#B = 0.1 # damping coefficient
g = 1e-5
Nt = int(Nt)
#Nt = int(5e7)
#Nt = int(1e4)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
Ek_now = np.array(0)
Ek_up_now = np.array(0)
Ep_now = np.array(0)
Ep_up_now = np.array(0)
#nt_rec = np.linspace(0.5*Nt, Nt, 50)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ep_up = np.zeros(Nt)
Ek = np.zeros(Nt)
Ek_up = np.zeros(Nt)
y_up0 = y_ini[N]
y_up = np.zeros(Nt)
F_up = np.zeros(Nt)
cont = np.zeros(Nt)
cont_up = np.zeros(Nt)
p = np.zeros(Nt)
y_bot = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)+Amp_Vibr
vy_bot = Amp_Vibr*Freq_Vibr*np.cos(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)
vx = np.zeros(N+1)
vy = np.zeros(N+1)
ax_old = np.zeros(N+1)
ay_old = np.zeros(N+1)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
if mark_upDS == 1:
ind_up = np.zeros(N)
for ii in np.arange(N):
d_up = y[N]-y[ii]
r_now = 0.5*D0[ii]
if d_up<r_now:
ind_up[ii] = 1
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
y_up[nt] = y[N]
Fx = np.zeros(N)
Fy = np.zeros(N)
#Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up_now = force_YFixed(Fx, Fy, N, x[0:N], y[0:N], D0[0:N], L[0], y_bot[nt], y[N])
beta = 1
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up_now = force_YFixed_collision_ConstP(beta, Fx, Fy, N, x, y, vx, vy, D0[0:N], L[0], y_bot[nt], vy_bot[nt], y[N])
F_up[nt] = Fup_now+F0_up
Ep[nt] = Ep_now+(y_up0-y[N])*F0_up+sum(g*np.multiply(m0, y-y_ini))
Ep_up[nt] = (y_up0-y[N])*F0_up
cont[nt] = cont_now
cont_up[nt] = cont_up_now
p[nt] = p_now
#Fx_all = np.append(Fx,0)-B*vx
#Fy_all = np.append(Fy, F_up[nt])-B*vy-g*m0
Fx_all = np.append(Fx,0)
Fy_all = np.append(Fy, F_up[nt])-g*m0
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_up[nt] = 0.5*m0[N]*(vx[N]**2+vy[N]**2)
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ek_up_now = np.append(Ek_up_now, np.mean(Ek_up[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
Ep_up_now = np.append(Ep_up_now, np.mean(Ep_up[nt_rec[ii]:nt_rec[ii+1]]))
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
y_up = y_up-np.mean(y_up)
#y_up = y_up/np.mean(np.absolute(y_up))
freq_y, fft_y = FFT_Fup(int(Nt/2), y_up[int(Nt/2):Nt], dt, Freq_Vibr)
freq_bot, fft_bot = FFT_Fup(int(Nt/2), y_bot[int(Nt/2):Nt], dt, Freq_Vibr)
return freq_y, fft_y, fft_bot, np.mean(cont), np.mean(cont_up), nt_rec[1:], Ek_now[1:],Ek_up_now[1:],Ep_now[1:],Ep_up_now[1:]
def MD_YFixed_ConstV_SP_SD(Nt, N, x0, y0, D0, m0, Lx, Ly):
dt = D0[0]/40
Nt = int(Nt)
Ep = np.zeros(Nt)
F_tot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
t_start = time.time()
for nt in np.arange(Nt):
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, Lx, 0, Ly)
Ep[nt] = Ep_now
vx = 0.1*np.divide(Fx, m0)
vy = 0.1*np.divide(Fy, m0)
x += vx*dt
y += vy*dt
F_tot[nt] = sum(np.absolute(Fx)+np.absolute(Fy))
t_end = time.time()
print ("F_tot=%.3e" %(F_tot[nt]))
print ("time=%.3e" %(t_end-t_start))
plt.figure(figsize=(6.4,4.8))
plt.plot(range(Nt), F_tot[0:Nt], color='blue')
ax = plt.gca()
ax.set_yscale('log')
plt.xlabel("t")
plt.ylabel("F_total")
plt.title("Finding the Equilibrium", fontsize='small')
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.tight_layout()
plt.show()
return x, y, p_now
def MD_YFixed_ConstV_SP_SD_2(Nt, N, x0, y0, D0, m0, Lx, Ly):
dt = D0[0]/40
Nt = int(Nt)
Ep = np.zeros(Nt)
F_tot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
t_start = time.time()
for nt in np.arange(Nt):
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, Lx, 0, Ly)
Ep[nt] = Ep_now
vx = 0.1*np.divide(Fx, m0)
vy = 0.1*np.divide(Fy, m0)
x += vx*dt
y += vy*dt
F_tot[nt] = sum(np.absolute(Fx)+np.absolute(Fy))
# putting a threshold on total force
if (F_tot[nt]<1e-11):
break
t_end = time.time()
#print ("F_tot=%.3e" %(F_tot[nt]))
#print ("time=%.3e" %(t_end-t_start))
return x, y, p_now
#@jit
def force_YFixed_collision_ConstV(beta, Fx, Fy, N, x, y, vx, vy, D, Lx, y_bot, y_up):
Ep = 0
cont = 0
p_now = 0
for nn in np.arange(N):
d_up = y_up-y[nn]
d_bot = y[nn]-y_bot
r_now = 0.5*D[nn]
if d_up<r_now:
F = -(1-d_up/r_now)/(r_now)
Fy[nn] += F
dvy = -vy[nn]
FD = beta*dvy
Fy[nn] += FD
Ep += (1/2)*(1-d_up/r_now)**2
cont += 1
#dbg.set_trace()
if d_bot<r_now:
F = -(1-d_bot/r_now)/(r_now)
Fy[nn] -= F
dvy = -vy[nn]
FD = beta*dvy
Fy[nn] += FD
Ep += (1/2)*(1-d_bot/r_now)**2
cont += 1
for mm in np.arange(nn+1, N):
dy = y[mm]-y[nn]
Dmn = 0.5*(D[mm]+D[nn])
if abs(dy) < Dmn:
dx = x[mm]-x[nn]
dx = dx-round(dx/Lx)*Lx
dmn = np.sqrt(dx**2+dy**2)
if dmn < Dmn:
F = -(1-dmn/Dmn)/Dmn/dmn
Fx[nn] += F*dx
Fx[mm] -= F*dx
Fy[nn] += F*dy
Fy[mm] -= F*dy
dvx = vx[mm]-vx[nn]
dvy = vy[mm]-vy[nn]
FD = beta*(dvx*dx+dvy*dy)/dmn
#FD = np.absolute(FD)
Fx[nn] += FD*dx/dmn
Fx[mm] -= FD*dx/dmn
Fy[nn] += FD*dy/dmn
Fy[mm] -= FD*dy/dmn
Ep += (1/2)*(1-dmn/Dmn)**2
cont += 1
p_now += (-F)*(dx**2+dy**2)
return Fx, Fy, Ep, cont, p_now
def MD_VibrSP_ConstV_Collision(beta, Nt, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr, ind_in, ind_out, mark_vibrY):
dt = D0[0]/40
Nt = int(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
if mark_vibrY == 0:
x_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))+x_ini[ind_in]
vx_in = Amp_Vibr*Freq_Vibr*np.cos(Freq_Vibr*dt*np.arange(Nt))
elif mark_vibrY == 1:
y_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))+y_ini[ind_in]
vy_in = Amp_Vibr*Freq_Vibr*np.cos(Freq_Vibr*dt*np.arange(Nt))
#y_bot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
if mark_vibrY == 0:
x[ind_in] = x_in[nt]
y[ind_in] = y_ini[ind_in]
vx[ind_in] = vx_in[nt]
vy[ind_in] = 0
elif mark_vibrY == 1:
x[ind_in] = x_ini[ind_in]
y[ind_in] = y_in[nt]
vx[ind_in] = 0
vy[ind_in] = vy_in[nt]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Ep_now, cont_now, p_now = force_YFixed_collision_ConstV(beta, Fx, Fy, N, x, y, vx, vy, D0, L[0], 0, L[1])
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx
Fy_all = Fy
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_now = np.array(0)
Ep_now = np.array(0)
cont_now = np.array(0)
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_now = np.append(cont_now, np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
cont_now[0] = cont_now[1]
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
if mark_vibrY == 0:
freq_fft, fft_in = FFT_Fup(int(Nt/2), x_in[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_vibrY == 1:
freq_fft, fft_in = FFT_Fup(int(Nt/2), y_in[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_x_out = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_y_out = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr)
return freq_fft, fft_in, fft_x_out, fft_y_out, np.mean(cont), nt_rec, Ek_now, Ep_now, cont_now
def MD_VibrSP_ConstV(B, Nt, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr, ind_in, ind_out, mark_vibrY, mark_resonator):
dt = D0[0]/40
Nt = int(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
if mark_vibrY == 0:
x_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))+x_ini[ind_in]
elif mark_vibrY == 1:
y_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))+y_ini[ind_in]
#y_bot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
if mark_vibrY == 0:
x[ind_in] = x_in[nt]
y[ind_in] = y_ini[ind_in]
elif mark_vibrY == 1:
x[ind_in] = x_ini[ind_in]
y[ind_in] = y_in[nt]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, L[0], 0, L[1])
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx-B*vx
Fy_all = Fy-B*vy
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_now = np.array(0)
Ep_now = np.array(0)
cont_now = np.array(0)
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_now = np.append(cont_now, np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
cont_now[0] = cont_now[1]
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
if mark_vibrY == 0:
freq_fft, fft_in = FFT_Fup(int(Nt/2), x_in[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_vibrY == 1:
freq_fft, fft_in = FFT_Fup(int(Nt/2), y_in[int(Nt/2):Nt], dt, Freq_Vibr)
if mark_resonator == 0:
freq_fft, fft_x_out = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_y_out = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_resonator == 1:
freq_fft, fft_x_out = Output_resonator_1D(Nt, x_out[0:Nt], x_ini[ind_out], m0[ind_out], Freq_Vibr, dt)
freq_fft, fft_y_out = Output_resonator_1D(Nt, y_out[0:Nt], y_ini[ind_out], m0[ind_out], Freq_Vibr, dt)
if Nt == 5e5:
print(x[ind_out], y[ind_out])
print(fft_x_out[100], fft_y_out[100])
print(fft_in[100])
return freq_fft, fft_in, fft_x_out, fft_y_out, np.mean(cont), nt_rec, Ek_now, Ep_now, cont_now
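# --- Added sketch: FFT_Fup (from FFT_functions) is used above to read off the
# steady-state response amplitude at the drive frequency.  A generic
# numpy-only equivalent, assuming a uniformly sampled signal and noting that
# Freq_Vibr is an angular frequency (signals are sin(Freq_Vibr*dt*arange(Nt))):
def _amp_at_freq(signal, dt, freq_drive):
    n = len(signal)
    amp = 2.0/n*np.abs(np.fft.rfft(signal - np.mean(signal)))
    w = 2.0*np.pi*np.fft.rfftfreq(n, d=dt)        # angular-frequency axis
    return amp[np.argmin(np.abs(w - freq_drive))]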
def MD_Periodic_ConstV_SP_SD(Nt, N, x0, y0, D0, m0, Lx, Ly):
dt = D0[0]/40
Nt = int(Nt)
Ep = np.zeros(Nt)
F_tot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
t_start = time.time()
for nt in np.arange(Nt):
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Ep_now, cont_now, p_now = force_Regular(Fx, Fy, N, x, y, D0, Lx, Ly)
Ep[nt] = Ep_now
vx = 0.1*np.divide(Fx, m0)
vy = 0.1*np.divide(Fy, m0)
x += vx*dt
y += vy*dt
F_tot[nt] = sum(np.absolute(Fx)+np.absolute(Fy))
t_end = time.time()
print ("F_tot=%.3e\n" %(F_tot[nt]))
print ("nt=%e, time=%.3e" %(nt, t_end-t_start))
return x, y, p_now
def MD_Periodic_equi_Ekcheck(Nt, N, x0, y0, D0, m0, L, T_set, V_em, n_em):
dt = min(D0)/40
Nt = int(Nt)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
vx_rec = np.zeros([Nt, N])
vy_rec = np.zeros([Nt, N])
nt_rec = np.linspace(0, Nt, int(Nt/1e3)+1)
#nt_rec = np.linspace(0, Nt, int(Nt/1e2)+1)
nt_rec = nt_rec.astype(int)
Ek_now = np.array(0)
Ep_now = np.array(0)
vx = np.zeros(N)
vy = np.zeros(N)
for ii in np.arange(n_em):
ind1 = 2*np.arange(N)
ind2 = ind1+1
vx += V_em[ind1, ii]
vy += V_em[ind2, ii]
T_rd = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
vx = vx*np.sqrt(N*T_set/T_rd)
vy = vy*np.sqrt(N*T_set/T_rd)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Ep_now, cont_now, p_now = force_Regular(Fx, Fy, N, x, y, D0, L[0], L[1])
Ep[nt] = Ep_now
cont[nt] = cont_now
ax = np.divide(Fx, m0);
ay = np.divide(Fy, m0);
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
vx_rec[nt] = vx
vy_rec[nt] = vy
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("cont_min/cont_max=%f\n" %(CB_ratio))
freq_now, fft_now = FFT_vCorr(int(Nt/2), N, vx_rec[int(Nt/2):Nt], vy_rec[int(Nt/2):Nt], dt)
return freq_now, fft_now, np.mean(cont), nt_rec, Ek_now, Ep_now
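# --- Added sketch: FFT_vCorr (external) turns the recorded velocities into a
# vibrational spectrum.  Assuming it follows the standard recipe, the same
# information comes from the particle-averaged velocity power spectrum, which
# by the Wiener-Khinchin theorem equals the FT of the velocity autocorrelation:
def _velocity_spectrum(vx_rec, vy_rec, dt):
    power = (np.mean(np.abs(np.fft.rfft(vx_rec, axis=0))**2, axis=1)
             + np.mean(np.abs(np.fft.rfft(vy_rec, axis=0))**2, axis=1))
    w = 2.0*np.pi*np.fft.rfftfreq(vx_rec.shape[0], d=dt)
    return w, power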
#@jit
def force_Xfixed(Fx, Fy, N, x, y, D, x_l, x_r, Ly, ind_wall):
F_l = 0
F_r = 0
Ep = 0
cont = 0
p_now = 0
for nn in np.arange(N):
d_l = x[nn]-x_l
d_r = x_r-x[nn]
r_now = 0.5*D[nn]
if (ind_wall[nn]==0) and (d_r<r_now):
F = -(1-d_r/r_now)/(r_now)
F_r -= F
Fx[nn] += F
Ep += (1/2)*(1-d_r/r_now)**2
cont += 1
#dbg.set_trace()
if (ind_wall[nn]==0) and (d_l<r_now):
F = -(1-d_l/r_now)/(r_now)
F_l += F
Fx[nn] -= F
Ep += (1/2)*(1-d_l/r_now)**2
cont += 1
for mm in np.arange(nn+1, N):
dx = x[mm]-x[nn]
Dmn = 0.5*(D[mm]+D[nn])
if abs(dx) < Dmn:
dy = y[mm]-y[nn]
dy = dy-round(dy/Ly)*Ly
dmn = np.sqrt(dx**2+dy**2)
if dmn < Dmn:
F = -(1-dmn/Dmn)/Dmn/dmn
Fx[nn] += F*dx
Fx[mm] -= F*dx
Fy[nn] += F*dy
Fy[mm] -= F*dy
Ep += (1/2)*(1-dmn/Dmn)**2
cont += 1
p_now += (-F)*(dx**2+dy**2)
return Fx, Fy, F_l, F_r, Ep, cont, p_now
def MD_Xfixed_SD(Nt, N, x0, y0, D0, m0, Lx, Ly, ind_wall):
wall = np.where(ind_wall>0)
dt = D0[0]/40
Nt = int(Nt)
#Nt = int(5e6)
#Nt = int(5e2)
Ep = np.zeros(Nt)
F_l = np.zeros(Nt)
F_r = np.zeros(Nt)
F_tot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
t_start = time.time()
for nt in np.arange(Nt):
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fl_now, Fr_now, Ep_now, cont_now, p_now = force_Xfixed(Fx, Fy, N, x, y, D0, 0, Lx, Ly, ind_wall)
F_l[nt] = Fl_now
F_r[nt] = Fr_now
Ep[nt] = Ep_now
Fx[wall] = 0
Fy[wall] = 0
vx = 0.1*np.divide(Fx, m0)
vy = 0.1*np.divide(Fy, m0)
x += vx*dt
y += vy*dt
F_tot[nt] = sum(np.absolute(Fx)+np.absolute(Fy))
#print("nt=%d, Fup=%e, Fup_tot=%e\n" % (nt, Fup_now, F_up[nt]))
#dbg.set_trace()
t_end = time.time()
print ("F_tot=%.3e" %(F_tot[nt]))
#print ("Ep_tot=%.3e\n" %(Ep[nt]))
print ("time=%.3e" %(t_end-t_start))
return x, y, p_now
def MD_VibrWall_DiffP_Xfixed(Nt, N, x_ini, y_ini,D0, m0, Lx, Ly, Freq_Vibr, Amp_Vibr, ind_wall, B):
dt = D0[0]/40
# B damping coefficient
Nt = int(Nt)
#Nt = int(5e7)
#Nt = int(1e4)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
Ek_now = np.array(0)
Ep_now = np.array(0)
#nt_rec = np.linspace(0.5*Nt, Nt, 50)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
x_l = np.zeros(Nt)
F_r = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_l = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)+Amp_Vibr
wall_l = np.where(ind_wall==1)
wall_r = np.where(ind_wall==2)
wall = np.where(ind_wall>0)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
x[wall_l] = x_l[nt]
Fx, Fy, Fl_now, Fr_now, Ep_now, cont_now, p_now = force_Xfixed(Fx, Fy, N, x, y, D0, x_l[nt], Lx, Ly, ind_wall)
F_r[nt] = Fr_now+sum(Fx[wall_r])
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx-B*vx
Fy_all = Fy-B*vy
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax[wall] = 0
ay[wall] = 0
vx[wall] = 0
vy[wall] = 0
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
#for ii in np.arange(len(nt_rec)-1):
# Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
# Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
#CB_ratio = min(cont)/max(cont)
#print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
freq_fft, fft_receive = FFT_Fup(int(Nt/2), F_r[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_drive = FFT_Fup(int(Nt/2), x_l[int(Nt/2):Nt], dt, Freq_Vibr)
return freq_fft, fft_receive, fft_drive, cont_now, nt_rec, Ek_now, Ep_now
#@jit
def force_XFixed_collision_VibrLx(beta, Fx, Fy, N, x, y, vx, vy, D, x_l, Lx, Ly, vx_l, ind_wall):
Fr = 0
Fl = 0
Ep = 0
cont = 0
p_now = 0
#beta = 1
for nn in np.arange(N):
if ind_wall[nn] == 0:
d_r = Lx-x[nn]
d_l = x[nn]-x_l
r_now = 0.5*D[nn]
if d_r<r_now:
F = -(1-d_r/r_now)/(r_now)
Fr -= F
Fx[nn] += F
dvx = -vx[nn]
FD = beta*dvx
Fx[nn] += FD
Fr -= FD
Ep += (1/2)*(1-d_r/r_now)**2
cont += 1
#dbg.set_trace()
if d_l<r_now:
F = -(1-d_l/r_now)/(r_now)
Fl += F
Fx[nn] -= F
dvx = vx_l-vx[nn]
FD = beta*dvx
Fx[nn] += FD
Ep += (1/2)*(1-d_l/r_now)**2
cont += 1
for mm in np.arange(nn+1, N):
dx = x[mm]-x[nn]
Dmn = 0.5*(D[mm]+D[nn])
if abs(dx) < Dmn:
dy = y[mm]-y[nn]
dy = dy-round(dy/Ly)*Ly
dmn = np.sqrt(dx**2+dy**2)
if dmn < Dmn:
F = -(1-dmn/Dmn)/Dmn/dmn
Fx[nn] += F*dx
Fx[mm] -= F*dx
Fy[nn] += F*dy
Fy[mm] -= F*dy
dvx = vx[mm]-vx[nn]
dvy = vy[mm]-vy[nn]
FD = beta*(dvx*dx+dvy*dy)/dmn
#FD = np.absolute(FD)
Fx[nn] += FD*dx/dmn
Fx[mm] -= FD*dx/dmn
Fy[nn] += FD*dy/dmn
Fy[mm] -= FD*dy/dmn
Ep += (1/2)*(1-dmn/Dmn)**2
cont += 1
p_now += (-F)*(dx**2+dy**2)
return Fx, Fy, Fl, Fr, Ep, cont, p_now
def MD_VibrWall_DiffP_Xfixed_Collision(Nt, N, x_ini, y_ini,D0, m0, Lx, Ly, Freq_Vibr, Amp_Vibr, ind_wall, beta):
dt = D0[0]/40
# B damping coefficient
Nt = int(Nt)
#Nt = int(5e7)
#Nt = int(1e4)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
Ek_now = np.array(0)
Ep_now = np.array(0)
#nt_rec = np.linspace(0.5*Nt, Nt, 50)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
x_l = np.zeros(Nt)
F_r = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_l = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)+Amp_Vibr
vx_l = Amp_Vibr*Freq_Vibr*np.cos(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)
wall_l = np.where(ind_wall==1)
wall_r = np.where(ind_wall==2)
wall = np.where(ind_wall>0)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
x[wall_l] = x_l[nt]
vx[wall_l] = vx_l[nt]
Fx, Fy, Fl_now, Fr_now, Ep_now, cont_now, p_now = force_XFixed_collision_VibrLx(beta, Fx, Fy, N, x, y, vx, vy, D0, x_l[nt], Lx, Ly, vx_l[nt], ind_wall)
F_r[nt] = Fr_now+sum(Fx[wall_r])
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx
Fy_all = Fy
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax[wall] = 0
ay[wall] = 0
vx[wall] = 0
vy[wall] = 0
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
freq_fft, fft_receive = FFT_Fup(int(Nt/2), F_r[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_drive = FFT_Fup(int(Nt/2), x_l[int(Nt/2):Nt], dt, Freq_Vibr)
return freq_fft, fft_receive, fft_drive, cont_now, nt_rec, Ek_now, Ep_now
def MD_VibrWall_LySignal_Collision(Nt, N, x_ini, y_ini,D0, m0, Lx0, Ly0, Freq_Vibr, Amp_Vibr, ind_wall, beta, dLy_scheme, num_gap):
dt = D0[0]/40
# B damping coefficient
Nt = int(Nt)
#Nt = int(5e7)
#Nt = int(1e4)
dLy_max = 0.1
nt_transition = int(Nt/num_gap/20)
dLy_inc = np.linspace(0, dLy_max, nt_transition)
dLy_dec = np.linspace(dLy_max, 0, nt_transition)
if dLy_scheme == 0:
dLy_all = np.zeros(Nt)
elif dLy_scheme == 1:
dLy_all = np.ones(Nt)*dLy_max
dLy_all[0:nt_transition] = dLy_inc
elif dLy_scheme == 2:
dLy_all = np.zeros(Nt)
nt_Ly = np.linspace(0, Nt, num_gap+1)
nt_Ly = nt_Ly.astype(int)
for ii in np.arange(1, num_gap):
nt1 = nt_Ly[ii]-int(nt_transition/2)
nt2 = nt_Ly[ii]+int(nt_transition/2)
if ii%2 == 1:
dLy_all[nt_Ly[ii]:nt_Ly[ii+1]] = dLy_max
dLy_all[nt1:nt2] = dLy_inc
else:
dLy_all[nt1:nt2] = dLy_dec
nt_rec = np.linspace(0, Nt, int(Nt/5e4*num_gap/5)+1)
Ek_now = np.array(0)
Ep_now = np.array(0)
#nt_rec = np.linspace(0.5*Nt, Nt, 50)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
x_l = np.zeros(Nt)
F_r = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_l = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)+Amp_Vibr
vx_l = Amp_Vibr*Freq_Vibr*np.cos(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)
wall_l = np.where(ind_wall==1)
wall_r = np.where(ind_wall==2)
wall = np.where(ind_wall>0)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
for nt in np.arange(Nt):
Ly = Ly0+dLy_all[nt]
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
x[wall_l] = x_l[nt]
vx[wall_l] = vx_l[nt]
Fx, Fy, Fl_now, Fr_now, Ep_now, cont_now, p_now = force_XFixed_collision_VibrLx(beta, Fx, Fy, N, x, y, vx, vy, D0, x_l[nt], Lx0, Ly, vx_l[nt], ind_wall)
F_r[nt] = Fr_now+sum(Fx[wall_r])
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx
Fy_all = Fy
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax[wall] = 0
ay[wall] = 0
vx[wall] = 0
vy[wall] = 0
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
nt_dLy = np.arange(0, Nt, 100)
return nt_dLy, dLy_all[nt_dLy], F_r, nt_rec, Ek_now, Ep_now
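# --- Added note: dLy_scheme selects the Ly(t) drive built above:
#   0 -> Ly held at Ly0 throughout;
#   1 -> one linear ramp of width nt_transition up to Ly0+dLy_max, then hold;
#   2 -> num_gap alternating plateaus at Ly0 and Ly0+dLy_max, with linear
#        ramps of width nt_transition stitched in at every switch.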
def MD_VibrWall_LySignal(Nt, N, x_ini, y_ini,D0, m0, Lx0, Ly0, Freq_Vibr, Amp_Vibr, ind_wall, B, dLy_scheme, num_gap):
dt = D0[0]/40
# B damping coefficient
Nt = int(Nt)
#Nt = int(5e7)
#Nt = int(1e4)
dLy_max = 0.1
nt_transition = int(Nt/num_gap/20)
dLy_inc = np.linspace(0, dLy_max, nt_transition)
dLy_dec = np.linspace(dLy_max, 0, nt_transition)
if dLy_scheme == 0:
dLy_all = np.zeros(Nt)
elif dLy_scheme == 1:
dLy_all = np.ones(Nt)*dLy_max
dLy_all[0:nt_transition] = dLy_inc
elif dLy_scheme == 2:
dLy_all = np.zeros(Nt)
nt_Ly = np.linspace(0, Nt, num_gap+1)
nt_Ly = nt_Ly.astype(int)
for ii in np.arange(1, num_gap):
nt1 = nt_Ly[ii]-int(nt_transition/2)
nt2 = nt_Ly[ii]+int(nt_transition/2)
if ii%2 == 1:
dLy_all[nt_Ly[ii]:nt_Ly[ii+1]] = dLy_max
dLy_all[nt1:nt2] = dLy_inc
else:
dLy_all[nt1:nt2] = dLy_dec
nt_rec = np.linspace(0, Nt, int(Nt/5e4*num_gap/5)+1)
Ek_now = np.array(0)
Ep_now = np.array(0)
#nt_rec = np.linspace(0.5*Nt, Nt, 50)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
x_l = np.zeros(Nt)
F_r = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_l = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)+Amp_Vibr
vx_l = Amp_Vibr*Freq_Vibr*np.cos(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)
wall_l = np.where(ind_wall==1)
wall_r = np.where(ind_wall==2)
wall = np.where(ind_wall>0)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
for nt in np.arange(Nt):
Ly = Ly0+dLy_all[nt]
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
x[wall_l] = x_l[nt]
vx[wall_l] = vx_l[nt]
Fx, Fy, Fl_now, Fr_now, Ep_now, cont_now, p_now = force_Xfixed(Fx, Fy, N, x, y, D0, x_l[nt], Lx0, Ly, ind_wall)
F_r[nt] = Fr_now+sum(Fx[wall_r])
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx-B*vx
Fy_all = Fy-B*vy
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax[wall] = 0
ay[wall] = 0
vx[wall] = 0
vy[wall] = 0
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
nt_dLy = np.arange(0, Nt, 100)
return nt_dLy, dLy_all[nt_dLy], F_r, nt_rec, Ek_now, Ep_now
def MD_VibrBot_FSignal_Collision(beta, Nt, N, x_ini, y_ini, D0, m0, Lx, Freq_Vibr, Amp_Vibr, F_scheme, num_gap):
dt = D0[0]/40
Nt = int(Nt)
#Nt = int(5e7)
#Nt = int(1e4)
F_max = 0.01
F_min = 1e-8
nt_transition = int(Nt/num_gap/20)
F_inc = np.linspace(F_min, F_max, nt_transition)
F_dec = np.linspace(F_max, F_min, nt_transition)
if F_scheme == 1:
F_all = np.ones(Nt)*F_max
elif F_scheme == 0:
F_all = np.ones(Nt)*F_min
F_all[0:nt_transition] = F_dec
elif F_scheme == 2:
F_all = np.ones(Nt)*F_max
nt_F = np.linspace(0, Nt, num_gap+1)
nt_F = nt_F.astype(int)
for ii in np.arange(1, num_gap):
nt1 = nt_F[ii]-int(nt_transition/2)
nt2 = nt_F[ii]+int(nt_transition/2)
if ii%2 == 1:
F_all[nt_F[ii]:nt_F[ii+1]] = F_min
F_all[nt1:nt2] = F_dec
else:
F_all[nt1:nt2] = F_inc
nt_rec = np.linspace(0, Nt, int(Nt/5e4*num_gap/5)+1)
Ek_now = np.array(0)
Ep_now = np.array(0)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
y_up = np.zeros(Nt)
F_up = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
y_bot = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)+Amp_Vibr
vy_bot = Amp_Vibr*Freq_Vibr*np.cos(Freq_Vibr*dt*np.arange(Nt)+1.5*np.pi)
vx = np.zeros(N+1)
vy = np.zeros(N+1)
ax_old = np.zeros(N+1)
ay_old = np.zeros(N+1)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
y_up[nt] = y[N]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up_now = force_YFixed_collision_ConstP(beta, Fx, Fy, N, x, y, vx, vy, D0[0:N], Lx, y_bot[nt], vy_bot[nt], y[N])
F_up[nt] = Fup_now-F_all[nt]
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = np.append(Fx,0)
Fy_all = np.append(Fy, F_up[nt])
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek_up = 0.5*m0[N]*(vx[N]**2+vy[N]**2)
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))-Ek_up
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
print ("freq=%f, cont_min=%d, cont_max=%d, cont_ave=%f\n" %(Freq_Vibr, min(cont), max(cont), np.mean(cont)))
nt_F = np.arange(0, Nt, 100)
return nt_F, F_all[nt_F], y_up, nt_rec, Ek_now, Ep_now
def MD_SPSignal(mark_collision, beta, Nt, N, x_ini, y_ini,D0, m0, Lx, Ly, Freq_Vibr, Amp_Vibr, ind_in, ind_out, ind_fix, dr_scheme, num_gap, mark_vibrY, dr_one, dr_two):
dt = D0[0]/40
Nt = int(Nt)
d_ini = D0[0]
d0 = 0.1
dr_all = np.zeros(Nt)+dr_one
if abs(dr_scheme) <= 2:
nt_dr = np.linspace(0, Nt, 3)
nt_dr = nt_dr.astype(int)
dr_all[nt_dr[1]:nt_dr[2]] = dr_two
num_gap = 5
elif dr_scheme == 3 or dr_scheme == 4:
nt_dr = np.linspace(0, Nt, num_gap+1)
nt_dr = nt_dr.astype(int)
for ii in np.arange(1, num_gap, 2):
dr_all[nt_dr[ii]:nt_dr[ii+1]] = dr_two
D_fix = d_ini+dr_all*d_ini
nt_rec = np.linspace(0, Nt, int(Nt/5e4*num_gap/5)+1)
Ek_rec = np.array(0)
Ep_rec = np.array(0)
cont_rec = np.array(0)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
if mark_vibrY == 1:
y_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))+y_ini[ind_in]
vy_in = Amp_Vibr*Freq_Vibr*np.cos(Freq_Vibr*dt*np.arange(Nt))
else:
x_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))+x_ini[ind_in]
vx_in = Amp_Vibr*Freq_Vibr*np.cos(Freq_Vibr*dt*np.arange(Nt))
#y_bot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
D0[ind_fix] = D_fix[nt]
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
if mark_vibrY == 1:
y[ind_in] = y_in[nt]
x[ind_in] = x_ini[ind_in]
vy[ind_in] = vy_in[nt]
vx[ind_in] = 0
else:
x[ind_in] = x_in[nt]
y[ind_in] = y_ini[ind_in]
vx[ind_in] = vx_in[nt]
vy[ind_in] = 0
Fx = np.zeros(N)
Fy = np.zeros(N)
if mark_collision == 1:
Fx, Fy, Ep_now, cont_now, p_now = force_YFixed_collision_ConstV(beta, Fx, Fy, N, x, y, vx, vy, D0, Lx, 0, Ly)
Fx_all = Fx
Fy_all = Fy
else:
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, Lx, 0, Ly)
Fx_all = Fx-beta*vx
Fy_all = Fy-beta*vy
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
if nt % 2000 == 0:
print ("nt = %d, Ek = %.2e, cont = %.2e" %(nt, Ek[nt], cont[nt]))
for ii in np.arange(len(nt_rec)-1):
Ek_rec = np.append(Ek_rec, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_rec = np.append(Ep_rec, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_rec = np.append(cont_rec, np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
print ("freq=%f, cont_min=%d, cont_max=%d, cont_ave=%f\n" %(Freq_Vibr, min(cont), max(cont), np.mean(cont)))
nt_dr = np.arange(0, Nt, 100)
if mark_vibrY == 1:
xy_out = y_out
else:
xy_out = x_out
return nt_dr, dr_all[nt_dr], xy_out, nt_rec, Ek_rec, Ep_rec, cont_rec
def MD_YFixed_equi_SP_modecheck(Nt, N, x0, y0, D0, m0, Lx, Ly, T_set, V_em, n_em, ind_out):
dt = min(D0)/40
Nt = int(Nt)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ek_rec = []
Ep_rec = []
cont_rec = []
vx = np.zeros(N)
vy = np.zeros(N)
for ii in np.arange(n_em):
ind1 = 2*np.arange(N)
ind2 = ind1+1
vx += V_em[ind1, ii]
vy += V_em[ind2, ii]
T_rd = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
vx = vx*np.sqrt(N*T_set/T_rd)
vy = vy*np.sqrt(N*T_set/T_rd)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, Lx, 0, Ly)
Ep[nt] = Ep_now
cont[nt] = cont_now
ax = np.divide(Fx, m0);
ay = np.divide(Fy, m0);
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
for ii in np.arange(len(nt_rec)-1):
Ek_rec.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_rec.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_rec.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
nt_rec = (nt_rec[1:] + nt_rec[:-1]) / 2
CB_ratio = min(cont)/max(cont)
print ("cont_min/cont_max=%f\n" %(CB_ratio))
Freq_Vibr = 0
freq_x, fft_x = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr)
freq_y, fft_y = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr)
ind1 = freq_x<30
ind2 = freq_y<30
return freq_x[ind1], freq_y[ind2], fft_x[ind1], fft_y[ind2], np.mean(cont), nt_rec, Ek_rec, Ep_rec, cont_rec
def MD_YFixed_SPVibr_SP_modecheck(Nt, N, x0, y0, D0, m0, Lx, Ly, T_set, ind_in, ind_out, mark_vibrY):
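    """Single-particle kick variant of the mode check: particle ind_in gets a
    unit velocity (y if mark_vibrY == 1, else x), the center-of-mass drift is
    removed, and the kinetic energy is rescaled to N*T_set before an undamped
    run. Prints nt_CB, the first step at which the contact number drops below
    its initial value, and returns FFT spectra of the output particle."""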
dt = min(D0)/40
Nt = int(Nt)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ek_rec = np.array(0)
Ep_rec = np.array(0)
cont_rec = np.array(0)
vx = np.zeros(N)
vy = np.zeros(N)
if mark_vibrY == 1:
vy[ind_in] = 1
vy_mc = sum(np.multiply(vy,m0))/sum(m0)
vy = vy-vy_mc
else:
vx[ind_in] = 1
vx_mc = sum(np.multiply(vx,m0))/sum(m0)
vx = vx-vx_mc
T_rd = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
if mark_vibrY == 1:
vy = vy*np.sqrt(N*T_set/T_rd)
print("|vy|_Max=%.3e, |vy|_Min=%.3e" %(max(abs(vy)), min(abs(vy))))
else:
vx = vx*np.sqrt(N*T_set/T_rd)
print("|vx|_Max=%.3e, |vx|_Min=%.3e" %(max(abs(vx)), min(abs(vx))))
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
#t_start = time.time()
mark_CB = 0
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, Lx, 0, Ly)
Ep[nt] = Ep_now
cont[nt] = cont_now
if mark_CB == 0 and cont_now<cont[0]:
print("nt_CB=%d" % nt)
mark_CB = 1
ax = np.divide(Fx, m0);
ay = np.divide(Fy, m0);
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
for ii in np.arange(len(nt_rec)-1):
Ek_rec = np.append(Ek_rec, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_rec = np.append(Ep_rec, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_rec = np.append(cont_rec, np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
CB_ratio = min(cont)/max(cont)
print ("cont_min/cont_max=%f\n" %(CB_ratio))
Freq_Vibr = 0
freq_x, fft_x = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr)
freq_y, fft_y = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr)
ind1 = freq_x<30
ind2 = freq_y<30
return freq_x[ind1], freq_y[ind2], fft_x[ind1], fft_y[ind2], cont_rec, nt_rec, Ek_rec, Ep_rec
#181105
def MD_YFixed_SPVibr_vCorr_modecheck(Nt_MD, Nt_FFT, N, x0, y0, D0, m0, Lx, Ly, T_set, ind_in, ind_out, mark_vibrY):
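    """Like MD_YFixed_SPVibr_SP_modecheck, but after Nt_MD equilibration steps
    the velocities of all particles are recorded for Nt_FFT steps and turned
    into a velocity-correlation spectrum via FFT_vCorr."""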
N = int(N)
Nt_FFT = int(Nt_FFT)
Nt_MD = int(Nt_MD)
dt = min(D0)/40
Nt = Nt_MD+Nt_FFT
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
mark_FFT = np.zeros(Nt)
mark_FFT[Nt_MD:Nt] = 1
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ek_rec = []
Ep_rec = []
cont_rec = []
vx = np.zeros(N)
vy = np.zeros(N)
if mark_vibrY == 1:
vy[ind_in] = 1
vy_mc = sum(np.multiply(vy,m0))/sum(m0)
vy = vy-vy_mc
else:
vx[ind_in] = 1
vx_mc = sum(np.multiply(vx,m0))/sum(m0)
vx = vx-vx_mc
T_rd = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
if mark_vibrY == 1:
vy = vy*np.sqrt(N*T_set/T_rd)
print("|vy|_Max=%.3e, |vy|_Min=%.3e" %(max(abs(vy)), min(abs(vy))))
else:
vx = vx*np.sqrt(N*T_set/T_rd)
print("|vx|_Max=%.3e, |vx|_Min=%.3e" %(max(abs(vx)), min(abs(vx))))
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, Lx, 0, Ly)
Ep[nt] = Ep_now
cont[nt] = cont_now
ax = np.divide(Fx, m0);
ay = np.divide(Fy, m0);
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
if mark_FFT[nt] == 1:
if mark_FFT[nt-1] == 0:
nt_ref = nt
vx_rec = np.zeros([Nt_FFT, N])
vy_rec = np.zeros([Nt_FFT, N])
nt_delta = nt-nt_ref
vx_rec[nt_delta] = vx
vy_rec[nt_delta] = vy
if nt_delta == Nt_FFT-1:
freq_now, fft_now = FFT_vCorr(Nt_FFT, N, vx_rec, vy_rec, dt)
print ("Nt_End="+str(nt))
for ii in np.arange(len(nt_rec)-1):
Ek_rec.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_rec.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_rec.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
CB_ratio = min(cont)/max(cont)
print ("cont_min/cont_max=%f" %(CB_ratio))
return freq_now, fft_now, (nt_rec[:-1]+nt_rec[1:])/2, Ek_rec, Ep_rec, cont_rec
def MD_YFixed_ConstV(B, Nt, N, x0, y0, D0, m0, Lx, Ly):
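    """Damped relaxation: Verlet dynamics with viscous damping -B*v and fixed
    walls in y. Prints nt_CB when the contact number first drops below its
    initial value, plus the final kinetic energy, and returns the relaxed
    positions with block-averaged Ek/Ep/contact records at nt_rec midpoints."""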
dt = min(D0)/40
Nt = int(Nt)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/1e3)+1)
nt_rec = nt_rec.astype(int)
Ek_rec = []
Ep_rec = []
cont_rec = []
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
#t_start = time.time()
mark_CB = 0
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, Lx, 0, Ly)
Fx = Fx-B*vx
Fy = Fy-B*vy
Ep[nt] = Ep_now
cont[nt] = cont_now
if mark_CB == 0 and cont_now<cont[0]:
print("nt_CB=%d" % nt)
mark_CB = 1
ax = np.divide(Fx, m0);
ay = np.divide(Fy, m0);
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
for ii in np.arange(len(nt_rec)-1):
Ek_rec.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_rec.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_rec.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
nt_rec = (nt_rec[0:-1]+nt_rec[1:])/2
CB_ratio = min(cont)/max(cont)
print ("cont_min/cont_max=%f\n" %(CB_ratio))
print ("Ek_last=%.3e" % Ek[-1])
return x, y, nt_rec, Ek_rec, Ep_rec, cont_rec
def MD_Vibr3Part_ConstV(B, Nt, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr, ind_in_all, ind_out, mark_vibrY, eigen_mode_now):
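    """Drive a set of input particles (ind_in_all) sinusoidally at Freq_Vibr,
    with per-particle amplitudes taken from the eigenmode pattern
    eigen_mode_now and normalized so the largest equals Amp_Vibr. The drive is
    along x (mark_vibrY == 0) or y (mark_vibrY == 1); motion is damped by
    -B*v. Returns FFTs of the input signal and of the output particle."""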
dt = D0[0]/40
Nt = int(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
num_in = ind_in_all.size
Phase_Vibr = np.sin(Freq_Vibr*dt*np.arange(Nt))
Amp_Vibr_all = np.zeros(num_in)
for i_in in np.arange(num_in):
ind_in = ind_in_all[i_in]
if mark_vibrY == 0:
Amp_Vibr_all[i_in] = eigen_mode_now[2*ind_in]
elif mark_vibrY == 1:
Amp_Vibr_all[i_in] = eigen_mode_now[2*ind_in+1]
print(ind_in_all)
print(Amp_Vibr_all)
Amp_Vibr_all = Amp_Vibr_all*Amp_Vibr/max(np.abs(Amp_Vibr_all))
print(Amp_Vibr_all)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
for i_in in np.arange(num_in):
ind_in = ind_in_all[i_in]
if mark_vibrY == 0:
x[ind_in] = Phase_Vibr[nt]*Amp_Vibr_all[i_in]+x_ini[ind_in]
y[ind_in] = y_ini[ind_in]
elif mark_vibrY == 1:
x[ind_in] = x_ini[ind_in]
y[ind_in] = Phase_Vibr[nt]*Amp_Vibr_all[i_in]+y_ini[ind_in]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, L[0], 0, L[1])
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx-B*vx
Fy_all = Fy-B*vy
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_now = np.array(0)
Ep_now = np.array(0)
cont_now = np.array(0)
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_now = np.append(cont_now, np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
cont_now[0] = cont_now[1]
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
if mark_vibrY == 0:
x_in = Phase_Vibr*Amp_Vibr
freq_fft, fft_in = FFT_Fup(int(Nt/2), x_in[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_vibrY == 1:
y_in = Phase_Vibr*Amp_Vibr
freq_fft, fft_in = FFT_Fup(int(Nt/2), y_in[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_x_out = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_y_out = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr)
return freq_fft, fft_in, fft_x_out, fft_y_out, np.mean(cont), nt_rec, Ek_now, Ep_now, cont_now
def MD_dPhiSignal(mark_collision, beta, Nt, N, x_ini, y_ini, d0, phi0, m0, Lx, Ly, Freq_Vibr, Amp_Vibr, ind_in, ind_out, dphi_scheme, dphi_on, dphi_off, num_gap, mark_vibrY):
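    """Driven run in which all particle diameters are modulated in time via a
    packing-fraction offset: D = d0*sqrt(1 + dphi/phi0), with dphi switching
    between dphi_on and dphi_off according to dphi_scheme (1: on then off,
    -1: off then on, otherwise num_gap alternating segments). Particle ind_in
    is position-driven sinusoidally; mark_collision selects collisional versus
    viscous (beta) damping. Returns the sampled dphi schedule, the output
    trajectory, and block-averaged records."""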
dt = d0/40
Nt = int(Nt)
if dphi_scheme == 1:
nt_dphi = np.linspace(0, Nt, 3)
nt_dphi = nt_dphi.astype(int)
dphi_all = np.zeros(Nt)+dphi_on
dphi_all[nt_dphi[1]:nt_dphi[2]] = dphi_off
elif dphi_scheme == -1:
nt_dphi = np.linspace(0, Nt, 3)
nt_dphi = nt_dphi.astype(int)
dphi_all = np.zeros(Nt)+dphi_off
dphi_all[nt_dphi[1]:nt_dphi[2]] = dphi_on
else:
dphi_all = np.zeros(Nt)+dphi_on
nt_dphi = np.linspace(0, Nt, num_gap+1)
nt_dphi = nt_dphi.astype(int)
for ii in np.arange(1, num_gap, 2):
dphi_all[nt_dphi[ii]:nt_dphi[ii+1]] = dphi_off
D_ini = np.zeros(N)+d0
nt_rec = np.linspace(0, Nt, int(Nt/5e4*num_gap/5)+1)
Ek_rec = np.array(0)
Ep_rec = np.array(0)
cont_rec = np.array(0)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
if mark_vibrY == 1:
y_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))+y_ini[ind_in]
vy_in = Amp_Vibr*Freq_Vibr*np.cos(Freq_Vibr*dt*np.arange(Nt))
else:
x_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))+x_ini[ind_in]
vx_in = Amp_Vibr*Freq_Vibr*np.cos(Freq_Vibr*dt*np.arange(Nt))
#y_bot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
D0 = D_ini*np.sqrt(1+dphi_all[nt]/phi0)
#if np.mod(nt,100000) == 0:
#print(D0[3])
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
if mark_vibrY == 1:
y[ind_in] = y_in[nt]
x[ind_in] = x_ini[ind_in]
vy[ind_in] = vy_in[nt]
vx[ind_in] = 0
else:
x[ind_in] = x_in[nt]
y[ind_in] = y_ini[ind_in]
vx[ind_in] = vx_in[nt]
vy[ind_in] = 0
Fx = np.zeros(N)
Fy = np.zeros(N)
if mark_collision == 1:
Fx, Fy, Ep_now, cont_now, p_now = force_YFixed_collision_ConstV(beta, Fx, Fy, N, x, y, vx, vy, D0, Lx, 0, Ly)
Fx_all = Fx
Fy_all = Fy
else:
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, Lx, 0, Ly)
Fx_all = Fx-beta*vx
Fy_all = Fy-beta*vy
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
for ii in np.arange(len(nt_rec)-1):
Ek_rec = np.append(Ek_rec, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_rec = np.append(Ep_rec, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_rec = np.append(cont_rec, np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
print ("freq=%f, cont_min=%d, cont_max=%d, cont_ave=%f\n" %(Freq_Vibr, min(cont), max(cont), np.mean(cont)))
nt_dphi = np.arange(0, Nt, 100)
if mark_vibrY == 1:
xy_out = y_out
else:
xy_out = x_out
return nt_dphi, dphi_all[nt_dphi], xy_out, nt_rec[1:], Ek_rec[1:], Ep_rec[1:], cont_rec[1:]
def Damping_calc(Damp_scheme, B, N, x, y, vx, vy, Lx, Ly):
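    """Velocity-dependent damping forces for several schemes: 1 is linear
    (-B*v), 2 is quadratic (~ -B*v*|v|), 3 scales as -B*v/sqrt(|v|), and 4-7
    are exponentially velocity-gated variants with different gate scales and
    prefactors."""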
Fx_damp = np.zeros(N)
Fy_damp = np.zeros(N)
if Damp_scheme == 1:
Fx_damp = -B*vx
Fy_damp = -B*vy
if Damp_scheme == 2:
Fx_damp = -B*vx*np.abs(vx)*5e5
Fy_damp = -B*vy*np.abs(vy)*5e5
if Damp_scheme == 3:
        # tiny epsilon avoids 0/0 = NaN when a velocity component is exactly zero
        Fx_damp = -B*vx/np.sqrt(np.abs(vx)+1e-30)*np.sqrt(2e-6)
        Fy_damp = -B*vy/np.sqrt(np.abs(vy)+1e-30)*np.sqrt(2e-6)
if Damp_scheme == 4:
Fx_damp = -B*vx*np.exp(-5e4*np.abs(vx)+1)*0.1
Fy_damp = -B*vy*np.exp(-5e4*np.abs(vy)+1)*0.1
if Damp_scheme == 5:
Fx_damp = -B*vx*np.exp(-5e5*np.abs(vx)+1)
Fy_damp = -B*vy*np.exp(-5e5*np.abs(vy)+1)
if Damp_scheme == 6:
Fx_damp = -B*vx*np.exp(-5e6*np.abs(vx)+1)*10
Fy_damp = -B*vy*np.exp(-5e6*np.abs(vy)+1)*10
if Damp_scheme == 7:
Fx_damp = -B*vx*np.exp(-5e7*np.abs(vx)+1)*100
Fy_damp = -B*vy*np.exp(-5e7*np.abs(vy)+1)*100
return Fx_damp, Fy_damp
def Force_FixedPos_calc(k, N, x, y, x0, y0, D0, vx, vy, Lx, Ly):
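    """Linear spring tethering each particle to its reference position
    (x0, y0), with minimum-image wrapping in both x and y. The effective
    stiffness is k scaled by 1/Dmn^2 (Dmn = particle radius). Returns the
    tether forces and the total tether potential energy."""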
Fx_damp = np.zeros(N)
Fy_damp = np.zeros(N)
Ep = 0
for nn in np.arange(N):
dy = y[nn]-y0[nn]
dy = dy-round(dy/Ly)*Ly
Dmn = 0.5*D0[nn]
dx = x[nn]-x0[nn]
dx = dx-round(dx/Lx)*Lx
dmn = np.sqrt(dx**2+dy**2)
if (dmn > 0):
F = -k*(dmn/Dmn/Dmn)/dmn
Fx_damp[nn] += F*dx
Fy_damp[nn] += F*dy
Ep += (1/2)*k*(dmn/Dmn)**2
return Fx_damp, Fy_damp, Ep
def MD_FilterCheck_Periodic_Equi_vCorr(Nt_damp, Nt_FFT, num_period, Damp_scheme, B, N, x0, y0, D0, m0, L, T_set, V_em, n_em):
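    """Periodic-box filter check: each period consists of a damping window
    (Nt_damp steps, using Damping_calc; skipped in the first period), a free
    window, and a measurement window (Nt_FFT steps) in which all velocities
    are recorded and a velocity-correlation spectrum is accumulated into
    freq_all/fft_all. Per-window diagnostics are printed."""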
if Damp_scheme < 0:
return
N = int(N)
Nt_FFT = int(Nt_FFT)
Nt_damp = int(Nt_damp)
dt = min(D0)/40
Nt_period = int(2*Nt_damp+Nt_FFT)
Nt = Nt_period*num_period
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
mark_damp = np.zeros(Nt)
mark_FFT = np.zeros(Nt)
for ii in np.arange(num_period):
if ii > 0:
t1 = ii*Nt_period
t2 = t1+Nt_damp
mark_damp[t1:t2] = 1
t3 = ii*Nt_period+2*Nt_damp
t4 = t3+Nt_FFT
mark_FFT[t3:t4] = 1
nt_rec = np.linspace(0, Nt, int(Nt/1e3)+1)
nt_rec = nt_rec.astype(int)
Ek_rec = []
Ep_rec = []
cont_rec = []
num_FFT = 0
vx = np.zeros(N)
vy = np.zeros(N)
for ii in np.arange(n_em):
ind1 = 2*np.arange(N)
ind2 = ind1+1
vx += V_em[ind1, ii]
vy += V_em[ind2, ii]
T_rd = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
vx = vx*np.sqrt(N*T_set/T_rd)
vy = vy*np.sqrt(N*T_set/T_rd)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Ep_now, cont_now, p_now = force_Regular(Fx, Fy, N, x, y, D0, L[0], L[1])
if mark_damp[nt] == 1:
Fx_damp, Fy_damp = Damping_calc(Damp_scheme, B, N, x, y, vx, vy, L[0], L[1])
Fx = Fx + Fx_damp
Fy = Fy + Fy_damp
Ep[nt] = Ep_now
cont[nt] = cont_now
ax = np.divide(Fx, m0);
ay = np.divide(Fy, m0);
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
if mark_FFT[nt] == 1:
if mark_FFT[nt-1] == 0:
nt_ref = nt
vx_rec = np.zeros([Nt_FFT, N])
vy_rec = np.zeros([Nt_FFT, N])
nt_delta = nt-nt_ref
vx_rec[nt_delta] = vx
vy_rec[nt_delta] = vy
if nt_delta == Nt_FFT-1:
num_FFT += 1
freq_now, fft_now = FFT_vCorr(Nt_FFT, N, vx_rec, vy_rec, dt)
if num_FFT == 1:
fft_all = np.array([fft_now])
freq_all = np.array([freq_now])
len_fft_ref = len(fft_now)
len_freq_ref = len(freq_now)
else:
fft_add = np.zeros(len_fft_ref)
freq_add = np.zeros(len_freq_ref)
len_fft_now = len(fft_now)
len_freq_now = len(freq_now)
if len_fft_now >= len_fft_ref:
fft_add[0:len_fft_ref] = fft_now[0:len_fft_ref]
else:
fft_add[0:len_fft_now] = fft_now[0:len_fft_now]
                        fft_add[len_fft_now:] = fft_now[-1]  # pad with the last value; index len_fft_now is out of range
if len_freq_now >= len_freq_ref:
freq_add[0:len_freq_ref] = freq_now[0:len_freq_ref]
else:
freq_add[0:len_freq_now] = freq_now[0:len_freq_now]
                        freq_add[len_freq_now:] = freq_now[-1]  # pad with the last value; index len_freq_now is out of range
fft_all = np.append(fft_all, [fft_add], axis=0)
freq_all = np.append(freq_all, [freq_add], axis=0)
print("FFT_iteration: %d" % num_FFT)
print("Ek_ave: %e" %(np.mean(Ek[nt_ref:nt])))
ind1 = m0>5
ind2 = m0<5
print("|vx|_ave(heavy):%e" % np.mean(np.abs(vx[ind1])))
print("|vx|_ave(light):%e" % np.mean(np.abs(vx[ind2])))
for ii in np.arange(len(nt_rec)-1):
Ek_rec.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_rec.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_rec.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
CB_ratio = min(cont)/max(cont)
print ("cont_min/cont_max=%f" %(CB_ratio))
return freq_all, fft_all, (nt_rec[:-1]+nt_rec[1:])/2, Ek_rec, Ep_rec, cont_rec
def MD_FilterCheck_Periodic_Equi_vCorr_Seperate(Nt_damp, Nt_FFT, num_period, Damp_scheme, k, B, N, x0, y0, D0, m0, L, T_set, V_em, n_em):
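    """Variant of MD_FilterCheck_Periodic_Equi_vCorr for Damp_scheme == -1:
    particles are tethered to their initial positions by fixed springs
    (Force_FixedPos_calc, plus optional viscous -B*v) for Nt_damp*num_period
    steps, after which a single velocity-correlation spectrum is measured."""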
# for damping scheme = -1 (fixed spring at initial position)
if Damp_scheme != -1:
return
N = int(N)
Nt_FFT = int(Nt_FFT)
Nt_damp = int(Nt_damp)
dt = min(D0)/40
Nt = Nt_damp*num_period+Nt_FFT
if num_period == 0:
Nt = Nt_damp+Nt_FFT
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
mark_FFT = np.zeros(Nt)
t1 = Nt_damp * num_period
if num_period == 0:
t1 = Nt_damp
t2 = t1 + Nt_FFT
mark_FFT[t1:t2] = 1
nt_rec = np.linspace(0, Nt, int(Nt/1e3)+1)
nt_rec = nt_rec.astype(int)
Ek_rec = []
Ep_rec = []
cont_rec = []
vx = np.zeros(N)
vy = np.zeros(N)
for ii in np.arange(n_em):
ind1 = 2*np.arange(N)
ind2 = ind1+1
vx += V_em[ind1, ii]
vy += V_em[ind2, ii]
T_rd = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
vx = vx*np.sqrt(N*T_set/T_rd)
vy = vy*np.sqrt(N*T_set/T_rd)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Ep_now, cont_now, p_now = force_Regular(Fx, Fy, N, x, y, D0, L[0], L[1])
        # damping is always applied except when num_period == 0
if num_period > 0:
Fx_damp, Fy_damp, Ep_fix = Force_FixedPos_calc(k, N, x, y, x0, y0, D0, vx, vy, L[0], L[1])
if (B > 0):
Fx_damp += -B*vx
Fy_damp += -B*vy
elif num_period == 0:
Fx_damp = 0
Fy_damp = 0
Ep_fix = 0
Ep_now += Ep_fix
Fx = Fx + Fx_damp
Fy = Fy + Fy_damp
Ep[nt] = Ep_now
cont[nt] = cont_now
ax = np.divide(Fx, m0);
ay = np.divide(Fy, m0);
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
if mark_FFT[nt] == 1:
if mark_FFT[nt-1] == 0:
nt_ref = nt
vx_rec = np.zeros([Nt_FFT, N])
vy_rec = np.zeros([Nt_FFT, N])
nt_delta = nt-nt_ref
vx_rec[nt_delta] = vx
vy_rec[nt_delta] = vy
if nt_delta == Nt_FFT-1:
freq_now, fft_now = FFT_vCorr(Nt_FFT, N, vx_rec, vy_rec, dt)
print ("Nt_End="+str(nt))
for ii in np.arange(len(nt_rec)-1):
Ek_rec.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_rec.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_rec.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
CB_ratio = min(cont)/max(cont)
print ("cont_min/cont_max=%f" %(CB_ratio))
return freq_now, fft_now, (nt_rec[:-1]+nt_rec[1:])/2, Ek_rec, Ep_rec, cont_rec
def MD_Periodic_Equi_vDistr(Nt_MD, Nt_rec, N, x0, y0, D0, m0, L, T_set, V_em, n_em):
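    """Equilibrium run in a periodic box; after Nt_MD steps, per-step velocity
    samples of heavy (m0 > 5) and light (m0 < 5) particles are collected for
    Nt_rec steps so their velocity distributions can be histogrammed."""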
N = int(N)
Nt_MD = int(Nt_MD)
Nt_rec = int(Nt_rec)
dt = min(D0)/40
Nt = Nt_MD+Nt_rec
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/1e3)+1)
nt_rec = nt_rec.astype(int)
Ek_rec = []
Ep_rec = []
cont_rec = []
ind1 = m0>5
ind2 = m0<5
vx_light = []
vx_heavy = []
vy_light = []
vy_heavy = []
vx = np.zeros(N)
vy = np.zeros(N)
    for ii in np.arange(n_em):
        # use separate index names so the heavy/light masks ind1/ind2 defined
        # above are not overwritten before they are used in the recording loop
        ind_x = 2*np.arange(N)
        ind_y = ind_x+1
        vx += V_em[ind_x, ii]
        vy += V_em[ind_y, ii]
T_rd = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
vx = vx*np.sqrt(N*T_set/T_rd)
vy = vy*np.sqrt(N*T_set/T_rd)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Ep_now, cont_now, p_now = force_Regular(Fx, Fy, N, x, y, D0, L[0], L[1])
Ep[nt] = Ep_now
cont[nt] = cont_now
ax = np.divide(Fx, m0);
ay = np.divide(Fy, m0);
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
if nt >= Nt_MD:
vx_light.extend(vx[ind2])
vy_light.extend(vy[ind2])
vx_heavy.extend(vx[ind1])
vy_heavy.extend(vy[ind1])
for ii in np.arange(len(nt_rec)-1):
Ek_rec.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_rec.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_rec.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
CB_ratio = min(cont)/max(cont)
print ("cont_min/cont_max=%f" %(CB_ratio))
return nt_rec, Ek_rec, Ep_rec, cont_rec, vx_light, vx_heavy, vy_light, vy_heavy
def Output_resonator_1D(Nt, x_drive, x0, m0, w0, dt):
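    """1-D harmonic resonator readout: a mass m0 with natural frequency w0
    (spring k = w0^2*m0) is driven by the displacement signal x_drive - x0 and
    integrated with Verlet. Returns the FFT of the resonator response over the
    second half of the run."""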
dx = x_drive - x0
k = w0**2*m0
x = 0
vx = 0
ax_old = 0
Nt = int(Nt)
x_rec = np.zeros(Nt)
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
x_rec[nt] = x
Fx = k*(dx[nt]-x)
ax = Fx/m0;
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
ax_old = ax;
freq_fft, fft_x_rec = FFT_Fup(int(Nt/2), x_rec[int(Nt/2):Nt], dt, w0)
return freq_fft, fft_x_rec
def MD_Periodic_vCorr(Nt, N, x0, y0, D0, m0, vx0, vy0, L, T_set):
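    """Periodic-box run starting from the given velocities (vx0, vy0) rescaled
    to total kinetic energy N*T_set; velocities over the second half of the
    run are recorded and turned into a velocity-correlation spectrum."""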
dt = min(D0)/40
Nt = int(Nt)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
vx_rec = np.zeros([int(Nt/2), N])
vy_rec = np.zeros([int(Nt/2), N])
vx = vx0
vy = vy0
T_rd = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
vx = vx*np.sqrt(N*T_set/T_rd)
vy = vy*np.sqrt(N*T_set/T_rd)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Ep_now, cont_now, p_now = force_Regular(Fx, Fy, N, x, y, D0, L[0], L[1])
Ep[nt] = Ep_now
cont[nt] = cont_now
ax = np.divide(Fx, m0);
ay = np.divide(Fy, m0);
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
if (nt >= Nt/2):
vx_rec[int(nt-Nt/2)] = vx
vy_rec[int(nt-Nt/2)] = vy
CB_ratio = min(cont)/max(cont)
print ("cont_min/cont_max=%f\n" %(CB_ratio))
freq_now, fft_now = FFT_vCorr(int(Nt/2), N, vx_rec, vy_rec, dt)
return freq_now, fft_now, np.mean(cont)
def MD_Period_ConstV_SD(Nt, N, x0, y0, D0, m0, Lx, Ly):
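    """Steepest-descent relaxation in a periodic box: each step sets
    v = 0.1*F/m and moves x += v*dt, i.e. overdamped motion along the force.
    Returns the relaxed positions and the final pressure."""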
dt = D0[0]/40
Nt = int(Nt)
Ep = np.zeros(Nt)
F_tot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
#t_start = time.time()
for nt in np.arange(Nt):
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Ep_now, cont_now, p_now = force_Regular(Fx, Fy, N, x, y, D0, Lx, Ly)
Ep[nt] = Ep_now
vx = 0.1*np.divide(Fx, m0)
vy = 0.1*np.divide(Fy, m0)
x += vx*dt
y += vy*dt
F_tot[nt] = sum(np.absolute(Fx)+np.absolute(Fy))
#t_end = time.time()
print ("F_tot=%.3e" %(F_tot[nt]))
#print ("time=%.3e" %(t_end-t_start))
return x, y, p_now
def Force_FixedPos_YFixed_calc(k, N, x, y, x0, y0, D0, vx, vy, Lx, Ly):
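    """Same tether springs as Force_FixedPos_calc, but with minimum-image
    wrapping in x only (y is bounded by fixed walls)."""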
Fx_damp = np.zeros(N)
Fy_damp = np.zeros(N)
Ep = 0
for nn in np.arange(N):
dy = y[nn]-y0[nn]
Dmn = 0.5*D0[nn]
dx = x[nn]-x0[nn]
dx = dx-round(dx/Lx)*Lx
dmn = np.sqrt(dx**2+dy**2)
if (dmn > 0):
F = -k*(dmn/Dmn/Dmn)/dmn
Fx_damp[nn] += F*dx
Fy_damp[nn] += F*dy
Ep += (1/2)*k*(dmn/Dmn)**2
return Fx_damp, Fy_damp, Ep
def MD_VibrSP_ConstV_Yfixed_FixSpr(k, B, Nt, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr, ind_in, ind_out):
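    """Position-driven run (x drive on ind_in; mark_vibrY is hard-coded to 0)
    with tether springs of stiffness k plus viscous damping B. The output
    particle is read through the 1-D resonator (mark_resonator == 1).
    Returns FFTs of the input and output plus block-averaged records."""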
mark_vibrY = 0
mark_resonator = 1
dt = D0[0]/40
Nt = int(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
if mark_vibrY == 0:
x_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))+x_ini[ind_in]
elif mark_vibrY == 1:
y_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))+y_ini[ind_in]
#y_bot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
if mark_vibrY == 0:
x[ind_in] = x_in[nt]
y[ind_in] = y_ini[ind_in]
elif mark_vibrY == 1:
x[ind_in] = x_ini[ind_in]
y[ind_in] = y_in[nt]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, L[0], 0, L[1])
Fx_damp, Fy_damp, Ep_fix = Force_FixedPos_YFixed_calc(k, N, x, y, x_ini, y_ini, D0, vx, vy, L[0], L[1])
#Fx_damp = 0; Fy_damp = 0; Ep_fix = 0
Fx_damp += -B*vx
Fy_damp += -B*vy
Ep[nt] = Ep_now + Ep_fix
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx+Fx_damp
Fy_all = Fy+Fy_damp
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_now = []
Ep_now = []
cont_now = []
for ii in np.arange(len(nt_rec)-1):
Ek_now.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_now.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
nt_rec = (nt_rec[1:] + nt_rec[:-1]) / 2
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f, Ek_mean=%.3e, Ep_mean=%.3e\n" %(Freq_Vibr, CB_ratio, np.mean(Ek), np.mean(Ep)))
if mark_vibrY == 0:
freq_fft, fft_in = FFT_Fup(int(Nt/2), x_in[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_vibrY == 1:
freq_fft, fft_in = FFT_Fup(int(Nt/2), y_in[int(Nt/2):Nt], dt, Freq_Vibr)
if mark_resonator == 0:
freq_fft, fft_x_out = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_y_out = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_resonator == 1:
freq_fft, fft_x_out = Output_resonator_1D(Nt, x_out[0:Nt], x_ini[ind_out], m0[ind_out], Freq_Vibr, dt)
freq_fft, fft_y_out = Output_resonator_1D(Nt, y_out[0:Nt], y_ini[ind_out], m0[ind_out], Freq_Vibr, dt)
return freq_fft, fft_in, fft_x_out, fft_y_out, np.mean(cont), nt_rec, Ek_now, Ep_now, cont_now
def MD_VibrSP_ConstV_Yfixed_FixSpr2(k, B, Nt, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out):
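    """Two-input variant of MD_VibrSP_ConstV_Yfixed_FixSpr: particles ind_in1
    and ind_in2 are both driven at the common frequency Freq_Vibr with
    amplitudes Amp_Vibr1/Amp_Vibr2, and the output FFT is taken directly
    (mark_resonator == 0)."""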
mark_vibrY = 0
mark_resonator = 0
dt = D0[0]/40
Nt = int(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
if mark_vibrY == 0:
x_in1 = Amp_Vibr1*np.sin(Freq_Vibr*dt*np.arange(Nt))+x_ini[ind_in1]
x_in2 = Amp_Vibr2*np.sin(Freq_Vibr*dt*np.arange(Nt))+x_ini[ind_in2]
elif mark_vibrY == 1:
y_in1 = Amp_Vibr1*np.sin(Freq_Vibr*dt*np.arange(Nt))+y_ini[ind_in1]
y_in2 = Amp_Vibr2*np.sin(Freq_Vibr*dt*np.arange(Nt))+y_ini[ind_in2]
#y_bot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
if mark_vibrY == 0:
x[ind_in1] = x_in1[nt]
y[ind_in1] = y_ini[ind_in1]
x[ind_in2] = x_in2[nt]
y[ind_in2] = y_ini[ind_in2]
elif mark_vibrY == 1:
x[ind_in1] = x_ini[ind_in1]
y[ind_in1] = y_in1[nt]
x[ind_in2] = x_ini[ind_in2]
y[ind_in2] = y_in2[nt]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, L[0], 0, L[1])
Fx_damp, Fy_damp, Ep_fix = Force_FixedPos_YFixed_calc(k, N, x, y, x_ini, y_ini, D0, vx, vy, L[0], L[1])
#Fx_damp = 0; Fy_damp = 0; Ep_fix = 0
Fx_damp += -B*vx
Fy_damp += -B*vy
Ep[nt] = Ep_now + Ep_fix
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx+Fx_damp
Fy_all = Fy+Fy_damp
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_now = []
Ep_now = []
cont_now = []
for ii in np.arange(len(nt_rec)-1):
Ek_now.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_now.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
nt_rec = (nt_rec[1:] + nt_rec[:-1]) / 2
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
#print ("freq=%f, cont_min/cont_max=%f, Ek_mean=%.3e, Ep_mean=%.3e\n" %(Freq_Vibr, CB_ratio, np.mean(Ek), np.mean(Ep)))
if mark_vibrY == 0:
freq_fft, fft_in1 = FFT_Fup(int(Nt/2), x_in1[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_in2 = FFT_Fup(int(Nt/2), x_in2[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_vibrY == 1:
freq_fft, fft_in1 = FFT_Fup(int(Nt/2), y_in1[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_in2 = FFT_Fup(int(Nt/2), y_in2[int(Nt/2):Nt], dt, Freq_Vibr)
if mark_resonator == 0:
freq_fft, fft_x_out = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_y_out = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_resonator == 1:
freq_fft, fft_x_out = Output_resonator_1D(Nt, x_out[0:Nt], x_ini[ind_out], m0[ind_out], Freq_Vibr, dt)
freq_fft, fft_y_out = Output_resonator_1D(Nt, y_out[0:Nt], y_ini[ind_out], m0[ind_out], Freq_Vibr, dt)
return freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, np.mean(cont), nt_rec, Ek_now, Ep_now, cont_now
def MD_VibrSP_ConstV_Yfixed_FixSpr3(k, B, Nt, N, x_ini, y_ini, D0, m0, L, Freq_Vibr1, Amp_Vibr1, ind_in1, Freq_Vibr2, Amp_Vibr2, ind_in2, ind_out):
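    """Like MD_VibrSP_ConstV_Yfixed_FixSpr2, but the two inputs are driven at
    independent frequencies Freq_Vibr1 and Freq_Vibr2."""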
mark_vibrY = 0
mark_resonator = 0
dt = D0[0]/40
Nt = int(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
if mark_vibrY == 0:
x_in1 = Amp_Vibr1*np.sin(Freq_Vibr1*dt*np.arange(Nt))+x_ini[ind_in1]
x_in2 = Amp_Vibr2*np.sin(Freq_Vibr2*dt*np.arange(Nt))+x_ini[ind_in2]
elif mark_vibrY == 1:
y_in1 = Amp_Vibr1*np.sin(Freq_Vibr1*dt*np.arange(Nt))+y_ini[ind_in1]
y_in2 = Amp_Vibr2*np.sin(Freq_Vibr2*dt*np.arange(Nt))+y_ini[ind_in2]
#y_bot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
if mark_vibrY == 0:
x[ind_in1] = x_in1[nt]
y[ind_in1] = y_ini[ind_in1]
x[ind_in2] = x_in2[nt]
y[ind_in2] = y_ini[ind_in2]
elif mark_vibrY == 1:
x[ind_in1] = x_ini[ind_in1]
y[ind_in1] = y_in1[nt]
x[ind_in2] = x_ini[ind_in2]
y[ind_in2] = y_in2[nt]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, L[0], 0, L[1])
Fx_damp, Fy_damp, Ep_fix = Force_FixedPos_YFixed_calc(k, N, x, y, x_ini, y_ini, D0, vx, vy, L[0], L[1])
#Fx_damp = 0; Fy_damp = 0; Ep_fix = 0
Fx_damp += -B*vx
Fy_damp += -B*vy
Ep[nt] = Ep_now + Ep_fix
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx+Fx_damp
Fy_all = Fy+Fy_damp
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_now = []
Ep_now = []
cont_now = []
for ii in np.arange(len(nt_rec)-1):
Ek_now.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_now.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
nt_rec = (nt_rec[1:] + nt_rec[:-1]) / 2
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f, Ek_mean=%.3e, Ep_mean=%.3e\n" %(Freq_Vibr1, CB_ratio, np.mean(Ek), np.mean(Ep)))
if mark_vibrY == 0:
freq_fft, fft_in1 = FFT_Fup(int(Nt/2), x_in1[int(Nt/2):Nt], dt, Freq_Vibr1)
freq_fft, fft_in2 = FFT_Fup(int(Nt/2), x_in2[int(Nt/2):Nt], dt, Freq_Vibr2)
elif mark_vibrY == 1:
freq_fft, fft_in1 = FFT_Fup(int(Nt/2), y_in1[int(Nt/2):Nt], dt, Freq_Vibr1)
freq_fft, fft_in2 = FFT_Fup(int(Nt/2), y_in2[int(Nt/2):Nt], dt, Freq_Vibr2)
if mark_resonator == 0:
freq_fft, fft_x_out = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr1)
freq_fft, fft_y_out = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr1)
elif mark_resonator == 1:
freq_fft, fft_x_out = Output_resonator_1D(Nt, x_out[0:Nt], x_ini[ind_out], m0[ind_out], Freq_Vibr1, dt)
freq_fft, fft_y_out = Output_resonator_1D(Nt, y_out[0:Nt], y_ini[ind_out], m0[ind_out], Freq_Vibr1, dt)
return freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, np.mean(cont), nt_rec, Ek_now, Ep_now, cont_now
def MD_VibrSP_Force_ConstV(B, Nt, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr, ind_in, ind_out, mark_vibrY, mark_resonator):
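    """Force-driven variant: instead of prescribing the input particle's
    position, a sinusoidal force Amp_Vibr*sin(Freq_Vibr*t) is added to
    particle ind_in each step (along x or y per mark_vibrY), and the actual
    input trajectory is recorded for the input FFT. The output is read either
    directly or through the 1-D resonator, per mark_resonator."""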
dt = D0[0]/40
Nt = int(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
x_in = np.zeros(Nt)
y_in = np.zeros(Nt)
if mark_vibrY == 0:
Fx_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))
elif mark_vibrY == 1:
Fy_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))
#y_bot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
x_in[nt] = x[ind_in]
y_in[nt] = y[ind_in]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, L[0], 0, L[1])
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx-B*vx
Fy_all = Fy-B*vy
if mark_vibrY == 0:
Fx_all[ind_in] += Fx_in[nt]
elif mark_vibrY == 1:
Fy_all[ind_in] += Fy_in[nt]
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_now = np.array(0)
Ep_now = np.array(0)
cont_now = np.array(0)
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_now = np.append(cont_now, np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
cont_now[0] = cont_now[1]
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
if mark_vibrY == 0:
freq_fft, fft_in = FFT_Fup(int(Nt/2), x_in[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_vibrY == 1:
freq_fft, fft_in = FFT_Fup(int(Nt/2), y_in[int(Nt/2):Nt], dt, Freq_Vibr)
if mark_resonator == 0:
freq_fft, fft_x_out = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_y_out = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_resonator == 1:
freq_fft, fft_x_out = Output_resonator_1D(Nt, x_out[0:Nt], x_ini[ind_out], m0[ind_out], Freq_Vibr, dt)
freq_fft, fft_y_out = Output_resonator_1D(Nt, y_out[0:Nt], y_ini[ind_out], m0[ind_out], Freq_Vibr, dt)
if Nt == 5e5:
print(x[ind_out], y[ind_out])
print(fft_x_out[100], fft_y_out[100])
print(fft_in[100])
return freq_fft, fft_in, fft_x_out, fft_y_out, np.mean(cont), nt_rec, Ek_now, Ep_now, cont_now
def MD_VibrSP_ConstV_ConfigCB(B, Nt, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr, ind_in, ind_out, Nt_rec):
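    """Runs the standard position-driven dynamics but only returns the
    configuration (x_rec, y_rec) snapshotted at step Nt_rec."""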
mark_vibrY = 0
dt = D0[0]/40
Nt = int(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
if mark_vibrY == 0:
x_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))+x_ini[ind_in]
elif mark_vibrY == 1:
y_in = Amp_Vibr*np.sin(Freq_Vibr*dt*np.arange(Nt))+y_ini[ind_in]
#y_bot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
#t_start = time.time()
for nt in np.arange(Nt):
        if nt == Nt_rec:
            x_rec = np.array(x)  # explicit copy: x[:] on an ndarray is only a view
            y_rec = np.array(y)
x = x+vx*dt+ax_old*dt**2/2; # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2;
if mark_vibrY == 0:
x[ind_in] = x_in[nt]
y[ind_in] = y_ini[ind_in]
elif mark_vibrY == 1:
x[ind_in] = x_ini[ind_in]
y[ind_in] = y_in[nt]
Fx = np.zeros(N)
Fy = np.zeros(N)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed(Fx, Fy, N, x, y, D0, L[0], 0, L[1])
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx-B*vx
Fy_all = Fy-B*vy
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2; # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2;
ax_old = ax;
ay_old = ay;
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_now = np.array(0)
Ep_now = np.array(0)
cont_now = np.array(0)
for ii in np.arange(len(nt_rec)-1):
Ek_now = np.append(Ek_now, np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now = np.append(Ep_now, np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_now = np.append(cont_now, np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
cont_now[0] = cont_now[1]
CB_ratio = min(cont)/max(cont)
print ("freq=%f, cont_min/cont_max=%f\n" %(Freq_Vibr, CB_ratio))
return x_rec, y_rec
def VL_YFixed_ConstV(N, x, y, D, Lx, VL_list, VL_counter_old, x_save, y_save, first_call):
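    """Verlet neighbor-list builder: the list radius is 1.2*max(D) and the
    skin is 0.2*max(D). On subsequent calls (first_call == 0) the old list is
    reused until some particle has moved farther than the skin since the last
    build; otherwise all pairs within the list radius are recollected.
    Returns the list, the pair count, and the reference positions."""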
r_factor = 1.2
r_cut = np.amax(D)
r_list = r_factor * r_cut
r_list_sq = r_list**2
r_skin_sq = ((r_factor - 1.0) * r_cut)**2
if first_call == 0:
dr_sq_max = 0.0
for nn in np.arange(N):
dy = y[nn] - y_save[nn]
dx = x[nn] - x_save[nn]
dx = dx - round(dx / Lx) * Lx
dr_sq = dx**2 + dy**2
if dr_sq > dr_sq_max:
dr_sq_max = dr_sq
if dr_sq_max < r_skin_sq:
return VL_list, VL_counter_old, x_save, y_save
VL_counter = 0
for nn in np.arange(N):
r_now = 0.5*D[nn]
for mm in np.arange(nn+1, N):
dy = y[mm]-y[nn]
Dmn = 0.5*(D[mm]+D[nn])
if abs(dy) < r_list:
dx = x[mm]-x[nn]
dx = dx - round(dx / Lx) * Lx
if abs(dx) < r_list:
dmn_sq = dx**2 + dy**2
if dmn_sq < r_list_sq:
VL_list[VL_counter][0] = nn
VL_list[VL_counter][1] = mm
VL_counter += 1
return VL_list, VL_counter, x, y
def MD_YFixed_ConstV_SP_SD_DiffK(Nt, N, x0, y0, D0, m0, Lx, Ly, k_list, k_type):
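    """Steepest-descent minimization with per-pair stiffnesses (k_list/k_type)
    and a Verlet neighbor list; each step sets v = 0.1*F (no mass division)
    and the loop stops once the total absolute force falls below 1e-11."""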
dt = D0[0] * np.sqrt(k_list[2]) / 20.0
Nt = int(Nt)
Ep = np.zeros(Nt)
F_tot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
x_save = np.array(x0)
y_save = np.array(y0)
Fx = np.zeros(N)
Fy = np.zeros(N)
VL_list = np.zeros((N * 10, 2), dtype=int)
VL_counter = 0
VL_list, VL_counter, x_save, y_save = VL_YFixed_ConstV(N, x, y, D0, Lx, VL_list, VL_counter, x_save, y_save, 1)
    #t_start = time.time()
for nt in np.arange(Nt):
Fx = np.zeros(N)
Fy = np.zeros(N)
VL_list, VL_counter, x_save, y_save = VL_YFixed_ConstV(N, x, y, D0, Lx, VL_list, VL_counter, x_save, y_save, 0)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed_DiffK_VL(Fx, Fy, N, x, y, D0, Lx, 0, Ly, k_list, k_type, VL_list, VL_counter)
Ep[nt] = Ep_now
vx = 0.1 * Fx
vy = 0.1 * Fy
x += vx * dt
y += vy * dt
F_tot[nt] = sum(np.absolute(Fx) + np.absolute(Fy))
# putting a threshold on total force
if (F_tot[nt] < 1e-11):
break
        #print(nt)
        #print(F_tot[nt])
    #t_end = time.time()
#print ("F_tot=%.3e" %(F_tot[nt]))
#print ("time=%.3e" %(t_end-t_start))
return x, y, p_now
def FIRE_YFixed_ConstV_DiffK(Nt, N, x0, y0, D0, m0, Lx, Ly, k_list, k_type):
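    """FIRE minimization with the same DiffK forces and Verlet list: the
    timestep and the velocity/force mixing parameter adapt with the sign of
    the power P = v.F (N_delay = 20, f_inc = 1.1, f_dec = 0.5,
    a_start = 0.15). Stops when the total absolute force falls below 1e-11 or
    after N_pn_max consecutive negative-power steps."""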
dt_md = 0.01 * D0[0] * np.sqrt(k_list[2])
N_delay = 20
N_pn_max = 2000
f_inc = 1.1
f_dec = 0.5
a_start = 0.15
f_a = 0.99
dt_max = 10.0 * dt_md
dt_min = 0.05 * dt_md
initialdelay = 1
Nt = int(Nt)
Ep = np.zeros(Nt)
F_tot = np.zeros(Nt)
vx = np.zeros(N)
vy = np.zeros(N)
x = np.array(x0)
y = np.array(y0)
x_save = np.array(x0)
y_save = np.array(y0)
Fx = np.zeros(N)
Fy = np.zeros(N)
VL_list = np.zeros((N * 10, 2), dtype=int)
VL_counter = 0
VL_list, VL_counter, x_save, y_save = VL_YFixed_ConstV(N, x, y, D0, Lx, VL_list, VL_counter, x_save, y_save, 1)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed_DiffK_VL(Fx, Fy, N, x, y, D0, Lx, 0, Ly, k_list, k_type, VL_list, VL_counter)
a_fire = a_start
delta_a_fire = 1.0 - a_fire
dt = dt_md
dt_half = dt / 2.0
N_pp = 0 # number of P being positive
N_pn = 0 # number of P being negative
## FIRE
for nt in np.arange(Nt):
# FIRE update
P = np.dot(vx, Fx) + np.dot(vy, Fy)
if P > 0.0:
N_pp += 1
N_pn = 0
if N_pp > N_delay:
dt = min(f_inc * dt, dt_max)
dt_half = dt / 2.0
a_fire = f_a * a_fire
delta_a_fire = 1.0 - a_fire
else:
N_pp = 0
N_pn += 1
if N_pn > N_pn_max:
break
if (initialdelay < 0.5) or (nt >= N_delay):
if f_dec * dt > dt_min:
dt = f_dec * dt
dt_half = dt / 2.0
a_fire = a_start
delta_a_fire = 1.0 - a_fire
x -= vx * dt_half
y -= vy * dt_half
vx = np.zeros(N)
vy = np.zeros(N)
# MD using Verlet method
vx += Fx * dt_half
vy += Fy * dt_half
rsc_fire = np.sqrt(np.sum(vx**2 + vy**2)) / np.sqrt(np.sum(Fx**2 + Fy**2))
vx = delta_a_fire * vx + a_fire * rsc_fire * Fx
vy = delta_a_fire * vy + a_fire * rsc_fire * Fy
x += vx * dt
y += vy * dt
Fx = np.zeros(N)
Fy = np.zeros(N)
VL_list, VL_counter, x_save, y_save = VL_YFixed_ConstV(N, x, y, D0, Lx, VL_list, VL_counter, x_save, y_save, 0)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed_DiffK_VL(Fx, Fy, N, x, y, D0, Lx, 0, Ly, k_list, k_type, VL_list, VL_counter)
Ep[nt] = Ep_now
F_tot[nt] = sum(np.absolute(Fx) + np.absolute(Fy))
# putting a threshold on total force
if (F_tot[nt] < 1e-11):
break
vx += Fx * dt_half
vy += Fy * dt_half
#print(nt)
#print(F_tot[nt])
    #t_end = time.time()
#print ("F_tot=%.3e" %(F_tot[nt]))
#print ("time=%.3e" %(t_end-t_start))
return x, y, p_now
def MD_VibrSP_ConstV_Yfixed_DiffK(k_list, k_type, B, Nt, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out):
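    """Two x-driven inputs (ind_in1, ind_in2) at the common frequency
    Freq_Vibr, viscous damping B, and DiffK pair forces evaluated through a
    Verlet neighbor list. Returns FFTs of both inputs and of the output."""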
Lx = L[0]
Ly = L[1]
mark_vibrY = 0
mark_resonator = 0
dt = D0[0]/40
Nt = int(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
F_tot = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
if mark_vibrY == 0:
x_in1 = Amp_Vibr1*np.sin(Freq_Vibr*dt*np.arange(Nt))+x_ini[ind_in1]
x_in2 = Amp_Vibr2*np.sin(Freq_Vibr*dt*np.arange(Nt))+x_ini[ind_in2]
elif mark_vibrY == 1:
y_in1 = Amp_Vibr1*np.sin(Freq_Vibr*dt*np.arange(Nt))+y_ini[ind_in1]
y_in2 = Amp_Vibr2*np.sin(Freq_Vibr*dt*np.arange(Nt))+y_ini[ind_in2]
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
x_save = np.array(x_ini)
y_save = np.array(y_ini)
Fx = np.zeros(N)
Fy = np.zeros(N)
VL_list = np.zeros((N * 10, 2), dtype=int)
VL_counter = 0
VL_list, VL_counter, x_save, y_save = VL_YFixed_ConstV(N, x, y, D0, Lx, VL_list, VL_counter, x_save, y_save, 1)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed_DiffK_VL(Fx, Fy, N, x, y, D0, Lx, 0, Ly, k_list, k_type, VL_list, VL_counter)
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2 # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2
if mark_vibrY == 0:
x[ind_in1] = x_in1[nt]
y[ind_in1] = y_ini[ind_in1]
x[ind_in2] = x_in2[nt]
y[ind_in2] = y_ini[ind_in2]
elif mark_vibrY == 1:
x[ind_in1] = x_ini[ind_in1]
y[ind_in1] = y_in1[nt]
x[ind_in2] = x_ini[ind_in2]
y[ind_in2] = y_in2[nt]
Fx = np.zeros(N)
Fy = np.zeros(N)
VL_list, VL_counter, x_save, y_save = VL_YFixed_ConstV(N, x, y, D0, Lx, VL_list, VL_counter, x_save, y_save, 0)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed_DiffK_VL(Fx, Fy, N, x, y, D0, Lx, 0, Ly, k_list, k_type, VL_list, VL_counter)
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx - B*vx
Fy_all = Fy - B*vy
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2 # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2
ax_old = ax
ay_old = ay
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_now = []
Ep_now = []
cont_now = []
for ii in np.arange(len(nt_rec)-1):
Ek_now.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_now.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
nt_rec = (nt_rec[1:] + nt_rec[:-1]) / 2
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
#print ("freq=%f, cont_min/cont_max=%f, Ek_mean=%.3e, Ep_mean=%.3e\n" %(Freq_Vibr, CB_ratio, np.mean(Ek), np.mean(Ep)))
if mark_vibrY == 0:
freq_fft, fft_in1 = FFT_Fup(int(Nt/2), x_in1[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_in2 = FFT_Fup(int(Nt/2), x_in2[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_vibrY == 1:
freq_fft, fft_in1 = FFT_Fup(int(Nt/2), y_in1[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_in2 = FFT_Fup(int(Nt/2), y_in2[int(Nt/2):Nt], dt, Freq_Vibr)
if mark_resonator == 0:
freq_fft, fft_x_out = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_y_out = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_resonator == 1:
freq_fft, fft_x_out = Output_resonator_1D(Nt, x_out[0:Nt], x_ini[ind_out], m0[ind_out], Freq_Vibr, dt)
freq_fft, fft_y_out = Output_resonator_1D(Nt, y_out[0:Nt], y_ini[ind_out], m0[ind_out], Freq_Vibr, dt)
return freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, np.mean(cont), nt_rec, Ek_now, Ep_now, cont_now
def MD_VibrSP_ConstV_Yfixed_DiffK_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D0, m0, L, Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out):
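    """Three-segment drive variant of MD_VibrSP_ConstV_Yfixed_DiffK: the
    inputs play Freq_Vibr1 for Nt steps, stay silent for Nt steps, then play
    Freq_Vibr2 for Nt steps (total run length 3*Nt)."""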
Lx = L[0]
Ly = L[1]
mark_vibrY = 0
mark_resonator = 0
dt = D0[0]/40
    temp = int(Nt)
    Nt = 3*temp
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
F_tot = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
#print(temp)
#print(np.concatenate((Amp_Vibr1*np.sin(Freq_Vibr1*dt*np.arange(temp)), 0*np.sin(Freq_Vibr1*dt*np.arange(temp)), 0*np.sin(Freq_Vibr1*dt*np.arange(temp)))))
if mark_vibrY == 0:
#print("no")
#x_in1 = Amp_Vibr1*np.sin(Freq_Vibr1*dt*np.arange(Nt))+Amp_Vibr1*np.sin(Freq_Vibr2*dt*np.arange(Nt))+y_ini[ind_in1]
#x_in2 = Amp_Vibr2*np.sin(Freq_Vibr2*dt*np.arange(Nt))+Amp_Vibr2*np.sin(Freq_Vibr2*dt*np.arange(Nt))+y_ini[ind_in2]
x_in1 = np.concatenate((Amp_Vibr1*np.sin(Freq_Vibr1*dt*np.arange(temp)), 0*np.sin(Freq_Vibr1*dt*np.arange(temp)), Amp_Vibr1*np.sin(Freq_Vibr2*dt*np.arange(temp))))+x_ini[ind_in1]
x_in2 = np.concatenate((Amp_Vibr2*np.sin(Freq_Vibr1*dt*np.arange(temp)), 0*np.sin(Freq_Vibr1*dt*np.arange(temp)), Amp_Vibr2*np.sin(Freq_Vibr2*dt*np.arange(temp))))+x_ini[ind_in2]
elif mark_vibrY == 1:
y_in1 = Amp_Vibr1*np.sin(Freq_Vibr1*dt*np.arange(Nt))+Amp_Vibr1*np.sin(Freq_Vibr2*dt*np.arange(Nt))+y_ini[ind_in1]
y_in2 = Amp_Vibr2*np.sin(Freq_Vibr2*dt*np.arange(Nt))+Amp_Vibr2*np.sin(Freq_Vibr2*dt*np.arange(Nt))+y_ini[ind_in2]
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
x_save = np.array(x_ini)
y_save = np.array(y_ini)
Fx = np.zeros(N)
Fy = np.zeros(N)
VL_list = np.zeros((N * 10, 2), dtype=int)
VL_counter = 0
VL_list, VL_counter, x_save, y_save = VL_YFixed_ConstV(N, x, y, D0, Lx, VL_list, VL_counter, x_save, y_save, 1)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed_DiffK_VL(Fx, Fy, N, x, y, D0, Lx, 0, Ly, k_list, k_type, VL_list, VL_counter)
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2 # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2
if mark_vibrY == 0:
x[ind_in1] = x_in1[nt]
y[ind_in1] = y_ini[ind_in1]
x[ind_in2] = x_in2[nt]
y[ind_in2] = y_ini[ind_in2]
elif mark_vibrY == 1:
x[ind_in1] = x_ini[ind_in1]
y[ind_in1] = y_in1[nt]
x[ind_in2] = x_ini[ind_in2]
y[ind_in2] = y_in2[nt]
Fx = np.zeros(N)
Fy = np.zeros(N)
VL_list, VL_counter, x_save, y_save = VL_YFixed_ConstV(N, x, y, D0, Lx, VL_list, VL_counter, x_save, y_save, 0)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed_DiffK_VL(Fx, Fy, N, x, y, D0, Lx, 0, Ly, k_list, k_type, VL_list, VL_counter)
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx - B*vx
Fy_all = Fy - B*vy
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2 # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2
ax_old = ax
ay_old = ay
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_now = []
Ep_now = []
cont_now = []
for ii in np.arange(len(nt_rec)-1):
Ek_now.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_now.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
nt_rec = (nt_rec[1:] + nt_rec[:-1]) / 2
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
#print ("freq=%f, cont_min/cont_max=%f, Ek_mean=%.3e, Ep_mean=%.3e\n" %(Freq_Vibr, CB_ratio, np.mean(Ek), np.mean(Ep)))
if mark_vibrY == 0:
freq_fft, fft_in1 = FFT_Fup(int(Nt/2), x_in1[int(Nt/2):Nt], dt, Freq_Vibr1)
freq_fft, fft_in2 = FFT_Fup(int(Nt/2), x_in2[int(Nt/2):Nt], dt, Freq_Vibr1)
elif mark_vibrY == 1:
freq_fft, fft_in1 = FFT_Fup(int(Nt/2), y_in1[int(Nt/2):Nt], dt, Freq_Vibr1)
freq_fft, fft_in2 = FFT_Fup(int(Nt/2), y_in2[int(Nt/2):Nt], dt, Freq_Vibr1)
if mark_resonator == 0:
freq_fft, fft_x_out = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr1)
freq_fft, fft_y_out = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr1)
elif mark_resonator == 1:
freq_fft, fft_x_out = Output_resonator_1D(Nt, x_out[0:Nt], x_ini[ind_out], m0[ind_out], Freq_Vibr1, dt)
freq_fft, fft_y_out = Output_resonator_1D(Nt, y_out[0:Nt], y_ini[ind_out], m0[ind_out], Freq_Vibr1, dt)
return freq_fft, fft_in1, fft_in2, fft_x_out, fft_y_out, np.mean(cont), nt_rec, Ek_now, Ep_now, cont_now
def MD_VibrSP_ConstV_Yfixed_DiffK2(k_list, k_type, B, Nt, N, x_ini, y_ini, D0, m0, L, Freq_Vibr, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out):
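    """Same dynamics as MD_VibrSP_ConstV_Yfixed_DiffK, but returns the raw
    input/output displacement time series (relative to the initial positions)
    instead of spectra."""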
Lx = L[0]
Ly = L[1]
mark_vibrY = 0
mark_resonator = 0
dt = D0[0]/40
Nt = int(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
F_tot = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
if mark_vibrY == 0:
x_in1 = Amp_Vibr1*np.sin(Freq_Vibr*dt*np.arange(Nt))+x_ini[ind_in1]
x_in2 = Amp_Vibr2*np.sin(Freq_Vibr*dt*np.arange(Nt))+x_ini[ind_in2]
elif mark_vibrY == 1:
y_in1 = Amp_Vibr1*np.sin(Freq_Vibr*dt*np.arange(Nt))+y_ini[ind_in1]
y_in2 = Amp_Vibr2*np.sin(Freq_Vibr*dt*np.arange(Nt))+y_ini[ind_in2]
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
x_save = np.array(x_ini)
y_save = np.array(y_ini)
Fx = np.zeros(N)
Fy = np.zeros(N)
VL_list = np.zeros((N * 10, 2), dtype=int)
VL_counter = 0
VL_list, VL_counter, x_save, y_save = VL_YFixed_ConstV(N, x, y, D0, Lx, VL_list, VL_counter, x_save, y_save, 1)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed_DiffK_VL(Fx, Fy, N, x, y, D0, Lx, 0, Ly, k_list, k_type, VL_list, VL_counter)
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2 # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2
if mark_vibrY == 0:
x[ind_in1] = x_in1[nt]
y[ind_in1] = y_ini[ind_in1]
x[ind_in2] = x_in2[nt]
y[ind_in2] = y_ini[ind_in2]
elif mark_vibrY == 1:
x[ind_in1] = x_ini[ind_in1]
y[ind_in1] = y_in1[nt]
x[ind_in2] = x_ini[ind_in2]
y[ind_in2] = y_in2[nt]
Fx = np.zeros(N)
Fy = np.zeros(N)
VL_list, VL_counter, x_save, y_save = VL_YFixed_ConstV(N, x, y, D0, Lx, VL_list, VL_counter, x_save, y_save, 0)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed_DiffK_VL(Fx, Fy, N, x, y, D0, Lx, 0, Ly, k_list, k_type, VL_list, VL_counter)
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx - B*vx
Fy_all = Fy - B*vy
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2 # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2
ax_old = ax
ay_old = ay
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_now = []
Ep_now = []
cont_now = []
for ii in np.arange(len(nt_rec)-1):
Ek_now.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_now.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
nt_rec = (nt_rec[1:] + nt_rec[:-1]) / 2
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
#print ("freq=%f, cont_min/cont_max=%f, Ek_mean=%.3e, Ep_mean=%.3e\n" %(Freq_Vibr, CB_ratio, np.mean(Ek), np.mean(Ep)))
if mark_vibrY == 0:
freq_fft, fft_in1 = FFT_Fup(int(Nt/2), x_in1[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_in2 = FFT_Fup(int(Nt/2), x_in2[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_vibrY == 1:
freq_fft, fft_in1 = FFT_Fup(int(Nt/2), y_in1[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_in2 = FFT_Fup(int(Nt/2), y_in2[int(Nt/2):Nt], dt, Freq_Vibr)
if mark_resonator == 0:
freq_fft, fft_x_out = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr)
freq_fft, fft_y_out = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr)
elif mark_resonator == 1:
freq_fft, fft_x_out = Output_resonator_1D(Nt, x_out[0:Nt], x_ini[ind_out], m0[ind_out], Freq_Vibr, dt)
freq_fft, fft_y_out = Output_resonator_1D(Nt, y_out[0:Nt], y_ini[ind_out], m0[ind_out], Freq_Vibr, dt)
return x_in1-x_ini[ind_in1], x_in2-x_ini[ind_in2], x_out-x_ini[ind_out]
def MD_VibrSP_ConstV_Yfixed_DiffK2_Freqs(k_list, k_type, B, Nt, N, x_ini, y_ini, D0, m0, L, Freq_Vibr1, Freq_Vibr2, Amp_Vibr1, ind_in1, Amp_Vibr2, ind_in2, ind_out):
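    """Three-segment drive variant of MD_VibrSP_ConstV_Yfixed_DiffK2; returns
    the raw displacement time series for the two inputs and the output."""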
Lx = L[0]
Ly = L[1]
mark_vibrY = 0
mark_resonator = 0
dt = D0[0]/40
temp = int(Nt)
Nt = 3*int(Nt)
nt_rec = np.linspace(0, Nt, int(Nt/5e4)+1)
nt_rec = nt_rec.astype(int)
Ep = np.zeros(Nt)
Ek = np.zeros(Nt)
cont = np.zeros(Nt)
p = np.zeros(Nt)
F_tot = np.zeros(Nt)
x_out = np.zeros(Nt)
y_out = np.zeros(Nt)
if mark_vibrY == 0:
x_in1 = np.concatenate((Amp_Vibr1*np.sin(Freq_Vibr1*dt*np.arange(temp)), 0*np.sin(Freq_Vibr1*dt*np.arange(temp)), Amp_Vibr1*np.sin(Freq_Vibr2*dt*np.arange(temp))))+x_ini[ind_in1]
x_in2 = np.concatenate((Amp_Vibr2*np.sin(Freq_Vibr1*dt*np.arange(temp)), 0*np.sin(Freq_Vibr1*dt*np.arange(temp)), Amp_Vibr2*np.sin(Freq_Vibr2*dt*np.arange(temp))))+x_ini[ind_in2]
elif mark_vibrY == 1:
y_in1 = Amp_Vibr1*np.sin(Freq_Vibr1*dt*np.arange(Nt))+Amp_Vibr1*np.sin(Freq_Vibr2*dt*np.arange(Nt))+y_ini[ind_in1]
y_in2 = Amp_Vibr2*np.sin(Freq_Vibr2*dt*np.arange(Nt))+Amp_Vibr2*np.sin(Freq_Vibr2*dt*np.arange(Nt))+y_ini[ind_in2]
vx = np.zeros(N)
vy = np.zeros(N)
ax_old = np.zeros(N)
ay_old = np.zeros(N)
x = np.array(x_ini)
y = np.array(y_ini)
x_save = np.array(x_ini)
y_save = np.array(y_ini)
Fx = np.zeros(N)
Fy = np.zeros(N)
VL_list = np.zeros((N * 10, 2), dtype=int)
VL_counter = 0
VL_list, VL_counter, x_save, y_save = VL_YFixed_ConstV(N, x, y, D0, Lx, VL_list, VL_counter, x_save, y_save, 1)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed_DiffK_VL(Fx, Fy, N, x, y, D0, Lx, 0, Ly, k_list, k_type, VL_list, VL_counter)
for nt in np.arange(Nt):
x = x+vx*dt+ax_old*dt**2/2 # first step in Verlet integration
y = y+vy*dt+ay_old*dt**2/2
#if nt>int(temp):
# print("yes")
# B=10
if mark_vibrY == 0:
x[ind_in1] = x_in1[nt]
y[ind_in1] = y_ini[ind_in1]
x[ind_in2] = x_in2[nt]
y[ind_in2] = y_ini[ind_in2]
elif mark_vibrY == 1:
x[ind_in1] = x_ini[ind_in1]
y[ind_in1] = y_in1[nt]
x[ind_in2] = x_ini[ind_in2]
y[ind_in2] = y_in2[nt]
Fx = np.zeros(N)
Fy = np.zeros(N)
VL_list, VL_counter, x_save, y_save = VL_YFixed_ConstV(N, x, y, D0, Lx, VL_list, VL_counter, x_save, y_save, 0)
Fx, Fy, Fup_now, Fbot_now, Ep_now, cont_now, p_now, cont_up = force_YFixed_DiffK_VL(Fx, Fy, N, x, y, D0, Lx, 0, Ly, k_list, k_type, VL_list, VL_counter)
Ep[nt] = Ep_now
cont[nt] = cont_now
p[nt] = p_now
Fx_all = Fx - B*vx
Fy_all = Fy - B*vy
x_out[nt] = x[ind_out]
y_out[nt] = y[ind_out]
ax = np.divide(Fx_all, m0)
ay = np.divide(Fy_all, m0)
vx = vx+(ax_old+ax)*dt/2 # second step in Verlet integration
vy = vy+(ay_old+ay)*dt/2
ax_old = ax
ay_old = ay
Ek[nt] = sum(0.5*np.multiply(m0,np.multiply(vx, vx)+np.multiply(vy, vy)))
Ek_now = []
Ep_now = []
cont_now = []
for ii in np.arange(len(nt_rec)-1):
Ek_now.append(np.mean(Ek[nt_rec[ii]:nt_rec[ii+1]]))
Ep_now.append(np.mean(Ep[nt_rec[ii]:nt_rec[ii+1]]))
cont_now.append(np.mean(cont[nt_rec[ii]:nt_rec[ii+1]]))
nt_rec = (nt_rec[1:] + nt_rec[:-1]) / 2
#t_end = time.time()
#print ("time=%.3e" %(t_end-t_start))
CB_ratio = min(cont)/max(cont)
#print ("freq=%f, cont_min/cont_max=%f, Ek_mean=%.3e, Ep_mean=%.3e\n" %(Freq_Vibr, CB_ratio, np.mean(Ek), np.mean(Ep)))
if mark_vibrY == 0:
freq_fft, fft_in1 = FFT_Fup(int(Nt/2), x_in1[int(Nt/2):Nt], dt, Freq_Vibr1)
freq_fft, fft_in2 = FFT_Fup(int(Nt/2), x_in2[int(Nt/2):Nt], dt, Freq_Vibr1)
elif mark_vibrY == 1:
freq_fft, fft_in1 = FFT_Fup(int(Nt/2), y_in1[int(Nt/2):Nt], dt, Freq_Vibr1)
freq_fft, fft_in2 = FFT_Fup(int(Nt/2), y_in2[int(Nt/2):Nt], dt, Freq_Vibr1)
if mark_resonator == 0:
freq_fft, fft_x_out = FFT_Fup(int(Nt/2), x_out[int(Nt/2):Nt], dt, Freq_Vibr1)
freq_fft, fft_y_out = FFT_Fup(int(Nt/2), y_out[int(Nt/2):Nt], dt, Freq_Vibr1)
elif mark_resonator == 1:
freq_fft, fft_x_out = Output_resonator_1D(Nt, x_out[0:Nt], x_ini[ind_out], m0[ind_out], Freq_Vibr1, dt)
freq_fft, fft_y_out = Output_resonator_1D(Nt, y_out[0:Nt], y_ini[ind_out], m0[ind_out], Freq_Vibr1, dt)
return x_in1-x_ini[ind_in1], x_in2-x_ini[ind_in2], x_out-x_ini[ind_out]