|
import gradio as gr |
|
from load_model import load_model |
|
import matplotlib.pyplot as plt |
|
from tensorflow.keras import layers |
|
from sklearn.datasets import make_moons |
|
import matplotlib.pyplot as plt |
|
import numpy as np |
|
|
|
# Load the pretrained Real NVP flow model.
model = load_model()


# Build the two-moons toy dataset (labels discarded) and cast to float32
# to match the model's expected input dtype.
moons, _ = make_moons(3000, noise=0.05)
data = moons.astype("float32")

# NOTE(review): layers.experimental.preprocessing.Normalization is the legacy
# import path; newer TF exposes layers.Normalization directly. Kept as-is for
# compatibility with the environment this demo was built against.
norm = layers.experimental.preprocessing.Normalization()
norm.adapt(data)
normalized_data = norm(data)

# Forward pass through the flow: z holds the latent-space image of every
# normalized data point (log-det term discarded).
z, _ = model(normalized_data)
|
|
|
# Top-level Blocks container; every component created inside the `with demo:`
# context below is registered into this app.
demo = gr.Blocks()



with demo:

    # Intro text: describes the Real NVP demo and credits the original authors.
    gr.Markdown("""# Density estimation using Real NVP <br>

This demo shows a toy example of using Real NVP (real-valued non-volume preserving transformations)

from this [example](https://keras.io/examples/generative/real_nvp/). Below we have two tabs. The first, Inference, shows

our mapping from a data distribution (moons) to a latent space with a known distribution (Gaussian). Click the button to see how a data point from our distribution maps

to our latent space. Our second tab allows you to generate a sample from our latent space, and view the generated data space that is associated with it.



Full credits for this model & example

go to <br>[Mandolini Giorgio Maria](https://www.linkedin.com/in/giorgio-maria-mandolini-a2a1b71b4/),

[Sanna Daniele](https://www.linkedin.com/in/daniele-sanna-338629bb/),

and [Zannini Quirini Giorgio](https://www.linkedin.com/in/giorgio-zannini-quirini-16ab181a0/).<br>

Demo by [Brenden Connors](https://www.linkedin.com/in/brenden-connors-6a0512195).""")



    with gr.Tabs():

        with gr.TabItem('Inference'):

            # Triggers inference_sample (wired up near the bottom of the file).
            button = gr.Button(value='Infer Sample Point')



            # Initial plots drawn once at startup: the full moons data set
            # (left) and its latent-space image z (right), side by side.
            with gr.Row():

                fig = plt.figure()

                plt.scatter(normalized_data[:, 0], normalized_data[:, 1], color="r")

                plt.xlim([-2, 2])

                plt.ylim([-2, 2])

                plt.title('Inference Data Space')

                fig2 = plt.figure()

                plt.scatter(z[:, 0], z[:, 1], color="r")

                plt.xlim([-3.5, 4])

                plt.ylim([-3.5, 4])

                plt.title('Inference Latent Space')

                data_space = gr.Plot(value = fig)

                latent_space = gr.Plot(value = fig2)

        with gr.TabItem('Generation'):

            # Triggers generate (wired up near the bottom of the file).
            button_generate = gr.Button('Generate')



            # Empty placeholder figures; generate() replaces them on click.
            with gr.Row():

                fig3 = plt.figure()



                fig4 = plt.figure()

                generated_lspace = gr.Plot(fig3)

                generated_dspace = gr.Plot(fig4)
|
|
|
def inference_sample(): |
|
idx = np.random.choice(normalized_data.shape[0]) |
|
new_fig1 = plt.figure() |
|
plt.scatter(normalized_data[:, 0], normalized_data[:, 1], color="r") |
|
plt.scatter(normalized_data[idx, 0], normalized_data[idx, 1], color="b") |
|
plt.title('Inference Data Space') |
|
plt.xlim([-2, 2]) |
|
plt.ylim([-2, 2]) |
|
output, _ = model(np.array(normalized_data[idx, :]).reshape((1, 2))) |
|
|
|
new_fig2 = plt.figure() |
|
plt.scatter(z[:, 0], z[:, 1], color="r") |
|
plt.scatter(output[0,0] , output[0,1], color="b") |
|
plt.xlim([-3.5, 4]) |
|
plt.ylim([-3.5, 4]) |
|
plt.title('Inference Latent Space') |
|
return new_fig1, new_fig2 |
|
|
|
def generate(): |
|
samples = model.distribution.sample(3000) |
|
x, _ = model.predict(samples) |
|
|
|
new_fig1=plt.figure() |
|
plt.scatter(samples[:,0], samples[:,1]) |
|
plt.title('Generated Latent Space') |
|
plt.xlim([-3.5, 4]) |
|
plt.ylim([-3.5, 4]) |
|
|
|
new_fig2=plt.figure() |
|
plt.scatter(x[:,0], x[:,1]) |
|
plt.title('Generated Data Space') |
|
plt.xlim([-2, 2]) |
|
plt.ylim([-2, 2]) |
|
return new_fig1, new_fig2 |
|
    # Wire each button to its callback; no input components are needed since
    # both callbacks read module-level state, and each returns the two figures
    # that replace the corresponding gr.Plot components.
    button.click(inference_sample, inputs=[], outputs=[data_space, latent_space])

    button_generate.click(generate, inputs=[], outputs=[generated_lspace, generated_dspace])



# Start the Gradio server (blocks until the app is stopped).
demo.launch()