diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..5116dccd1b776ad634705271cf14f3b1737f2309 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,56 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/testA/11_3.png filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/testA/12_12.png filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/testA/13_8.png filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](12).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](188).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](192).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](197).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](215).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](231).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](238).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](254).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](281).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](288).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](302).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](307).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](310).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](329).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](33).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](374).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](392).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](41).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](445).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](449).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](454).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](512).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](517).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](529).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](533).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](54).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](565).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](576).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](587).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](642).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](648).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](670).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](672).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](732).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](751).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](757).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](759).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](825).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](834).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](845).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](846).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](849).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](880).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](884).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](90).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](902).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](93).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](940).jpg filter=lfs diff=lfs merge=lfs -text
+datasets/bw2color/trainB/a[[:space:]](95).jpg filter=lfs diff=lfs merge=lfs -text
+imgs/horse2zebra.gif filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..0ea65dbceaae276ee7e2ca5025d04c891e49b8ea
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,46 @@
+.DS_Store
+debug*
+datasets/
+checkpoints/
+results/
+build/
+dist/
+*.png
+torch.egg-info/
+*/**/__pycache__
+torch/version.py
+torch/csrc/generic/TensorMethods.cpp
+torch/lib/*.so*
+torch/lib/*.dylib*
+torch/lib/*.h
+torch/lib/build
+torch/lib/tmp_install
+torch/lib/include
+torch/lib/torch_shm_manager
+torch/csrc/cudnn/cuDNN.cpp
+torch/csrc/nn/THNN.cwrap
+torch/csrc/nn/THNN.cpp
+torch/csrc/nn/THCUNN.cwrap
+torch/csrc/nn/THCUNN.cpp
+torch/csrc/nn/THNN_generic.cwrap
+torch/csrc/nn/THNN_generic.cpp
+torch/csrc/nn/THNN_generic.h
+docs/src/**/*
+test/data/legacy_modules.t7
+test/data/gpu_tensors.pt
+test/htmlcov
+test/.coverage
+*/*.pyc
+*/**/*.pyc
+*/**/**/*.pyc
+*/**/**/**/*.pyc
+*/**/**/**/**/*.pyc
+*/*.so*
+*/**/*.so*
+*/**/*.dylib*
+test/data/legacy_serialized.pt
+*~
+.idea
+
+#Ignore Wandb
+wandb/
diff --git a/.replit b/.replit
new file mode 100644
index 0000000000000000000000000000000000000000..94513bcb6de8effa36d8335fba26ff7b3d11a703
--- /dev/null
+++ b/.replit
@@ -0,0 +1,2 @@
+language = "python3"
+run = "
"
\ No newline at end of file
diff --git a/CycleGAN.ipynb b/CycleGAN.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..99df56ac5ed8348393207e33a490e8c450d72498
--- /dev/null
+++ b/CycleGAN.ipynb
@@ -0,0 +1,273 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "view-in-github"
+ },
+ "source": [
+    "<a href=\"https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "5VIGyIus8Vr7"
+ },
+ "source": [
+ "Take a look at the [repository](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) for more information"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "7wNjDKdQy35h"
+ },
+ "source": [
+ "# Install"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "TRm-USlsHgEV"
+ },
+ "outputs": [],
+ "source": [
+ "!git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "Pt3igws3eiVp"
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "os.chdir('pytorch-CycleGAN-and-pix2pix/')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "z1EySlOXwwoa"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -r requirements.txt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "8daqlgVhw29P"
+ },
+ "source": [
+ "# Datasets\n",
+ "\n",
+ "Download one of the official datasets with:\n",
+ "\n",
+ "- `bash ./datasets/download_cyclegan_dataset.sh [apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, cezanne2photo, ukiyoe2photo, vangogh2photo, maps, cityscapes, facades, iphone2dslr_flower, ae_photos]`\n",
+ "\n",
+ "Or use your own dataset by creating the appropriate folders and adding in the images.\n",
+ "\n",
+ "- Create a dataset folder under `/dataset` for your dataset.\n",
+ "- Create subfolders `testA`, `testB`, `trainA`, and `trainB` under your dataset's folder. Place any images you want to transform from a to b (cat2dog) in the `testA` folder, images you want to transform from b to a (dog2cat) in the `testB` folder, and do the same for the `trainA` and `trainB` folders."
+ ]
+ },
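+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sketch (not part of the original notebook): create the folder layout for a\n",
+    "# custom dataset as described above; 'mydataset' is a placeholder name.\n",
+    "!mkdir -p ./datasets/mydataset/trainA ./datasets/mydataset/trainB ./datasets/mydataset/testA ./datasets/mydataset/testB"
+   ]
+  },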
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "vrdOettJxaCc"
+ },
+ "outputs": [],
+ "source": [
+ "!bash ./datasets/download_cyclegan_dataset.sh horse2zebra"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "gdUz4116xhpm"
+ },
+ "source": [
+ "# Pretrained models\n",
+ "\n",
+ "Download one of the official pretrained models with:\n",
+ "\n",
+ "- `bash ./scripts/download_cyclegan_model.sh [apple2orange, orange2apple, summer2winter_yosemite, winter2summer_yosemite, horse2zebra, zebra2horse, monet2photo, style_monet, style_cezanne, style_ukiyoe, style_vangogh, sat2map, map2sat, cityscapes_photo2label, cityscapes_label2photo, facades_photo2label, facades_label2photo, iphone2dslr_flower]`\n",
+ "\n",
+ "Or add your own pretrained model to `./checkpoints/{NAME}_pretrained/latest_net_G.pt`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "B75UqtKhxznS"
+ },
+ "outputs": [],
+ "source": [
+ "!bash ./scripts/download_cyclegan_model.sh horse2zebra"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "yFw1kDQBx3LN"
+ },
+ "source": [
+ "# Training\n",
+ "\n",
+ "- `python train.py --dataroot ./datasets/horse2zebra --name horse2zebra --model cycle_gan`\n",
+ "\n",
+ "Change the `--dataroot` and `--name` to your own dataset's path and model's name. Use `--gpu_ids 0,1,..` to train on multiple GPUs and `--batch_size` to change the batch size. I've found that a batch size of 16 fits onto 4 V100s and can finish training an epoch in ~90s.\n",
+ "\n",
+ "Once your model has trained, copy over the last checkpoint to a format that the testing model can automatically detect:\n",
+ "\n",
+ "Use `cp ./checkpoints/horse2zebra/latest_net_G_A.pth ./checkpoints/horse2zebra/latest_net_G.pth` if you want to transform images from class A to class B and `cp ./checkpoints/horse2zebra/latest_net_G_B.pth ./checkpoints/horse2zebra/latest_net_G.pth` if you want to transform images from class B to class A.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "0sp7TCT2x9dB"
+ },
+ "outputs": [],
+ "source": [
+ "!python train.py --dataroot ./datasets/horse2zebra --name horse2zebra --model cycle_gan --display_id -1"
+ ]
+ },
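+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# After training, copy the latest A->B generator checkpoint to the filename the test\n",
+    "# script expects (use latest_net_G_B.pth instead to go from B to A), as described above.\n",
+    "!cp ./checkpoints/horse2zebra/latest_net_G_A.pth ./checkpoints/horse2zebra/latest_net_G.pth"
+   ]
+  },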
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "9UkcaFZiyASl"
+ },
+ "source": [
+ "# Testing\n",
+ "\n",
+ "- `python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout`\n",
+ "\n",
+ "Change the `--dataroot` and `--name` to be consistent with your trained model's configuration.\n",
+ "\n",
+ "> from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix:\n",
+ "> The option --model test is used for generating results of CycleGAN only for one side. This option will automatically set --dataset_mode single, which only loads the images from one set. On the contrary, using --model cycle_gan requires loading and generating results in both directions, which is sometimes unnecessary. The results will be saved at ./results/. Use --results_dir {directory_path_to_save_result} to specify the results directory.\n",
+ "\n",
+ "> For your own experiments, you might want to specify --netG, --norm, --no_dropout to match the generator architecture of the trained model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "uCsKkEq0yGh0"
+ },
+ "outputs": [],
+ "source": [
+ "!python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "OzSKIPUByfiN"
+ },
+ "source": [
+ "# Visualize"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "9Mgg8raPyizq"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "img = plt.imread('./results/horse2zebra_pretrained/test_latest/images/n02381460_1010_fake.png')\n",
+ "plt.imshow(img)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "0G3oVH9DyqLQ"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "img = plt.imread('./results/horse2zebra_pretrained/test_latest/images/n02381460_1010_real.png')\n",
+ "plt.imshow(img)"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "collapsed_sections": [],
+ "include_colab_link": true,
+ "name": "CycleGAN",
+ "provenance": []
+ },
+ "environment": {
+ "name": "tf2-gpu.2-3.m74",
+ "type": "gcloud",
+ "uri": "gcr.io/deeplearning-platform-release/tf2-gpu.2-3:m74"
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.10"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..d75f0ee8466f00cf04da906a6fca115c7910399f
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,58 @@
+Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+--------------------------- LICENSE FOR pix2pix --------------------------------
+BSD License
+
+For pix2pix software
+Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+----------------------------- LICENSE FOR DCGAN --------------------------------
+BSD License
+
+For dcgan.torch software
+
+Copyright (c) 2015, Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..539bce8585fa2465344a5bed7422ca7c50d84422
--- /dev/null
+++ b/README.md
@@ -0,0 +1,246 @@
+
+
+
+
+
+# CycleGAN and pix2pix in PyTorch
+
+**New**: Please check out [contrastive-unpaired-translation](https://github.com/taesungp/contrastive-unpaired-translation) (CUT), our new unpaired image-to-image translation model that enables fast and memory-efficient training.
+
+We provide PyTorch implementations for both unpaired and paired image-to-image translation.
+
+The code was written by [Jun-Yan Zhu](https://github.com/junyanz) and [Taesung Park](https://github.com/taesungp), and supported by [Tongzhou Wang](https://github.com/SsnL).
+
+This PyTorch implementation produces results comparable to or better than our original Torch software. If you would like to reproduce the same results as in the papers, check out the original [CycleGAN Torch](https://github.com/junyanz/CycleGAN) and [pix2pix Torch](https://github.com/phillipi/pix2pix) code in Lua/Torch.
+
+**Note**: The current software works well with PyTorch 1.4. Check out the older [branch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/tree/pytorch0.3.1) that supports PyTorch 0.1-0.3.
+
+You may find useful information in [training/test tips](docs/tips.md) and [frequently asked questions](docs/qa.md). To implement custom models and datasets, check out our [templates](#custom-model-and-dataset). To help users better understand and adapt our codebase, we provide an [overview](docs/overview.md) of the code structure of this repository.
+
+**CycleGAN: [Project](https://junyanz.github.io/CycleGAN/) | [Paper](https://arxiv.org/pdf/1703.10593.pdf) | [Torch](https://github.com/junyanz/CycleGAN) |
+[Tensorflow Core Tutorial](https://www.tensorflow.org/tutorials/generative/cyclegan) | [PyTorch Colab](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb)**
+
+
+
+**Pix2pix: [Project](https://phillipi.github.io/pix2pix/) | [Paper](https://arxiv.org/pdf/1611.07004.pdf) | [Torch](https://github.com/phillipi/pix2pix) |
+[Tensorflow Core Tutorial](https://www.tensorflow.org/tutorials/generative/pix2pix) | [PyTorch Colab](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/pix2pix.ipynb)**
+
+
+
+
+**[EdgesCats Demo](https://affinelayer.com/pixsrv/) | [pix2pix-tensorflow](https://github.com/affinelayer/pix2pix-tensorflow) | by [Christopher Hesse](https://twitter.com/christophrhesse)**
+
+
+
+If you use this code for your research, please cite:
+
+Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks.
+[Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/)\*, [Taesung Park](https://taesung.me/)\*, [Phillip Isola](https://people.eecs.berkeley.edu/~isola/), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros). In ICCV 2017. (* equal contributions) [[Bibtex]](https://junyanz.github.io/CycleGAN/CycleGAN.txt)
+
+
+Image-to-Image Translation with Conditional Adversarial Networks.
+[Phillip Isola](https://people.eecs.berkeley.edu/~isola), [Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/), [Tinghui Zhou](https://people.eecs.berkeley.edu/~tinghuiz), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros). In CVPR 2017. [[Bibtex]](https://www.cs.cmu.edu/~junyanz/projects/pix2pix/pix2pix.bib)
+
+## Talks and Course
+pix2pix slides: [keynote](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/pix2pix.key) | [pdf](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/pix2pix.pdf),
+CycleGAN slides: [pptx](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/CycleGAN.pptx) | [pdf](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/CycleGAN.pdf)
+
+CycleGAN course assignment [code](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-code.zip) and [handout](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-handout.pdf) designed by Prof. [Roger Grosse](http://www.cs.toronto.edu/~rgrosse/) for [CSC321](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/) "Intro to Neural Networks and Machine Learning" at University of Toronto. Please contact the instructor if you would like to adopt it in your course.
+
+## Colab Notebook
+TensorFlow Core CycleGAN Tutorial: [Google Colab](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/cyclegan.ipynb) | [Code](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/cyclegan.ipynb)
+
+TensorFlow Core pix2pix Tutorial: [Google Colab](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/pix2pix.ipynb) | [Code](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/pix2pix.ipynb)
+
+PyTorch Colab notebook: [CycleGAN](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb) and [pix2pix](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/pix2pix.ipynb)
+
+ZeroCostDL4Mic Colab notebook: [CycleGAN](https://colab.research.google.com/github/HenriquesLab/ZeroCostDL4Mic/blob/master/Colab_notebooks_Beta/CycleGAN_ZeroCostDL4Mic.ipynb) and [pix2pix](https://colab.research.google.com/github/HenriquesLab/ZeroCostDL4Mic/blob/master/Colab_notebooks_Beta/pix2pix_ZeroCostDL4Mic.ipynb)
+
+## Other implementations
+### CycleGAN
+ [Tensorflow] (by Harry Yang),
+[Tensorflow] (by Archit Rathore),
+[Tensorflow] (by Van Huy),
+[Tensorflow] (by Xiaowei Hu),
+ [Tensorflow2] (by Zhenliang He),
+ [TensorLayer1.0] (by luoxier),
+ [TensorLayer2.0] (by zsdonghao),
+[Chainer] (by Yanghua Jin),
+[Minimal PyTorch] (by yunjey),
+[Mxnet] (by Ldpe2G),
+[lasagne/Keras] (by tjwei),
+[Keras] (by Simon Karlsson),
+[OneFlow] (by Ldpe2G)
+
+
+
+### pix2pix
+ [Tensorflow] (by Christopher Hesse),
+[Tensorflow] (by Eyyüb Sariu),
+ [Tensorflow (face2face)] (by Dat Tran),
+ [Tensorflow (film)] (by Arthur Juliani),
+[Tensorflow (zi2zi)] (by Yuchen Tian),
+[Chainer] (by mattya),
+[tf/torch/keras/lasagne] (by tjwei),
+[Pytorch] (by taey16)
+
+
+
+## Prerequisites
+- Linux or macOS
+- Python 3
+- CPU or NVIDIA GPU + CUDA CuDNN
+
+## Getting Started
+### Installation
+
+- Clone this repo:
+```bash
+git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
+cd pytorch-CycleGAN-and-pix2pix
+```
+
+- Install [PyTorch](http://pytorch.org) 0.4+ and other dependencies (e.g., torchvision, [visdom](https://github.com/facebookresearch/visdom) and [dominate](https://github.com/Knio/dominate)).
+ - For pip users, please type the command `pip install -r requirements.txt`.
+ - For Conda users, you can create a new Conda environment using `conda env create -f environment.yml`.
+ - For Docker users, we provide the pre-built Docker image and Dockerfile. Please refer to our [Docker](docs/docker.md) page.
+  - For Repl users, please click [Run on Repl.it](https://repl.it/github/junyanz/pytorch-CycleGAN-and-pix2pix).
+
+### CycleGAN train/test
+- Download a CycleGAN dataset (e.g. maps):
+```bash
+bash ./datasets/download_cyclegan_dataset.sh maps
+```
+- To view training results and loss plots, run `python -m visdom.server` and click the URL http://localhost:8097.
+- To log training progress and test images to the W&B dashboard, set the `--use_wandb` flag with the train and test scripts
+- Train a model:
+```bash
+#!./scripts/train_cyclegan.sh
+python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
+```
+To see more intermediate results, check out `./checkpoints/maps_cyclegan/web/index.html`.
+- Test the model:
+```bash
+#!./scripts/test_cyclegan.sh
+python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
+```
+- The test results will be saved to an HTML file here: `./results/maps_cyclegan/latest_test/index.html`.
+
+### pix2pix train/test
+- Download a pix2pix dataset (e.g. [facades](http://cmp.felk.cvut.cz/~tylecr1/facade/)):
+```bash
+bash ./datasets/download_pix2pix_dataset.sh facades
+```
+- To view training results and loss plots, run `python -m visdom.server` and click the URL http://localhost:8097.
+- To log training progress and test images to the W&B dashboard, set the `--use_wandb` flag with the train and test scripts
+- Train a model:
+```bash
+#!./scripts/train_pix2pix.sh
+python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
+```
+To see more intermediate results, check out `./checkpoints/facades_pix2pix/web/index.html`.
+
+- Test the model (`bash ./scripts/test_pix2pix.sh`):
+```bash
+#!./scripts/test_pix2pix.sh
+python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
+```
+- The test results will be saved to an HTML file here: `./results/facades_pix2pix/test_latest/index.html`. You can find more scripts in the `scripts` directory.
+- To train and test pix2pix-based colorization models, please add `--model colorization` and `--dataset_mode colorization`. See our training [tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md#notes-on-colorization) for more details.
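+
+For example, a colorization run could look like the following sketch (the dataset path `./datasets/mycolor` and the experiment name `mycolor_pix2pix` are placeholders; substitute your own):
+```bash
+python train.py --dataroot ./datasets/mycolor --name mycolor_pix2pix --model colorization --dataset_mode colorization
+python test.py --dataroot ./datasets/mycolor --name mycolor_pix2pix --model colorization --dataset_mode colorization
+```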
+
+### Apply a pre-trained model (CycleGAN)
+- You can download a pretrained model (e.g. horse2zebra) with the following script:
+```bash
+bash ./scripts/download_cyclegan_model.sh horse2zebra
+```
+- The pretrained model is saved at `./checkpoints/{name}_pretrained/latest_net_G.pth`. Check [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_cyclegan_model.sh#L3) for all the available CycleGAN models.
+- To test the model, you also need to download the horse2zebra dataset:
+```bash
+bash ./datasets/download_cyclegan_dataset.sh horse2zebra
+```
+
+- Then generate the results using
+```bash
+python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
+```
+- The option `--model test` is used for generating results of CycleGAN only for one side. This option will automatically set `--dataset_mode single`, which only loads the images from one set. On the contrary, using `--model cycle_gan` requires loading and generating results in both directions, which is sometimes unnecessary. The results will be saved at `./results/`. Use `--results_dir {directory_path_to_save_result}` to specify the results directory.
+
+- For pix2pix and your own models, you need to explicitly specify `--netG`, `--norm`, `--no_dropout` to match the generator architecture of the trained model. See this [FAQ](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md#runtimeerror-errors-in-loading-state_dict-812-671461-296) for more details.
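+
+As an illustrative sketch, testing a generator that was trained with the CycleGAN defaults might spell the flags out like this (adjust `--netG`, `--norm`, and `--no_dropout` to whatever your model was actually trained with):
+```bash
+python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra --model test --netG resnet_9blocks --norm instance --no_dropout
+```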
+
+### Apply a pre-trained model (pix2pix)
+Download a pre-trained model with `./scripts/download_pix2pix_model.sh`.
+
+- Check [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_pix2pix_model.sh#L3) for all the available pix2pix models. For example, if you would like to download the label2photo model trained on the Facades dataset,
+```bash
+bash ./scripts/download_pix2pix_model.sh facades_label2photo
+```
+- Download the pix2pix facades datasets:
+```bash
+bash ./datasets/download_pix2pix_dataset.sh facades
+```
+- Then generate the results using
+```bash
+python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained
+```
+- Note that we specified `--direction BtoA` as the Facades dataset's A to B direction is photos to labels.
+
+- If you would like to apply a pre-trained model to a collection of input images (rather than image pairs), please use `--model test` option. See `./scripts/test_single.sh` for how to apply a model to Facade label maps (stored in the directory `facades/testB`).
+
+- See a list of currently available models at `./scripts/download_pix2pix_model.sh`
+
+## [Docker](docs/docker.md)
+We provide the pre-built Docker image and Dockerfile that can run this code repo. See [docker](docs/docker.md).
+
+## [Datasets](docs/datasets.md)
+Download pix2pix/CycleGAN datasets and create your own datasets.
+
+## [Training/Test Tips](docs/tips.md)
+Best practice for training and testing your models.
+
+## [Frequently Asked Questions](docs/qa.md)
+Before you post a new question, please first look at the above Q & A and existing GitHub issues.
+
+## Custom Model and Dataset
+If you plan to implement custom models and dataset for your new applications, we provide a dataset [template](data/template_dataset.py) and a model [template](models/template_model.py) as a starting point.
+
+## [Code structure](docs/overview.md)
+To help users better understand and use our code, we briefly overview the functionality and implementation of each package and each module.
+
+## Pull Request
+You are always welcome to contribute to this repository by sending a [pull request](https://help.github.com/articles/about-pull-requests/).
+Please run `flake8 --ignore E501 .` and `python ./scripts/test_before_push.py` before you commit the code. Please also update the code structure [overview](docs/overview.md) accordingly if you add or remove files.
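+
+That is, before pushing:
+```bash
+flake8 --ignore E501 .
+python ./scripts/test_before_push.py
+```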
+
+## Citation
+If you use this code for your research, please cite our papers.
+```
+@inproceedings{CycleGAN2017,
+ title={Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks},
+ author={Zhu, Jun-Yan and Park, Taesung and Isola, Phillip and Efros, Alexei A},
+ booktitle={Computer Vision (ICCV), 2017 IEEE International Conference on},
+ year={2017}
+}
+
+
+@inproceedings{isola2017image,
+ title={Image-to-Image Translation with Conditional Adversarial Networks},
+ author={Isola, Phillip and Zhu, Jun-Yan and Zhou, Tinghui and Efros, Alexei A},
+ booktitle={Computer Vision and Pattern Recognition (CVPR), 2017 IEEE Conference on},
+ year={2017}
+}
+```
+
+## Other Languages
+[Spanish](docs/README_es.md)
+
+## Related Projects
+**[contrastive-unpaired-translation](https://github.com/taesungp/contrastive-unpaired-translation) (CUT)**
+**[CycleGAN-Torch](https://github.com/junyanz/CycleGAN) |
+[pix2pix-Torch](https://github.com/phillipi/pix2pix) | [pix2pixHD](https://github.com/NVIDIA/pix2pixHD)|
+[BicycleGAN](https://github.com/junyanz/BicycleGAN) | [vid2vid](https://tcwang0509.github.io/vid2vid/) | [SPADE/GauGAN](https://github.com/NVlabs/SPADE)**
+**[iGAN](https://github.com/junyanz/iGAN) | [GAN Dissection](https://github.com/CSAILVision/GANDissect) | [GAN Paint](http://ganpaint.io/)**
+
+## Cat Paper Collection
+If you love cats, and love reading cool graphics, vision, and learning papers, please check out the Cat Paper [Collection](https://github.com/junyanz/CatPapers).
+
+## Acknowledgments
+Our code is inspired by [pytorch-DCGAN](https://github.com/pytorch/examples/tree/master/dcgan).
diff --git a/checkpoints/bw2color/115_net_D.pth b/checkpoints/bw2color/115_net_D.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7a1c259ec9c559028ee87fbd968c6416f33fd288
--- /dev/null
+++ b/checkpoints/bw2color/115_net_D.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bcd750209fe24f61e92b68560120825b73082495a4640d9e8c01a9dadd7c52e5
+size 11076872
diff --git a/checkpoints/bw2color/115_net_G.pth b/checkpoints/bw2color/115_net_G.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f910e74b44b81d81c56862f7528716489dc43490
--- /dev/null
+++ b/checkpoints/bw2color/115_net_G.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd30259282564b9026db75345ad672bff504350a552c19d3413c019d7ee7fdd9
+size 217710092
diff --git a/checkpoints/bw2color/bw2color.pth b/checkpoints/bw2color/bw2color.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6e39de5be415318af18ba864d98e14b4c683f992
--- /dev/null
+++ b/checkpoints/bw2color/bw2color.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:110fc38ccd54ecae7ff81aa131f519a9eed1b839eaf5c4716cb194ee4a8d68e8
+size 217710350
diff --git a/checkpoints/bw2color/latest_net_D.pth b/checkpoints/bw2color/latest_net_D.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d005ba2de4ef075387a5a8b58bfe192cd6ec93c4
--- /dev/null
+++ b/checkpoints/bw2color/latest_net_D.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5b83cdcf7625bfb5e196dc9c6d47333a361095690db62f2ad1cf92d9a2986ee
+size 11076950
diff --git a/checkpoints/bw2color/latest_net_G_A.pth b/checkpoints/bw2color/latest_net_G_A.pth
new file mode 100644
index 0000000000000000000000000000000000000000..caefa7332c9d1738270a38d0a9c8957d92f905dc
--- /dev/null
+++ b/checkpoints/bw2color/latest_net_G_A.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c607ba5bee252b896387d91691222f5d1e4df3f83e7cd27704629621330cab81
+size 217710350
diff --git a/checkpoints/bw2color/loss_log.txt b/checkpoints/bw2color/loss_log.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e2ee1ebd9182533dd615f6bdaa556e013e069822
--- /dev/null
+++ b/checkpoints/bw2color/loss_log.txt
@@ -0,0 +1,17 @@
+================ Training Loss (Sat Nov 4 23:03:46 2023) ================
+(epoch: 8, iters: 2, time: 0.198, data: 0.717) G_GAN: 1.010 G_L1: 7.272 D_real: 0.533 D_fake: 0.516
+(epoch: 15, iters: 4, time: 0.199, data: 0.001) G_GAN: 1.228 G_L1: 4.958 D_real: 0.364 D_fake: 0.573
+(epoch: 22, iters: 6, time: 0.227, data: 0.003) G_GAN: 1.043 G_L1: 4.291 D_real: 0.214 D_fake: 0.797
+(epoch: 29, iters: 8, time: 2.188, data: 0.001) G_GAN: 0.901 G_L1: 2.646 D_real: 0.669 D_fake: 0.546
+(epoch: 36, iters: 10, time: 0.257, data: 0.005) G_GAN: 1.026 G_L1: 2.751 D_real: 0.526 D_fake: 0.560
+(epoch: 43, iters: 12, time: 0.242, data: 0.001) G_GAN: 1.380 G_L1: 3.614 D_real: 0.305 D_fake: 0.586
+(epoch: 50, iters: 14, time: 0.256, data: 0.003) G_GAN: 0.709 G_L1: 2.387 D_real: 0.763 D_fake: 0.772
+(epoch: 58, iters: 2, time: 2.606, data: 0.526) G_GAN: 0.973 G_L1: 3.211 D_real: 0.583 D_fake: 0.805
+(epoch: 65, iters: 4, time: 0.239, data: 0.005) G_GAN: 0.849 G_L1: 2.521 D_real: 0.685 D_fake: 0.521
+(epoch: 72, iters: 6, time: 0.227, data: 0.004) G_GAN: 0.768 G_L1: 2.132 D_real: 0.898 D_fake: 0.606
+(epoch: 79, iters: 8, time: 0.186, data: 0.003) G_GAN: 0.764 G_L1: 1.370 D_real: 0.824 D_fake: 0.625
+(epoch: 86, iters: 10, time: 1.048, data: 0.020) G_GAN: 1.167 G_L1: 3.618 D_real: 0.286 D_fake: 0.943
+(epoch: 93, iters: 12, time: 0.256, data: 0.001) G_GAN: 0.800 G_L1: 1.420 D_real: 0.879 D_fake: 0.532
+(epoch: 100, iters: 14, time: 0.250, data: 0.003) G_GAN: 0.689 G_L1: 1.218 D_real: 0.590 D_fake: 0.869
+(epoch: 108, iters: 2, time: 0.168, data: 0.382) G_GAN: 0.871 G_L1: 2.465 D_real: 0.585 D_fake: 0.526
+(epoch: 115, iters: 4, time: 1.077, data: 0.006) G_GAN: 0.732 G_L1: 1.168 D_real: 0.869 D_fake: 0.569
diff --git a/checkpoints/bw2color/opt.txt b/checkpoints/bw2color/opt.txt
new file mode 100644
index 0000000000000000000000000000000000000000..247925ff67cd509001b687316a1f6b49011a7d5b
--- /dev/null
+++ b/checkpoints/bw2color/opt.txt
@@ -0,0 +1,35 @@
+------------ Options -------------
+align_data: False
+aspect_ratio: 1.0
+batchSize: 1
+checkpoints_dir: ./checkpoints
+dataroot: None
+display_id: 1
+display_winsize: 256
+fineSize: 256
+gpu_ids: []
+how_many: 50
+identity: 0.0
+image_path: C:\Users\thera\Downloads\DataSet\09.png
+input_nc: 3
+isTrain: False
+loadSize: 286
+max_dataset_size: inf
+model: colorization
+nThreads: 2
+n_layers_D: 3
+name: bw2color
+ndf: 64
+ngf: 64
+norm: instance
+ntest: inf
+output_nc: 3
+phase: test
+results_dir: ./results/
+serial_batches: False
+use_dropout: True
+which_direction: AtoB
+which_epoch: latest
+which_model_netD: basic
+which_model_netG: resnet_9blocks
+-------------- End ----------------
diff --git a/checkpoints/bw2color/test_opt.txt b/checkpoints/bw2color/test_opt.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2d40b8ee7f57fe901a9d6024118f78657001a51b
--- /dev/null
+++ b/checkpoints/bw2color/test_opt.txt
@@ -0,0 +1,45 @@
+----------------- Options ---------------
+ aspect_ratio: 1.0
+ batch_size: 1
+ checkpoints_dir: ./checkpoints
+ crop_size: None
+ dataroot: None
+ dataset_mode: colorization
+ direction: AtoB
+ display_winsize: 256
+ epoch: latest
+ eval: False
+ gpu_ids: -1 [default: 0]
+ how_many: 50
+ image_path: C:\Users\thera\Downloads\55Sin ttulo.png [default: None]
+ init_gain: 0.02
+ init_type: normal
+ input_nc: 1
+ isTrain: False [default: None]
+ load_iter: 0 [default: 0]
+ load_size: None
+ max_dataset_size: inf
+ model: colorization
+ n_layers_D: 3
+ name: bw2color [default: experiment_name]
+ ndf: 64
+ netD: basic
+ netG: unet_256
+ ngf: 64
+ no_dropout: False
+ no_flip: False
+ norm: batch
+ ntest: inf
+ num_test: 50
+ num_threads: 4
+ output_nc: 2
+ phase: test
+ preprocess: resize_and_crop
+ results_dir: ./results/
+ serial_batches: False
+ suffix:
+ use_wandb: False
+ verbose: False
+ wandb_project_name: CycleGAN-and-pix2pix
+ which_epoch: latest
+----------------- End -------------------
diff --git a/checkpoints/bw2color/web/images/epoch004_fake_A.png b/checkpoints/bw2color/web/images/epoch004_fake_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..c148cf7084c10dc41dc385b4296a594cbf99aa05
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch004_fake_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch004_fake_B.png b/checkpoints/bw2color/web/images/epoch004_fake_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..83b99977bf31554c825109a536d4335197c07542
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch004_fake_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch004_idt_A.png b/checkpoints/bw2color/web/images/epoch004_idt_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..de3162b5db5b2fe0d57c0f0e9f7f10b63345a488
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch004_idt_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch004_idt_B.png b/checkpoints/bw2color/web/images/epoch004_idt_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..5f4ae90de070c1683989ea1cc3f703c77f160979
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch004_idt_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch004_real_A.png b/checkpoints/bw2color/web/images/epoch004_real_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..ee6d01b30d16fc063cbb3977222f9df43d2dfd3b
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch004_real_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch004_real_B.png b/checkpoints/bw2color/web/images/epoch004_real_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..fbb64932b8df839a83f9c0010f263e19bbd6b2d2
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch004_real_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch004_rec_A.png b/checkpoints/bw2color/web/images/epoch004_rec_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..74bc77fa649a61cbd0f9b55a3358ea45dfe9622e
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch004_rec_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch004_rec_B.png b/checkpoints/bw2color/web/images/epoch004_rec_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..e7bfd222090a9fc624b25c5117e4cbeee05fd7b1
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch004_rec_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch008_fake_A.png b/checkpoints/bw2color/web/images/epoch008_fake_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..c48a81d6d1f0b4d82c0d81c72e3310bb95c807c7
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch008_fake_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch008_fake_B.png b/checkpoints/bw2color/web/images/epoch008_fake_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..e4c0f1bd309e63d2eb50b0c1938343b7ad02fc64
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch008_fake_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch008_idt_A.png b/checkpoints/bw2color/web/images/epoch008_idt_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..68f9ac38f7c2631d7b17e83b52560b8d3dfe4398
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch008_idt_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch008_idt_B.png b/checkpoints/bw2color/web/images/epoch008_idt_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..46280456c73233aaa8c11f05c2bb11cee70ae0a2
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch008_idt_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch008_real_A.png b/checkpoints/bw2color/web/images/epoch008_real_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..0f6b5d466fdc8ad31d996cf80761a28442c5a7c4
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch008_real_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch008_real_B.png b/checkpoints/bw2color/web/images/epoch008_real_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..700671efacc86176d95899c7d7655c717f5d6cc4
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch008_real_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch008_rec_A.png b/checkpoints/bw2color/web/images/epoch008_rec_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..6779a1da2766b2693c949b76b5c66da0c9363a11
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch008_rec_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch008_rec_B.png b/checkpoints/bw2color/web/images/epoch008_rec_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..5571f3369cf8b1cbfbb5bae12bde37fc4b1c8832
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch008_rec_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch012_fake_A.png b/checkpoints/bw2color/web/images/epoch012_fake_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..905efdab6442d520d62285f77909ff85657759a1
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch012_fake_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch012_fake_B.png b/checkpoints/bw2color/web/images/epoch012_fake_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..a04c928924519364af66952291194f44baed52ec
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch012_fake_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch012_idt_A.png b/checkpoints/bw2color/web/images/epoch012_idt_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..60fb1d444834e904ed37a187352a2cb2e6b2d589
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch012_idt_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch012_idt_B.png b/checkpoints/bw2color/web/images/epoch012_idt_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..e8fe9df75ced8b5aca1827ac013d3bfc41de0e64
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch012_idt_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch012_real_A.png b/checkpoints/bw2color/web/images/epoch012_real_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..f2ccf6bbd59fb4fb712e9bcc7d8b2f13e69015a1
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch012_real_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch012_real_B.png b/checkpoints/bw2color/web/images/epoch012_real_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..300019800d45b11bded572500f45af8ebef96f9e
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch012_real_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch012_rec_A.png b/checkpoints/bw2color/web/images/epoch012_rec_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..501adff5d0436bd7f23301351632a9213425c46e
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch012_rec_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch012_rec_B.png b/checkpoints/bw2color/web/images/epoch012_rec_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..ee95f7eb52104cbcf9cfe709b58bf4928773a6c6
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch012_rec_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch016_fake_A.png b/checkpoints/bw2color/web/images/epoch016_fake_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..ec0b79ddb86feb95f748a1e3aab4f19c9fd0b1ed
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch016_fake_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch016_fake_B.png b/checkpoints/bw2color/web/images/epoch016_fake_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..1ad417f668bcfbbe5ebb3be357ce0f25aaf027ec
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch016_fake_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch016_idt_A.png b/checkpoints/bw2color/web/images/epoch016_idt_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..c808d657a25a7c6240e52e25d7b73bfe078f8941
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch016_idt_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch016_idt_B.png b/checkpoints/bw2color/web/images/epoch016_idt_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..1690c4a3558128e11d9a82b68b1e5a3b8537917c
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch016_idt_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch016_real_A.png b/checkpoints/bw2color/web/images/epoch016_real_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..fe246bf5b8f93017853813d3b8883e79804b660f
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch016_real_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch016_real_B.png b/checkpoints/bw2color/web/images/epoch016_real_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..addc44ec9caa17a6471214a6ad08dd2476c6ea67
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch016_real_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch016_rec_A.png b/checkpoints/bw2color/web/images/epoch016_rec_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..48e4a3516ee9b4057839abf6829cec3842b6da31
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch016_rec_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch016_rec_B.png b/checkpoints/bw2color/web/images/epoch016_rec_B.png
new file mode 100644
index 0000000000000000000000000000000000000000..2a0ae5f3c0344add1f21998ee7e61678d77af4d9
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch016_rec_B.png differ
diff --git a/checkpoints/bw2color/web/images/epoch029_fake_B_rgb.png b/checkpoints/bw2color/web/images/epoch029_fake_B_rgb.png
new file mode 100644
index 0000000000000000000000000000000000000000..92b50c2b9588ec45249cf413ac4fe612f3dd125a
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch029_fake_B_rgb.png differ
diff --git a/checkpoints/bw2color/web/images/epoch029_real_A.png b/checkpoints/bw2color/web/images/epoch029_real_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..59933c12623d64d8e871c57a81935ff35a250568
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch029_real_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch029_real_B_rgb.png b/checkpoints/bw2color/web/images/epoch029_real_B_rgb.png
new file mode 100644
index 0000000000000000000000000000000000000000..f1a93df086f6d43b977fae7a61fb063982df6803
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch029_real_B_rgb.png differ
diff --git a/checkpoints/bw2color/web/images/epoch058_fake_B_rgb.png b/checkpoints/bw2color/web/images/epoch058_fake_B_rgb.png
new file mode 100644
index 0000000000000000000000000000000000000000..866b269c4ef4a6a6ba7a3edec3d936aa5fb94865
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch058_fake_B_rgb.png differ
diff --git a/checkpoints/bw2color/web/images/epoch058_real_A.png b/checkpoints/bw2color/web/images/epoch058_real_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..277bc91384a1c5704b3f5b3f805153e8e30a883b
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch058_real_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch058_real_B_rgb.png b/checkpoints/bw2color/web/images/epoch058_real_B_rgb.png
new file mode 100644
index 0000000000000000000000000000000000000000..8ce7018ae008de777f60db962d7710dad951f453
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch058_real_B_rgb.png differ
diff --git a/checkpoints/bw2color/web/images/epoch086_fake_B_rgb.png b/checkpoints/bw2color/web/images/epoch086_fake_B_rgb.png
new file mode 100644
index 0000000000000000000000000000000000000000..d5fa36af2c159ef91fe857f69e551b8518b95e53
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch086_fake_B_rgb.png differ
diff --git a/checkpoints/bw2color/web/images/epoch086_real_A.png b/checkpoints/bw2color/web/images/epoch086_real_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..84e44952f033b2d88956e44101f680b28eef49d4
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch086_real_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch086_real_B_rgb.png b/checkpoints/bw2color/web/images/epoch086_real_B_rgb.png
new file mode 100644
index 0000000000000000000000000000000000000000..1b0bc2a119c91c50fb5d49fe0b186bbf117acda6
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch086_real_B_rgb.png differ
diff --git a/checkpoints/bw2color/web/images/epoch115_fake_B_rgb.png b/checkpoints/bw2color/web/images/epoch115_fake_B_rgb.png
new file mode 100644
index 0000000000000000000000000000000000000000..75cbaad7118e58c3a574c4cdf864b20ef16b5500
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch115_fake_B_rgb.png differ
diff --git a/checkpoints/bw2color/web/images/epoch115_real_A.png b/checkpoints/bw2color/web/images/epoch115_real_A.png
new file mode 100644
index 0000000000000000000000000000000000000000..32ff12add6f07c6ca024c29debda2064f36d767f
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch115_real_A.png differ
diff --git a/checkpoints/bw2color/web/images/epoch115_real_B_rgb.png b/checkpoints/bw2color/web/images/epoch115_real_B_rgb.png
new file mode 100644
index 0000000000000000000000000000000000000000..26d883e0eae8e9b576dbb415c94ad6ade3bd146b
Binary files /dev/null and b/checkpoints/bw2color/web/images/epoch115_real_B_rgb.png differ
diff --git a/checkpoints/bw2color/web/index.html b/checkpoints/bw2color/web/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..be7e64f4d57844a7015883576984b8f26ac1d7fd
--- /dev/null
+++ b/checkpoints/bw2color/web/index.html
@@ -0,0 +1,3344 @@
+
+
+
+ Experiment name = bw2color
+
+
+
+                 epoch [115]: real_A | real_B_rgb | fake_B_rgb
+                 (stripped HTML table: the same real_A / real_B_rgb / fake_B_rgb image row is repeated for every earlier epoch, from epoch [114] downwards)
+
+ |
+
+
+ epoch [44]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [43]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [42]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [41]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [40]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [39]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [38]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [37]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [36]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [35]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [34]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [33]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [32]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [31]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [30]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [29]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [28]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [27]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [26]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [25]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [24]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [23]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [22]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [21]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [20]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [19]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [18]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [17]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [16]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [15]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [14]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [13]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [12]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [11]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [10]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [9]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [8]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [7]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [6]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [5]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [4]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [3]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [2]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+ epoch [1]
+
+
+
+
+
+
+
+ real_A
+
+ |
+
+
+
+
+
+ real_B_rgb
+
+ |
+
+
+
+
+
+ fake_B_rgb
+
+ |
+
+
+
+
\ No newline at end of file
diff --git a/data/__init__.py b/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cb618618fc301ce5440720ba62c899f6d4e7321
--- /dev/null
+++ b/data/__init__.py
@@ -0,0 +1,93 @@
+"""This package includes all the modules related to data loading and preprocessing
+
+ To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
+ You need to implement four functions:
+ -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
+ -- <__len__>: return the size of dataset.
+ -- <__getitem__>: get a data point from data loader.
+ -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
+
+Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
+See our template dataset class 'template_dataset.py' for more details.
+"""
+import importlib
+import torch.utils.data
+from data.base_dataset import BaseDataset
+
+
+def find_dataset_using_name(dataset_name):
+ """Import the module "data/[dataset_name]_dataset.py".
+
+ In the file, the class called DatasetNameDataset() will
+ be instantiated. It has to be a subclass of BaseDataset,
+ and it is case-insensitive.
+ """
+ dataset_filename = "data." + dataset_name + "_dataset"
+ datasetlib = importlib.import_module(dataset_filename)
+
+ dataset = None
+ target_dataset_name = dataset_name.replace('_', '') + 'dataset'
+ for name, cls in datasetlib.__dict__.items():
+ if name.lower() == target_dataset_name.lower() \
+ and issubclass(cls, BaseDataset):
+ dataset = cls
+
+ if dataset is None:
+ raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
+
+ return dataset
+
+
+def get_option_setter(dataset_name):
+ """Return the static method of the dataset class."""
+ dataset_class = find_dataset_using_name(dataset_name)
+ return dataset_class.modify_commandline_options
+
+
+def create_dataset(opt):
+ """Create a dataset given the option.
+
+ This function wraps the class CustomDatasetDataLoader.
+ This is the main interface between this package and 'train.py'/'test.py'
+
+ Example:
+ >>> from data import create_dataset
+ >>> dataset = create_dataset(opt)
+ """
+ data_loader = CustomDatasetDataLoader(opt)
+ dataset = data_loader.load_data()
+ return dataset
+
+
+class CustomDatasetDataLoader():
+ """Wrapper class of Dataset class that performs multi-threaded data loading"""
+
+ def __init__(self, opt):
+ """Initialize this class
+
+ Step 1: create a dataset instance given the name [dataset_mode]
+ Step 2: create a multi-threaded data loader.
+ """
+ self.opt = opt
+ dataset_class = find_dataset_using_name(opt.dataset_mode)
+ self.dataset = dataset_class(opt)
+ print("dataset [%s] was created" % type(self.dataset).__name__)
+ self.dataloader = torch.utils.data.DataLoader(
+ self.dataset,
+ batch_size=opt.batch_size,
+ shuffle=not opt.serial_batches,
+ num_workers=int(opt.num_threads))
+
+ def load_data(self):
+ return self
+
+ def __len__(self):
+ """Return the number of data in the dataset"""
+ return min(len(self.dataset), self.opt.max_dataset_size)
+
+ def __iter__(self):
+ """Return a batch of data"""
+ for i, data in enumerate(self.dataloader):
+ if i * self.opt.batch_size >= self.opt.max_dataset_size:
+ break
+ yield data
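To make the 'dummy' example in the docstring above concrete, here is a minimal sketch of what data/dummy_dataset.py could look like and how create_dataset would pick it up. The class, its option values, and the tensor shapes are illustrative assumptions, not part of this patch.

# data/dummy_dataset.py -- hypothetical example, not included in this patch
import torch
from data.base_dataset import BaseDataset


class DummyDataset(BaseDataset):
    """Tiny stand-in dataset that returns constant tensors."""

    @staticmethod
    def modify_commandline_options(parser, is_train):
        return parser  # no extra options in this sketch

    def __init__(self, opt):
        BaseDataset.__init__(self, opt)
        self.size = 8  # pretend we found 8 samples

    def __len__(self):
        return self.size

    def __getitem__(self, index):
        # mirror the dictionary layout used by the real dataset classes
        return {'A': torch.zeros(3, 256, 256), 'A_paths': 'dummy_%d' % index}


# after parsing options with '--dataset_mode dummy':
#     from data import create_dataset
#     dataset = create_dataset(opt)  # wraps DummyDataset in CustomDatasetDataLoader
#     for batch in dataset:
#         ...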
diff --git a/data/__pycache__/__init__.cpython-39.pyc b/data/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..60315c02beaa2fe458b624c9c6011d9dbd0a5b8b
Binary files /dev/null and b/data/__pycache__/__init__.cpython-39.pyc differ
diff --git a/data/__pycache__/base_dataset.cpython-39.pyc b/data/__pycache__/base_dataset.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26e839860e9e30eb08b169fc957e2acb014afc9e
Binary files /dev/null and b/data/__pycache__/base_dataset.cpython-39.pyc differ
diff --git a/data/__pycache__/colorization_dataset.cpython-39.pyc b/data/__pycache__/colorization_dataset.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba26b817e88db32c559e8c9f69e5c2d745d10c6e
Binary files /dev/null and b/data/__pycache__/colorization_dataset.cpython-39.pyc differ
diff --git a/data/__pycache__/image_folder.cpython-39.pyc b/data/__pycache__/image_folder.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e48eb7a1fb327fc1282a0b872d970d6648c5af55
Binary files /dev/null and b/data/__pycache__/image_folder.cpython-39.pyc differ
diff --git a/data/aligned_dataset.py b/data/aligned_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..59a58108c227761766342b8b1d444133118586cf
--- /dev/null
+++ b/data/aligned_dataset.py
@@ -0,0 +1,60 @@
+import os
+from data.base_dataset import BaseDataset, get_params, get_transform
+from data.image_folder import make_dataset
+from PIL import Image
+
+
+class AlignedDataset(BaseDataset):
+ """A dataset class for paired image dataset.
+
+ It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
+ During test time, you need to prepare a directory '/path/to/data/test'.
+ """
+
+ def __init__(self, opt):
+ """Initialize this dataset class.
+
+ Parameters:
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+ """
+ BaseDataset.__init__(self, opt)
+ self.dir_AB = os.path.join(opt.dataroot, opt.phase) # get the image directory
+ self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size)) # get image paths
+ assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image
+ self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
+ self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
+
+ def __getitem__(self, index):
+ """Return a data point and its metadata information.
+
+ Parameters:
+ index - - a random integer for data indexing
+
+ Returns a dictionary that contains A, B, A_paths and B_paths
+ A (tensor) - - an image in the input domain
+ B (tensor) - - its corresponding image in the target domain
+ A_paths (str) - - image paths
+ B_paths (str) - - image paths (same as A_paths)
+ """
+ # read an image given a random integer index
+ AB_path = self.AB_paths[index]
+ AB = Image.open(AB_path).convert('RGB')
+ # split AB image into A and B
+ w, h = AB.size
+ w2 = int(w / 2)
+ A = AB.crop((0, 0, w2, h))
+ B = AB.crop((w2, 0, w, h))
+
+ # apply the same transform to both A and B
+ transform_params = get_params(self.opt, A.size)
+ A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
+ B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
+
+ A = A_transform(A)
+ B = B_transform(B)
+
+ return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
+
+ def __len__(self):
+ """Return the total number of images in the dataset."""
+ return len(self.AB_paths)
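For reference, a sketch of producing the side-by-side {A,B} images this class expects; the file names here are placeholders, and the combine_A_and_B.py script added later in this patch does the same job with cv2.

import numpy as np
from PIL import Image

# placeholder file names; real pairs live under trainA/ and trainB/
A = Image.open('trainA/0001.jpg').convert('RGB')
B = Image.open('trainB/0001.jpg').convert('RGB').resize(A.size)
AB = np.concatenate([np.array(A), np.array(B)], axis=1)  # A on the left, B on the right
Image.fromarray(AB).save('train/0001.jpg')               # stored under <dataroot>/<phase>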
diff --git a/data/base_dataset.py b/data/base_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8eb78ed51ab1435fd3a52e635a58399f03a7caa
--- /dev/null
+++ b/data/base_dataset.py
@@ -0,0 +1,167 @@
+"""This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
+
+It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
+"""
+import random
+import numpy as np
+import torch.utils.data as data
+from PIL import Image
+import torchvision.transforms as transforms
+from abc import ABC, abstractmethod
+
+
+class BaseDataset(data.Dataset, ABC):
+ """This class is an abstract base class (ABC) for datasets.
+
+ To create a subclass, you need to implement the following four functions:
+ -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
+ -- <__len__>: return the size of dataset.
+ -- <__getitem__>: get a data point.
+ -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
+ """
+
+ def __init__(self, opt):
+ """Initialize the class; save the options in the class
+
+ Parameters:
+ opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
+ """
+ self.opt = opt
+ self.root = opt.dataroot
+
+ @staticmethod
+ def modify_commandline_options(parser, is_train):
+ """Add new dataset-specific options, and rewrite default values for existing options.
+
+ Parameters:
+ parser -- original option parser
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+ Returns:
+ the modified parser.
+ """
+ return parser
+
+ @abstractmethod
+ def __len__(self):
+ """Return the total number of images in the dataset."""
+ return 0
+
+ @abstractmethod
+ def __getitem__(self, index):
+ """Return a data point and its metadata information.
+
+ Parameters:
+ index - - a random integer for data indexing
+
+ Returns:
+ a dictionary of data with their names. It usually contains the data itself and its metadata information.
+ """
+ pass
+
+
+def get_params(opt, size):
+ w, h = size
+ new_h = h
+ new_w = w
+ if opt.preprocess == 'resize_and_crop':
+ new_h = new_w = opt.load_size
+ elif opt.preprocess == 'scale_width_and_crop':
+ new_w = opt.load_size
+ new_h = opt.load_size * h // w
+
+ x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
+ y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
+
+ flip = random.random() > 0.5
+
+ return {'crop_pos': (x, y), 'flip': flip}
+
+
+def get_transform(opt, params=None, grayscale=False, method=transforms.InterpolationMode.BICUBIC, convert=True):
+ transform_list = []
+ if grayscale:
+ transform_list.append(transforms.Grayscale(1))
+ if 'resize' in opt.preprocess:
+ osize = [opt.load_size, opt.load_size]
+ transform_list.append(transforms.Resize(osize, method))
+ elif 'scale_width' in opt.preprocess:
+ transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
+
+ if 'crop' in opt.preprocess:
+ if params is None:
+ transform_list.append(transforms.RandomCrop(opt.crop_size))
+ else:
+ transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
+
+ if opt.preprocess == 'none':
+ transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
+
+ if not opt.no_flip:
+ if params is None:
+ transform_list.append(transforms.RandomHorizontalFlip())
+ elif params['flip']:
+ transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
+
+ if convert:
+ transform_list += [transforms.ToTensor()]
+ if grayscale:
+ transform_list += [transforms.Normalize((0.5,), (0.5,))]
+ else:
+ transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
+ return transforms.Compose(transform_list)
+
+
+def __transforms2pil_resize(method):
+ mapper = {transforms.InterpolationMode.BILINEAR: Image.BILINEAR,
+ transforms.InterpolationMode.BICUBIC: Image.BICUBIC,
+ transforms.InterpolationMode.NEAREST: Image.NEAREST,
+ transforms.InterpolationMode.LANCZOS: Image.LANCZOS,}
+ return mapper[method]
+
+
+def __make_power_2(img, base, method=transforms.InterpolationMode.BICUBIC):
+ method = __transforms2pil_resize(method)
+ ow, oh = img.size
+ h = int(round(oh / base) * base)
+ w = int(round(ow / base) * base)
+ if h == oh and w == ow:
+ return img
+
+ __print_size_warning(ow, oh, w, h)
+ return img.resize((w, h), method)
+
+
+def __scale_width(img, target_size, crop_size, method=transforms.InterpolationMode.BICUBIC):
+ method = __transforms2pil_resize(method)
+ ow, oh = img.size
+ if ow == target_size and oh >= crop_size:
+ return img
+ w = target_size
+ h = int(max(target_size * oh / ow, crop_size))
+ return img.resize((w, h), method)
+
+
+def __crop(img, pos, size):
+ ow, oh = img.size
+ x1, y1 = pos
+ tw = th = size
+ if (ow > tw or oh > th):
+ return img.crop((x1, y1, x1 + tw, y1 + th))
+ return img
+
+
+def __flip(img, flip):
+ if flip:
+ return img.transpose(Image.FLIP_LEFT_RIGHT)
+ return img
+
+
+def __print_size_warning(ow, oh, w, h):
+ """Print warning information about image size(only print once)"""
+ if not hasattr(__print_size_warning, 'has_printed'):
+ print("The image size needs to be a multiple of 4. "
+ "The loaded image size was (%d, %d), so it was adjusted to "
+ "(%d, %d). This adjustment will be done to all images "
+ "whose sizes are not multiples of 4" % (ow, oh, w, h))
+ __print_size_warning.has_printed = True
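A small usage sketch of get_params/get_transform, showing how one set of random parameters is reused so two images get the same crop and flip. The Namespace stands in for the real option object and only carries the fields these helpers read.

from argparse import Namespace
from PIL import Image
from data.base_dataset import get_params, get_transform

opt = Namespace(preprocess='resize_and_crop', load_size=286, crop_size=256, no_flip=False)
img_A = Image.new('RGB', (512, 512))  # placeholder images
img_B = Image.new('RGB', (512, 512))

params = get_params(opt, img_A.size)    # one crop position / flip decision
transform = get_transform(opt, params)  # reused so A and B stay aligned
tensor_A, tensor_B = transform(img_A), transform(img_B)
print(tensor_A.shape)                   # torch.Size([3, 256, 256]), normalized to [-1, 1]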
diff --git a/data/colorization_dataset.py b/data/colorization_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..4180786a674525880695ded485bb9fbbbab3a6d3
--- /dev/null
+++ b/data/colorization_dataset.py
@@ -0,0 +1,68 @@
+import os
+from data.base_dataset import BaseDataset, get_transform
+from data.image_folder import make_dataset
+from skimage import color # require skimage
+from PIL import Image
+import numpy as np
+import torchvision.transforms as transforms
+
+
+class ColorizationDataset(BaseDataset):
+ """This dataset class can load a set of natural images in RGB, and convert RGB format into (L, ab) pairs in Lab color space.
+
+ This dataset is required by pix2pix-based colorization model ('--model colorization')
+ """
+ @staticmethod
+ def modify_commandline_options(parser, is_train):
+ """Add new dataset-specific options, and rewrite default values for existing options.
+
+ Parameters:
+ parser -- original option parser
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+ Returns:
+ the modified parser.
+
+ By default, the number of channels for input image is 1 (L) and
+ the number of channels for output image is 2 (ab). The direction is from A to B
+ """
+ parser.set_defaults(input_nc=1, output_nc=2, direction='AtoB')
+ return parser
+
+ def __init__(self, opt):
+ """Initialize this dataset class.
+
+ Parameters:
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+ """
+ BaseDataset.__init__(self, opt)
+ self.dir = os.path.join(opt.dataroot, opt.phase)
+ self.AB_paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
+ assert(opt.input_nc == 1 and opt.output_nc == 2 and opt.direction == 'AtoB')
+ self.transform = get_transform(self.opt, convert=False)
+
+ def __getitem__(self, index):
+ """Return a data point and its metadata information.
+
+ Parameters:
+ index - - a random integer for data indexing
+
+ Returns a dictionary that contains A, B, A_paths and B_paths
+ A (tensor) - - the L channel of an image
+ B (tensor) - - the ab channels of the same image
+ A_paths (str) - - image paths
+ B_paths (str) - - image paths (same as A_paths)
+ """
+ path = self.AB_paths[index]
+ im = Image.open(path).convert('RGB')
+ im = self.transform(im)
+ im = np.array(im)
+ lab = color.rgb2lab(im).astype(np.float32)
+ lab_t = transforms.ToTensor()(lab)
+ A = lab_t[[0], ...] / 50.0 - 1.0
+ B = lab_t[[1, 2], ...] / 110.0
+ return {'A': A, 'B': B, 'A_paths': path, 'B_paths': path}
+
+ def __len__(self):
+ """Return the total number of images in the dataset."""
+ return len(self.AB_paths)
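To undo the normalisation above and get an RGB array back (presumably how the real_B_rgb/fake_B_rgb visuals earlier in this patch are rendered), a sketch along these lines should work; the helper name is ours, not from the patch.

import numpy as np
import torch
from skimage import color

def lab_tensors_to_rgb(A, B):
    """A: 1xHxW tensor in [-1, 1] (L / 50 - 1); B: 2xHxW tensor (ab / 110)."""
    L = (A + 1.0) * 50.0                 # back to [0, 100]
    ab = B * 110.0                       # back to roughly [-110, 110]
    lab = torch.cat([L, ab], dim=0).permute(1, 2, 0).cpu().numpy().astype(np.float64)
    return color.lab2rgb(lab)            # HxWx3 float array in [0, 1]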
diff --git a/data/image_folder.py b/data/image_folder.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7cb91574a0487c51e5dd8210aebb38edb0b16ef
--- /dev/null
+++ b/data/image_folder.py
@@ -0,0 +1,65 @@
+"""A modified image folder class
+
+We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
+so that this class can load images from both the current directory and its subdirectories.
+"""
+
+import torch.utils.data as data
+
+from PIL import Image
+import os
+
+IMG_EXTENSIONS = [
+ '.jpg', '.JPG', '.jpeg', '.JPEG',
+ '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
+ '.tif', '.TIF', '.tiff', '.TIFF',
+]
+
+
+def is_image_file(filename):
+ return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
+
+
+def make_dataset(dir, max_dataset_size=float("inf")):
+ images = []
+ assert os.path.isdir(dir), '%s is not a valid directory' % dir
+
+ for root, _, fnames in sorted(os.walk(dir)):
+ for fname in fnames:
+ if is_image_file(fname):
+ path = os.path.join(root, fname)
+ images.append(path)
+ return images[:min(max_dataset_size, len(images))]
+
+
+def default_loader(path):
+ return Image.open(path).convert('RGB')
+
+
+class ImageFolder(data.Dataset):
+
+ def __init__(self, root, transform=None, return_paths=False,
+ loader=default_loader):
+ imgs = make_dataset(root)
+ if len(imgs) == 0:
+ raise(RuntimeError("Found 0 images in: " + root + "\n"
+ "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
+
+ self.root = root
+ self.imgs = imgs
+ self.transform = transform
+ self.return_paths = return_paths
+ self.loader = loader
+
+ def __getitem__(self, index):
+ path = self.imgs[index]
+ img = self.loader(path)
+ if self.transform is not None:
+ img = self.transform(img)
+ if self.return_paths:
+ return img, path
+ else:
+ return img
+
+ def __len__(self):
+ return len(self.imgs)
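Usage sketch for the two helpers above; the directory is one of the dataset folders added in this patch, but any image folder works.

from torchvision import transforms
from data.image_folder import make_dataset, ImageFolder

paths = make_dataset('datasets/bw2color/trainB')  # recursive list of image paths
print(len(paths), paths[:2])

folder = ImageFolder('datasets/bw2color/trainB',
                     transform=transforms.ToTensor(),
                     return_paths=True)
img, path = folder[0]                              # (tensor, path) pair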
diff --git a/data/single_dataset.py b/data/single_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a5c3232f2ff746e73eeb4a7775027796dd20969
--- /dev/null
+++ b/data/single_dataset.py
@@ -0,0 +1,40 @@
+from data.base_dataset import BaseDataset, get_transform
+from data.image_folder import make_dataset
+from PIL import Image
+
+
+class SingleDataset(BaseDataset):
+ """This dataset class can load a set of images specified by the path --dataroot /path/to/data.
+
+ It can be used for generating CycleGAN results for only one side with the model option '--model test'.
+ """
+
+ def __init__(self, opt):
+ """Initialize this dataset class.
+
+ Parameters:
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+ """
+ BaseDataset.__init__(self, opt)
+ self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
+ input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
+ self.transform = get_transform(opt, grayscale=(input_nc == 1))
+
+ def __getitem__(self, index):
+ """Return a data point and its metadata information.
+
+ Parameters:
+ index - - a random integer for data indexing
+
+ Returns a dictionary that contains A and A_paths
+ A(tensor) - - an image in one domain
+ A_paths(str) - - the path of the image
+ """
+ A_path = self.A_paths[index]
+ A_img = Image.open(A_path).convert('RGB')
+ A = self.transform(A_img)
+ return {'A': A, 'A_paths': A_path}
+
+ def __len__(self):
+ """Return the total number of images in the dataset."""
+ return len(self.A_paths)
diff --git a/data/template_dataset.py b/data/template_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfdf16be2a8a834b204c45d88c86857b37b9bd25
--- /dev/null
+++ b/data/template_dataset.py
@@ -0,0 +1,75 @@
+"""Dataset class template
+
+This module provides a template for users to implement custom datasets.
+You can specify '--dataset_mode template' to use this dataset.
+The class name should be consistent with both the filename and its dataset_mode option.
+The filename should be [dataset_mode]_dataset.py
+The class name should be [DatasetMode]Dataset.
+You need to implement the following functions:
+ -- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
+ -- <__init__>: Initialize this dataset class.
+ -- <__getitem__>: Return a data point and its metadata information.
+ -- <__len__>: Return the number of images.
+"""
+from data.base_dataset import BaseDataset, get_transform
+# from data.image_folder import make_dataset
+# from PIL import Image
+
+
+class TemplateDataset(BaseDataset):
+ """A template dataset class for you to implement custom datasets."""
+ @staticmethod
+ def modify_commandline_options(parser, is_train):
+ """Add new dataset-specific options, and rewrite default values for existing options.
+
+ Parameters:
+ parser -- original option parser
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+ Returns:
+ the modified parser.
+ """
+ parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
+ parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values
+ return parser
+
+ def __init__(self, opt):
+ """Initialize this dataset class.
+
+ Parameters:
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+
+ A few things can be done here.
+ - save the options (already done in BaseDataset)
+ - get image paths and meta information of the dataset.
+ - define the image transformation.
+ """
+ # save the option and dataset root
+ BaseDataset.__init__(self, opt)
+ # get the image paths of your dataset;
+ self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
+ # define the default transform function. You can use get_transform from data.base_dataset; you can also define your own transform function
+ self.transform = get_transform(opt)
+
+ def __getitem__(self, index):
+ """Return a data point and its metadata information.
+
+ Parameters:
+ index -- a random integer for data indexing
+
+ Returns:
+ a dictionary of data with their names. It usually contains the data itself and its metadata information.
+
+ Step 1: get a random image path: e.g., path = self.image_paths[index]
+ Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
+ Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform, e.g., data = self.transform(image).
+ Step 4: return a data point as a dictionary.
+ """
+ path = 'temp' # needs to be a string
+ data_A = None # needs to be a tensor
+ data_B = None # needs to be a tensor
+ return {'data_A': data_A, 'data_B': data_B, 'path': path}
+
+ def __len__(self):
+ """Return the total number of images."""
+ return len(self.image_paths)
diff --git a/data/unaligned_dataset.py b/data/unaligned_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd7af1d5ab2d7623e83decc7232c30cbb83e5367
--- /dev/null
+++ b/data/unaligned_dataset.py
@@ -0,0 +1,71 @@
+import os
+from data.base_dataset import BaseDataset, get_transform
+from data.image_folder import make_dataset
+from PIL import Image
+import random
+
+
+class UnalignedDataset(BaseDataset):
+ """
+ This dataset class can load unaligned/unpaired datasets.
+
+ It requires two directories to host training images from domain A '/path/to/data/trainA'
+ and from domain B '/path/to/data/trainB' respectively.
+ You can train the model with the dataset flag '--dataroot /path/to/data'.
+ Similarly, you need to prepare two directories:
+ '/path/to/data/testA' and '/path/to/data/testB' during test time.
+ """
+
+ def __init__(self, opt):
+ """Initialize this dataset class.
+
+ Parameters:
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+ """
+ BaseDataset.__init__(self, opt)
+ self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
+ self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB'
+
+ self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
+ self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
+ self.A_size = len(self.A_paths) # get the size of dataset A
+ self.B_size = len(self.B_paths) # get the size of dataset B
+ btoA = self.opt.direction == 'BtoA'
+ input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
+ output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
+ self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
+ self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
+
+ def __getitem__(self, index):
+ """Return a data point and its metadata information.
+
+ Parameters:
+ index (int) -- a random integer for data indexing
+
+ Returns a dictionary that contains A, B, A_paths and B_paths
+ A (tensor) -- an image in the input domain
+ B (tensor) -- its corresponding image in the target domain
+ A_paths (str) -- image paths
+ B_paths (str) -- image paths
+ """
+ A_path = self.A_paths[index % self.A_size] # make sure index is within the range
+ if self.opt.serial_batches: # use a fixed, paired index for domain B
+ index_B = index % self.B_size
+ else: # randomize the index for domain B to avoid fixed pairs.
+ index_B = random.randint(0, self.B_size - 1)
+ B_path = self.B_paths[index_B]
+ A_img = Image.open(A_path).convert('RGB')
+ B_img = Image.open(B_path).convert('RGB')
+ # apply image transformation
+ A = self.transform_A(A_img)
+ B = self.transform_B(B_img)
+
+ return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
+
+ def __len__(self):
+ """Return the total number of images in the dataset.
+
+ As we have two datasets with potentially different numbers of images,
+ we take the maximum of the two.
+ """
+ return max(self.A_size, self.B_size)
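A sketch of instantiating UnalignedDataset directly, with a stand-in Namespace that carries only the option fields this class and get_transform read; the dataroot is an example path and assumes trainA/ and trainB/ exist under it.

from argparse import Namespace
from data.unaligned_dataset import UnalignedDataset

opt = Namespace(dataroot='datasets/bw2color', phase='train',
                max_dataset_size=float('inf'), direction='AtoB',
                input_nc=3, output_nc=3, serial_batches=False,
                preprocess='resize_and_crop', load_size=286,
                crop_size=256, no_flip=False)

dataset = UnalignedDataset(opt)
sample = dataset[0]  # A is indexed deterministically, B is drawn at random
print(sample['A'].shape, sample['B'].shape, sample['A_paths'], sample['B_paths'])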
diff --git a/datasets/bibtex/cityscapes.tex b/datasets/bibtex/cityscapes.tex
new file mode 100644
index 0000000000000000000000000000000000000000..a87bdbf54fe9a5453fc8cf929299ef06d2f47691
--- /dev/null
+++ b/datasets/bibtex/cityscapes.tex
@@ -0,0 +1,6 @@
+@inproceedings{Cordts2016Cityscapes,
+title={The Cityscapes Dataset for Semantic Urban Scene Understanding},
+author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},
+booktitle={Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
+year={2016}
+}
diff --git a/datasets/bibtex/facades.tex b/datasets/bibtex/facades.tex
new file mode 100644
index 0000000000000000000000000000000000000000..08b773e1188a9cfe8ce55b34616cead59c4d9243
--- /dev/null
+++ b/datasets/bibtex/facades.tex
@@ -0,0 +1,7 @@
+@INPROCEEDINGS{Tylecek13,
+ author = {Radim Tyle{\v c}ek and Radim {\v S}{\' a}ra},
+ title = {Spatial Pattern Templates for Recognition of Objects with Regular Structure},
+ booktitle = {Proc. GCPR},
+ year = {2013},
+ address = {Saarbrucken, Germany},
+}
diff --git a/datasets/bibtex/handbags.tex b/datasets/bibtex/handbags.tex
new file mode 100644
index 0000000000000000000000000000000000000000..b79710c7b5344b181a534e04696dd2e75c744ecf
--- /dev/null
+++ b/datasets/bibtex/handbags.tex
@@ -0,0 +1,13 @@
+@inproceedings{zhu2016generative,
+ title={Generative Visual Manipulation on the Natural Image Manifold},
+ author={Zhu, Jun-Yan and Kr{\"a}henb{\"u}hl, Philipp and Shechtman, Eli and Efros, Alexei A.},
+ booktitle={Proceedings of European Conference on Computer Vision (ECCV)},
+ year={2016}
+}
+
+@InProceedings{xie15hed,
+ author = {"Xie, Saining and Tu, Zhuowen"},
+ Title = {Holistically-Nested Edge Detection},
+ Booktitle = "Proceedings of IEEE International Conference on Computer Vision",
+ Year = {2015},
+}
diff --git a/datasets/bibtex/shoes.tex b/datasets/bibtex/shoes.tex
new file mode 100644
index 0000000000000000000000000000000000000000..e67e158b945e456b9613f0effb06784cd6682c20
--- /dev/null
+++ b/datasets/bibtex/shoes.tex
@@ -0,0 +1,14 @@
+@InProceedings{fine-grained,
+ author = {A. Yu and K. Grauman},
+ title = {{F}ine-{G}rained {V}isual {C}omparisons with {L}ocal {L}earning},
+ booktitle = {Computer Vision and Pattern Recognition (CVPR)},
+ month = {June},
+ year = {2014}
+}
+
+@InProceedings{xie15hed,
+ author = {"Xie, Saining and Tu, Zhuowen"},
+ Title = {Holistically-Nested Edge Detection},
+ Booktitle = "Proceedings of IEEE International Conference on Computer Vision",
+ Year = {2015},
+}
diff --git a/datasets/bibtex/transattr.tex b/datasets/bibtex/transattr.tex
new file mode 100644
index 0000000000000000000000000000000000000000..0585849961668d74e058b4a636f4ab0388723ba9
--- /dev/null
+++ b/datasets/bibtex/transattr.tex
@@ -0,0 +1,8 @@
+@article {Laffont14,
+ title = {Transient Attributes for High-Level Understanding and Editing of Outdoor Scenes},
+ author = {Pierre-Yves Laffont and Zhile Ren and Xiaofeng Tao and Chao Qian and James Hays},
+ journal = {ACM Transactions on Graphics (proceedings of SIGGRAPH)},
+ volume = {33},
+ number = {4},
+ year = {2014}
+}
diff --git a/datasets/bw2color/testA/- Tomo 01 -_Noragami - Tomo 01 (#001-003)_Noragami 01_012.jpg b/datasets/bw2color/testA/- Tomo 01 -_Noragami - Tomo 01 (#001-003)_Noragami 01_012.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..43c82b7511d1c987c2e34cce7c801dd4b9968275
Binary files /dev/null and b/datasets/bw2color/testA/- Tomo 01 -_Noragami - Tomo 01 (#001-003)_Noragami 01_012.jpg differ
diff --git a/datasets/bw2color/testA/- Tomo 01 -_Noragami - Tomo 01 (#001-003)_Noragami 01_015.jpg b/datasets/bw2color/testA/- Tomo 01 -_Noragami - Tomo 01 (#001-003)_Noragami 01_015.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..00831eef77bdefcc3dbed655003a3723e5ecace0
Binary files /dev/null and b/datasets/bw2color/testA/- Tomo 01 -_Noragami - Tomo 01 (#001-003)_Noragami 01_015.jpg differ
diff --git a/datasets/bw2color/testA/- Tomo 01 -_Noragami - Tomo 01 (#001-003)_Noragami 01_036.jpg b/datasets/bw2color/testA/- Tomo 01 -_Noragami - Tomo 01 (#001-003)_Noragami 01_036.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..598a42c298f5600aafac40970e4372136dc1805a
Binary files /dev/null and b/datasets/bw2color/testA/- Tomo 01 -_Noragami - Tomo 01 (#001-003)_Noragami 01_036.jpg differ
diff --git a/datasets/bw2color/testA/11_3.png b/datasets/bw2color/testA/11_3.png
new file mode 100644
index 0000000000000000000000000000000000000000..e65983ae58de9b145867286c07a0360ca1ebe790
--- /dev/null
+++ b/datasets/bw2color/testA/11_3.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3e3d6c914a1e0cc5c89c3878b5a33c2a84c467d0772f0e5306bf2db2c6e6939
+size 31607788
diff --git a/datasets/bw2color/testA/12_12.png b/datasets/bw2color/testA/12_12.png
new file mode 100644
index 0000000000000000000000000000000000000000..29e6f0863c77c6b5de7c7393149fe1283550ac08
--- /dev/null
+++ b/datasets/bw2color/testA/12_12.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af4d9aab000593ceea4276b1f5a55eed8dd3d548e0745e663a98e8c7b0fc5a40
+size 20756619
diff --git a/datasets/bw2color/testA/13_8.png b/datasets/bw2color/testA/13_8.png
new file mode 100644
index 0000000000000000000000000000000000000000..d710e72407535b6c00cfbb32b86bc90b3061bd7b
--- /dev/null
+++ b/datasets/bw2color/testA/13_8.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc95b15e9a093aa55fbdd5fdd5f206735b48fa7b89d9fcff3e58bbfa4e945185
+size 20408409
diff --git a/datasets/bw2color/testA/48672614.webp b/datasets/bw2color/testA/48672614.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fdd8699c4f8d401441d7ea2d738c5068e9b22a74
Binary files /dev/null and b/datasets/bw2color/testA/48672614.webp differ
diff --git a/datasets/bw2color/testA/7d85f75c.webp b/datasets/bw2color/testA/7d85f75c.webp
new file mode 100644
index 0000000000000000000000000000000000000000..05842ffa9df034b935f6399b4e6db6a426d8aade
Binary files /dev/null and b/datasets/bw2color/testA/7d85f75c.webp differ
diff --git a/datasets/bw2color/trainB/a (12).jpg b/datasets/bw2color/trainB/a (12).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9cd4e0d7d85d3a23bcf404749c9cef90aa95af3c
--- /dev/null
+++ b/datasets/bw2color/trainB/a (12).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f511e651d80217c627f6f9c9bfce927aecf770d7b2da89352e4ab0c363ffcf9e
+size 2775656
diff --git a/datasets/bw2color/trainB/a (188).jpg b/datasets/bw2color/trainB/a (188).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c7d4f773b27cf270842c54e8ec03f2afa6db9742
--- /dev/null
+++ b/datasets/bw2color/trainB/a (188).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4858c99f635d8e3c80076a6bc93b31e394bf39eb74898ef62219606909d0dfe7
+size 2804338
diff --git a/datasets/bw2color/trainB/a (192).jpg b/datasets/bw2color/trainB/a (192).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bd50ad7a657dcf9d0704c7a3ee7ddf1b22a9e7d7
--- /dev/null
+++ b/datasets/bw2color/trainB/a (192).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf151d01c06f8de5b5b60aa6f6325b04f01dde3abd4ed55d05a060127e6b6aff
+size 2789919
diff --git a/datasets/bw2color/trainB/a (197).jpg b/datasets/bw2color/trainB/a (197).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4c446176383026de06d4faa2d7d6dd980e67de43
--- /dev/null
+++ b/datasets/bw2color/trainB/a (197).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fec7d2b8e126f8776704fa7261e0f521261027d646d9399c96eea06139fd2ef8
+size 2788543
diff --git a/datasets/bw2color/trainB/a (215).jpg b/datasets/bw2color/trainB/a (215).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2fbb4b55a3d37ca836e67a2b98533cd9f39ce33c
--- /dev/null
+++ b/datasets/bw2color/trainB/a (215).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d586bcd9cbec8cd27d5d9ababc6a41a6c10927398c2211c32ba27367e15eb35a
+size 2768310
diff --git a/datasets/bw2color/trainB/a (231).jpg b/datasets/bw2color/trainB/a (231).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b36ec67a2dcfddf5eaa96583642aaa21a6686542
--- /dev/null
+++ b/datasets/bw2color/trainB/a (231).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f3af75d69997e8a013ab16e4a2f1436c7653f61aa98b521c83097a3e3fc053f
+size 2757205
diff --git a/datasets/bw2color/trainB/a (238).jpg b/datasets/bw2color/trainB/a (238).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..65d73b52430aa2f61a914d1f3485dd37d3c52a44
--- /dev/null
+++ b/datasets/bw2color/trainB/a (238).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9a584d50a41f0509d047e97b18edbaba07b27d37f9a799a7843db4ac434a509
+size 2797775
diff --git a/datasets/bw2color/trainB/a (254).jpg b/datasets/bw2color/trainB/a (254).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..90acbd5635ec2a8f82bb93eae76097965f9df0de
--- /dev/null
+++ b/datasets/bw2color/trainB/a (254).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6263feb9f22c1a607d8983db92354f2857b82ddb2d3eb4fe04a1728acd46a4b9
+size 2817115
diff --git a/datasets/bw2color/trainB/a (281).jpg b/datasets/bw2color/trainB/a (281).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ab6853a1956295b309453a70876f1fdf3c5d307e
--- /dev/null
+++ b/datasets/bw2color/trainB/a (281).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f605bc6774856a5cfe58b9b280410619102ea7ee3e7f845bc05e7b9f1faf3d31
+size 2765576
diff --git a/datasets/bw2color/trainB/a (288).jpg b/datasets/bw2color/trainB/a (288).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3d7f56b733c42f8fdbb834cf5928dd6a70904e23
--- /dev/null
+++ b/datasets/bw2color/trainB/a (288).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d58fe543212b78322f0cebdc41a481d635e6ed874968392694014279b4b58aa4
+size 2792562
diff --git a/datasets/bw2color/trainB/a (302).jpg b/datasets/bw2color/trainB/a (302).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..757327ff0c231065e58a510a3f51b6bd99860fd7
--- /dev/null
+++ b/datasets/bw2color/trainB/a (302).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fec34f53a879a59089013a0ba8d498d5593560ed0c87bf39a55566ea1aa650b2
+size 2792777
diff --git a/datasets/bw2color/trainB/a (307).jpg b/datasets/bw2color/trainB/a (307).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e976a774f4a0c4d73363a00189061f058ddcfc7b
--- /dev/null
+++ b/datasets/bw2color/trainB/a (307).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0da15a0c76e1028fa10ddf029855ac9f7fdcb6f914d22ae0c2c0cd947b359d1d
+size 2802674
diff --git a/datasets/bw2color/trainB/a (310).jpg b/datasets/bw2color/trainB/a (310).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e2b621175c8e6bfb90155482b2f7337569dfc35e
--- /dev/null
+++ b/datasets/bw2color/trainB/a (310).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fea4ed282a2dd063bf1226f53ed4b0a1ed5dfcccbae63aab8074ceafb2375006
+size 2759606
diff --git a/datasets/bw2color/trainB/a (329).jpg b/datasets/bw2color/trainB/a (329).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bc993d7d1255983e3bf9d019cfabc6399efea817
--- /dev/null
+++ b/datasets/bw2color/trainB/a (329).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4f437503a6009c530e4458a10f4669daf3d93eafb5dafec90d5eacbae95bf03
+size 2750039
diff --git a/datasets/bw2color/trainB/a (33).jpg b/datasets/bw2color/trainB/a (33).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..94d281a26b0c22a9099bb7dbe8317160559c09c0
--- /dev/null
+++ b/datasets/bw2color/trainB/a (33).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d638f805367ffeb31bbfebb3abb2cafd7e09094a5f041f482841ebff4b2e809
+size 2806227
diff --git a/datasets/bw2color/trainB/a (374).jpg b/datasets/bw2color/trainB/a (374).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..197e4b7b8b8e1193dddbd7f6f3274498170c466c
--- /dev/null
+++ b/datasets/bw2color/trainB/a (374).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58e09481d5311e7a8b7f578ec306967a5a93bf3b1a6db1df854d306f5d8c04b6
+size 2793523
diff --git a/datasets/bw2color/trainB/a (392).jpg b/datasets/bw2color/trainB/a (392).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e88b1f978387a03f1c03a1872911b1b5f21d670e
--- /dev/null
+++ b/datasets/bw2color/trainB/a (392).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df8ba7b888416cb404ce46a24cfaf92250aeabf773ad25079ee2f49cd64d9257
+size 2818223
diff --git a/datasets/bw2color/trainB/a (41).jpg b/datasets/bw2color/trainB/a (41).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ebb810ccb7734e94d34c33bd68ce81d356de2b7a
--- /dev/null
+++ b/datasets/bw2color/trainB/a (41).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:471e9cd297f05e76eeeb7a2bad765b1c02c728315384428994d3d21925372333
+size 2799745
diff --git a/datasets/bw2color/trainB/a (445).jpg b/datasets/bw2color/trainB/a (445).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..02706a8ffa42f8b182cbfecc00755728dc2e7766
--- /dev/null
+++ b/datasets/bw2color/trainB/a (445).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2b051a3102467154e8b82f74469f91bb10223f0b9d46cc6956829d283693276
+size 2763652
diff --git a/datasets/bw2color/trainB/a (449).jpg b/datasets/bw2color/trainB/a (449).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0444fdb9ac48e13d243930a2a971c8119c746785
--- /dev/null
+++ b/datasets/bw2color/trainB/a (449).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af18676b86ad9232c04d18c74cd8ae11dc127c3b48fafb0c969bba3a2f8d769d
+size 2764870
diff --git a/datasets/bw2color/trainB/a (454).jpg b/datasets/bw2color/trainB/a (454).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..73c282d0a1260733b2997666a363e451a1e7d3cc
--- /dev/null
+++ b/datasets/bw2color/trainB/a (454).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6911829e7c75edd3a59a75a81ace459b11475580068b8e9a98959e9f21e0413e
+size 2793825
diff --git a/datasets/bw2color/trainB/a (512).jpg b/datasets/bw2color/trainB/a (512).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..af3c407e827d0182a80faad89a6d5eb4a0b2280f
--- /dev/null
+++ b/datasets/bw2color/trainB/a (512).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22b122ab75d1905b565e380879dd57e9a2ef9db4fd6b8199ac6442bd6eeefd01
+size 2800881
diff --git a/datasets/bw2color/trainB/a (517).jpg b/datasets/bw2color/trainB/a (517).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..41708410073f277be1e8070e627d4aee5780ca5f
--- /dev/null
+++ b/datasets/bw2color/trainB/a (517).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00ff7f0127cad3282c698c7b58ae44a83ae37a801d27273b48181efee5e05b0c
+size 2805785
diff --git a/datasets/bw2color/trainB/a (529).jpg b/datasets/bw2color/trainB/a (529).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..eea7bc745d4ae0ec85b22fa463e31d965c355f98
--- /dev/null
+++ b/datasets/bw2color/trainB/a (529).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93401bdba4e70544bab2e5e387a719096c354645cef28bfd79808c1ba51d4542
+size 2801018
diff --git a/datasets/bw2color/trainB/a (533).jpg b/datasets/bw2color/trainB/a (533).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6afd6f5d4fe594419bf8ff0c97bd3e475c9d2617
--- /dev/null
+++ b/datasets/bw2color/trainB/a (533).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a4bbbb8953952b6c14955da34165b76c473f1986fa172f08e264f0793845a3e
+size 2810263
diff --git a/datasets/bw2color/trainB/a (54).jpg b/datasets/bw2color/trainB/a (54).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..157b59a898e547f8ee4ffa0dda17072b4abf00fa
--- /dev/null
+++ b/datasets/bw2color/trainB/a (54).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2d1c1fef6fee3d6388b2fb5efb8162039fd6c16f9e3d95d3aade85a12d37712
+size 2764356
diff --git a/datasets/bw2color/trainB/a (565).jpg b/datasets/bw2color/trainB/a (565).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d2e4dc5a8d6ba0d004fa43f95f4460f87b87a570
--- /dev/null
+++ b/datasets/bw2color/trainB/a (565).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f299d989d343f2416d0a96796e2d18a862b6a49151c57916943a49c2f2801870
+size 2775607
diff --git a/datasets/bw2color/trainB/a (576).jpg b/datasets/bw2color/trainB/a (576).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e08058d17de12023b89be783221e52b8c9944df8
--- /dev/null
+++ b/datasets/bw2color/trainB/a (576).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c87fc7bbb047201d93ea176008ee7d5ec470e2817134999c2b862d699637f433
+size 2766705
diff --git a/datasets/bw2color/trainB/a (587).jpg b/datasets/bw2color/trainB/a (587).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4dbe2bb3e81a03dc232866f65933a265fcd546f4
--- /dev/null
+++ b/datasets/bw2color/trainB/a (587).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d5751e845966c02c5f3a09b138dd0a5af0e71b752f6465dc558aed263a33912
+size 2762818
diff --git a/datasets/bw2color/trainB/a (642).jpg b/datasets/bw2color/trainB/a (642).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..284048741272948a7626d84dde2393b646ac703d
--- /dev/null
+++ b/datasets/bw2color/trainB/a (642).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6115e2616ae1ac2aeaf227bbbdc431efa421ddef89b486661bac2638ed7d731d
+size 2758932
diff --git a/datasets/bw2color/trainB/a (648).jpg b/datasets/bw2color/trainB/a (648).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..073759638aa3d6372b64ccda0789d0e3b2f496b7
--- /dev/null
+++ b/datasets/bw2color/trainB/a (648).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1800885be8e69fdf3db81e00655cecc70f432abc90072edb11cef3a6ce8e589
+size 2760026
diff --git a/datasets/bw2color/trainB/a (670).jpg b/datasets/bw2color/trainB/a (670).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8e250dacac3c16f36ecd018a63ee76bdba52f7df
--- /dev/null
+++ b/datasets/bw2color/trainB/a (670).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1929ec3e0e22c9b1aaade5488dc463414954fe80afeb220c0a80ab2174fb750
+size 2788295
diff --git a/datasets/bw2color/trainB/a (672).jpg b/datasets/bw2color/trainB/a (672).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..414a9b0f7ed42d593090b4a328791d332e5f38e0
--- /dev/null
+++ b/datasets/bw2color/trainB/a (672).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:763da314164e85821e05e7eaf4ef58374556c3d7cfdc0a9fa97b7e187af2450a
+size 2758966
diff --git a/datasets/bw2color/trainB/a (732).jpg b/datasets/bw2color/trainB/a (732).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..35e0861750aad36161334c543ffa50d75a6ae32a
--- /dev/null
+++ b/datasets/bw2color/trainB/a (732).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74375810e21e8de0f0bb6da29e5cd6261be6c2785cac0c9253a0dbe203d2da7f
+size 2814111
diff --git a/datasets/bw2color/trainB/a (751).jpg b/datasets/bw2color/trainB/a (751).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a23affd6614868274902dba511dd2faae121c2e6
--- /dev/null
+++ b/datasets/bw2color/trainB/a (751).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15785096fee1713f834dff0308c65f122403a4abc087e7626b4b31e41ffb6245
+size 2825205
diff --git a/datasets/bw2color/trainB/a (757).jpg b/datasets/bw2color/trainB/a (757).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..12eb4de87339dd49155e273f3c9db4b2dff81196
--- /dev/null
+++ b/datasets/bw2color/trainB/a (757).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fdf66aa9309dc0291d12885543fce2a359eb996be77f63e4d4ba66b25813228
+size 2793128
diff --git a/datasets/bw2color/trainB/a (759).jpg b/datasets/bw2color/trainB/a (759).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b53d57dec963ebedb325637c6bf26646d3ecc76e
--- /dev/null
+++ b/datasets/bw2color/trainB/a (759).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44da4db8dd97846ea238d28af2a3299a5c8abd9b429b79e0dbaacf9984a7397e
+size 2787463
diff --git a/datasets/bw2color/trainB/a (825).jpg b/datasets/bw2color/trainB/a (825).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9aaf195fc465b32708132c142932d9bee6e50c64
--- /dev/null
+++ b/datasets/bw2color/trainB/a (825).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e8f7c011714db093dd93345c696cb4a9d6df3d880d5bc7fbd50fadf90ad6db4
+size 2763189
diff --git a/datasets/bw2color/trainB/a (834).jpg b/datasets/bw2color/trainB/a (834).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..45f1a47a186b3bc81d17366d65eeb5b7553d74c6
--- /dev/null
+++ b/datasets/bw2color/trainB/a (834).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad2910f5abed36b1918176b1ab633adef6d3ad34e10518c03307bb26ff59b26c
+size 2761704
diff --git a/datasets/bw2color/trainB/a (845).jpg b/datasets/bw2color/trainB/a (845).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..17ef9cd226507554801fec1c08f1c7dfca4f2db5
--- /dev/null
+++ b/datasets/bw2color/trainB/a (845).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8ceac844f502aa4dba388cc086bf326778892ca78a0f455217911c8ab9c39f5
+size 2761712
diff --git a/datasets/bw2color/trainB/a (846).jpg b/datasets/bw2color/trainB/a (846).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..78e598e563d78ac37a4fd2965ed631ee559d7f2b
--- /dev/null
+++ b/datasets/bw2color/trainB/a (846).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bca26aa1e541edf77149aad6e052b9b7eb539fbed66d6b4c8d175a104c0db9e
+size 2785297
diff --git a/datasets/bw2color/trainB/a (849).jpg b/datasets/bw2color/trainB/a (849).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7d3a6003c5fa5b7d097d1dbbb0f6cf0e914c5fe3
--- /dev/null
+++ b/datasets/bw2color/trainB/a (849).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e536603d08a92312e8828773988bf0c486e430f1e3539469ad11d0eb970214ff
+size 2754882
diff --git a/datasets/bw2color/trainB/a (880).jpg b/datasets/bw2color/trainB/a (880).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..622549daa9f8eb3c12213fbb6458028f191d5729
--- /dev/null
+++ b/datasets/bw2color/trainB/a (880).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:525513f034c4e6eed0908913d464f15cbe19921263764e68cddd8f289dd8fb26
+size 2761822
diff --git a/datasets/bw2color/trainB/a (884).jpg b/datasets/bw2color/trainB/a (884).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..27333521645ef605226619cd68bcd6077221a468
--- /dev/null
+++ b/datasets/bw2color/trainB/a (884).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b8c12913a27f9eb95d44c5490a51a3a090f1f843bba4c0fc839ac1be5e0b8d3
+size 2776911
diff --git a/datasets/bw2color/trainB/a (90).jpg b/datasets/bw2color/trainB/a (90).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0ee8985be0846ed1a216a26d9d1882370647d6cd
--- /dev/null
+++ b/datasets/bw2color/trainB/a (90).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:048a0338100ce8829d000c9600293d6291d23a148175eda0ee1646f0225bbf5c
+size 2750215
diff --git a/datasets/bw2color/trainB/a (902).jpg b/datasets/bw2color/trainB/a (902).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4e41865a29f0d7dffdeb804e57438425b5ce36b4
--- /dev/null
+++ b/datasets/bw2color/trainB/a (902).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7850a2beb9bea16cd77a5f001a0526c587c82da0e0bc1efd92f46b52b4df05b6
+size 2789036
diff --git a/datasets/bw2color/trainB/a (93).jpg b/datasets/bw2color/trainB/a (93).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3532910388996dedfd791fd8ee5f4ab3e1c3256d
--- /dev/null
+++ b/datasets/bw2color/trainB/a (93).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ea918daff11574a7ae6530480eaa56249e770a3735da538cb5ba941aaf652df
+size 2778789
diff --git a/datasets/bw2color/trainB/a (940).jpg b/datasets/bw2color/trainB/a (940).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c2bca10b9982c0c413137b5e430bbf27f73c8aeb
--- /dev/null
+++ b/datasets/bw2color/trainB/a (940).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd573b2661971d746789c43c5325b7ec8b8166145c5f2d859b4250f9dc063016
+size 2796028
diff --git a/datasets/bw2color/trainB/a (95).jpg b/datasets/bw2color/trainB/a (95).jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bb7ec75f7439bd5818ca74dbf29037c2a111a6ef
--- /dev/null
+++ b/datasets/bw2color/trainB/a (95).jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:661f90560e44d403131e0439ec1a2f0e31bc1670c9eda5872686b56a6564d45c
+size 2792676
diff --git a/datasets/combine_A_and_B.py b/datasets/combine_A_and_B.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3d88aa318e743b6bb0cd3acf17934a8e0b7fccb
--- /dev/null
+++ b/datasets/combine_A_and_B.py
@@ -0,0 +1,67 @@
+import os
+import numpy as np
+import cv2
+import argparse
+from multiprocessing import Pool
+
+
+def image_write(path_A, path_B, path_AB):
+ im_A = cv2.imread(path_A, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
+ im_B = cv2.imread(path_B, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
+ im_AB = np.concatenate([im_A, im_B], 1)
+ cv2.imwrite(path_AB, im_AB)
+
+
+parser = argparse.ArgumentParser('create image pairs')
+parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='../dataset/50kshoes_edges')
+parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='../dataset/50kshoes_jpg')
+parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='../dataset/test_AB')
+parser.add_argument('--num_imgs', dest='num_imgs', help='number of images', type=int, default=1000000)
+parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)', action='store_true')
+parser.add_argument('--no_multiprocessing', dest='no_multiprocessing', help='If used, chooses single CPU execution instead of parallel execution', action='store_true',default=False)
+args = parser.parse_args()
+
+for arg in vars(args):
+ print('[%s] = ' % arg, getattr(args, arg))
+
+splits = os.listdir(args.fold_A)
+
+if not args.no_multiprocessing:
+ pool=Pool()
+
+for sp in splits:
+ img_fold_A = os.path.join(args.fold_A, sp)
+ img_fold_B = os.path.join(args.fold_B, sp)
+ img_list = os.listdir(img_fold_A)
+ if args.use_AB:
+ img_list = [img_path for img_path in img_list if '_A.' in img_path]
+
+ num_imgs = min(args.num_imgs, len(img_list))
+ print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
+ img_fold_AB = os.path.join(args.fold_AB, sp)
+ if not os.path.isdir(img_fold_AB):
+ os.makedirs(img_fold_AB)
+ print('split = %s, number of images = %d' % (sp, num_imgs))
+ for n in range(num_imgs):
+ name_A = img_list[n]
+ path_A = os.path.join(img_fold_A, name_A)
+ if args.use_AB:
+ name_B = name_A.replace('_A.', '_B.')
+ else:
+ name_B = name_A
+ path_B = os.path.join(img_fold_B, name_B)
+ if os.path.isfile(path_A) and os.path.isfile(path_B):
+ name_AB = name_A
+ if args.use_AB:
+ name_AB = name_AB.replace('_A.', '.') # remove _A
+ path_AB = os.path.join(img_fold_AB, name_AB)
+ if not args.no_multiprocessing:
+ pool.apply_async(image_write, args=(path_A, path_B, path_AB))
+            else:
+                image_write(path_A, path_B, path_AB)  # single-process fallback
+if not args.no_multiprocessing:
+ pool.close()
+ pool.join()
diff --git a/datasets/download_cyclegan_dataset.sh b/datasets/download_cyclegan_dataset.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5d0b71a60c387ae34b125afad7f109680a75adc3
--- /dev/null
+++ b/datasets/download_cyclegan_dataset.sh
@@ -0,0 +1,21 @@
+FILE=$1
+
+if [[ $FILE != "ae_photos" && $FILE != "apple2orange" && $FILE != "summer2winter_yosemite" && $FILE != "horse2zebra" && $FILE != "monet2photo" && $FILE != "cezanne2photo" && $FILE != "ukiyoe2photo" && $FILE != "vangogh2photo" && $FILE != "maps" && $FILE != "cityscapes" && $FILE != "facades" && $FILE != "iphone2dslr_flower" && $FILE != "mini" && $FILE != "mini_pix2pix" && $FILE != "mini_colorization" ]]; then
+ echo "Available datasets are: apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, cezanne2photo, ukiyoe2photo, vangogh2photo, maps, cityscapes, facades, iphone2dslr_flower, ae_photos"
+ exit 1
+fi
+
+if [[ $FILE == "cityscapes" ]]; then
+ echo "Due to license issue, we cannot provide the Cityscapes dataset from our repository. Please download the Cityscapes dataset from https://cityscapes-dataset.com, and use the script ./datasets/prepare_cityscapes_dataset.py."
+ echo "You need to download gtFine_trainvaltest.zip and leftImg8bit_trainvaltest.zip. For further instruction, please read ./datasets/prepare_cityscapes_dataset.py"
+ exit 1
+fi
+
+echo "Specified [$FILE]"
+URL=http://efrosgans.eecs.berkeley.edu/cyclegan/datasets/$FILE.zip
+ZIP_FILE=./datasets/$FILE.zip
+TARGET_DIR=./datasets/$FILE/
+wget -N $URL -O $ZIP_FILE
+mkdir -p $TARGET_DIR
+unzip $ZIP_FILE -d ./datasets/
+rm $ZIP_FILE
diff --git a/datasets/download_pix2pix_dataset.sh b/datasets/download_pix2pix_dataset.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4cfbfb1fb00d4c3232790f8f098d7fb244ac7018
--- /dev/null
+++ b/datasets/download_pix2pix_dataset.sh
@@ -0,0 +1,22 @@
+FILE=$1
+
+if [[ $FILE != "cityscapes" && $FILE != "night2day" && $FILE != "edges2handbags" && $FILE != "edges2shoes" && $FILE != "facades" && $FILE != "maps" ]]; then
+ echo "Available datasets are cityscapes, night2day, edges2handbags, edges2shoes, facades, maps"
+ exit 1
+fi
+
+if [[ $FILE == "cityscapes" ]]; then
+ echo "Due to license issue, we cannot provide the Cityscapes dataset from our repository. Please download the Cityscapes dataset from https://cityscapes-dataset.com, and use the script ./datasets/prepare_cityscapes_dataset.py."
+ echo "You need to download gtFine_trainvaltest.zip and leftImg8bit_trainvaltest.zip. For further instruction, please read ./datasets/prepare_cityscapes_dataset.py"
+ exit 1
+fi
+
+echo "Specified [$FILE]"
+
+URL=http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/$FILE.tar.gz
+TAR_FILE=./datasets/$FILE.tar.gz
+TARGET_DIR=./datasets/$FILE/
+wget -N $URL -O $TAR_FILE
+mkdir -p $TARGET_DIR
+tar -zxvf $TAR_FILE -C ./datasets/
+rm $TAR_FILE
diff --git a/datasets/make_dataset_aligned.py b/datasets/make_dataset_aligned.py
new file mode 100644
index 0000000000000000000000000000000000000000..739c767948c2f47bef16a88d552c25bdb12c6e3f
--- /dev/null
+++ b/datasets/make_dataset_aligned.py
@@ -0,0 +1,63 @@
+import os
+
+from PIL import Image
+
+
+def get_file_paths(folder):
+ image_file_paths = []
+ for root, dirs, filenames in os.walk(folder):
+ filenames = sorted(filenames)
+ for filename in filenames:
+ input_path = os.path.abspath(root)
+ file_path = os.path.join(input_path, filename)
+ if filename.endswith('.png') or filename.endswith('.jpg'):
+ image_file_paths.append(file_path)
+
+ break # prevent descending into subfolders
+ return image_file_paths
+
+
+def align_images(a_file_paths, b_file_paths, target_path):
+ if not os.path.exists(target_path):
+ os.makedirs(target_path)
+
+ for i in range(len(a_file_paths)):
+ img_a = Image.open(a_file_paths[i])
+ img_b = Image.open(b_file_paths[i])
+ assert(img_a.size == img_b.size)
+
+ aligned_image = Image.new("RGB", (img_a.size[0] * 2, img_a.size[1]))
+ aligned_image.paste(img_a, (0, 0))
+ aligned_image.paste(img_b, (img_a.size[0], 0))
+ aligned_image.save(os.path.join(target_path, '{:04d}.jpg'.format(i)))
+
+
+if __name__ == '__main__':
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--dataset-path',
+ dest='dataset_path',
+        help='Which folder to process (it should have subfolders testA, testB, trainA and trainB)'
+ )
+ args = parser.parse_args()
+
+ dataset_folder = args.dataset_path
+ print(dataset_folder)
+
+ test_a_path = os.path.join(dataset_folder, 'testA')
+ test_b_path = os.path.join(dataset_folder, 'testB')
+ test_a_file_paths = get_file_paths(test_a_path)
+ test_b_file_paths = get_file_paths(test_b_path)
+ assert(len(test_a_file_paths) == len(test_b_file_paths))
+ test_path = os.path.join(dataset_folder, 'test')
+
+ train_a_path = os.path.join(dataset_folder, 'trainA')
+ train_b_path = os.path.join(dataset_folder, 'trainB')
+ train_a_file_paths = get_file_paths(train_a_path)
+ train_b_file_paths = get_file_paths(train_b_path)
+ assert(len(train_a_file_paths) == len(train_b_file_paths))
+ train_path = os.path.join(dataset_folder, 'train')
+
+ align_images(test_a_file_paths, test_b_file_paths, test_path)
+ align_images(train_a_file_paths, train_b_file_paths, train_path)
diff --git a/datasets/prepare_cityscapes_dataset.py b/datasets/prepare_cityscapes_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5b2a20d004b17cf04c34f1d95b8f509aa16228f
--- /dev/null
+++ b/datasets/prepare_cityscapes_dataset.py
@@ -0,0 +1,99 @@
+import os
+import glob
+from PIL import Image
+
+help_msg = """
+The dataset can be downloaded from https://cityscapes-dataset.com.
+Please download the datasets [gtFine_trainvaltest.zip] and [leftImg8bit_trainvaltest.zip] and unzip them.
+gtFine contains the semantics segmentations. Use --gtFine_dir to specify the path to the unzipped gtFine_trainvaltest directory.
+leftImg8bit contains the dashcam photographs. Use --leftImg8bit_dir to specify the path to the unzipped leftImg8bit_trainvaltest directory.
+The processed images will be placed at --output_dir.
+
+Example usage:
+
+python prepare_cityscapes_dataset.py --gtFine_dir ./gtFine/ --leftImg8bit_dir ./leftImg8bit --output_dir ./datasets/cityscapes/
+"""
+
+def load_resized_img(path):
+ return Image.open(path).convert('RGB').resize((256, 256))
+
+def check_matching_pair(segmap_path, photo_path):
+ segmap_identifier = os.path.basename(segmap_path).replace('_gtFine_color', '')
+ photo_identifier = os.path.basename(photo_path).replace('_leftImg8bit', '')
+
+ assert segmap_identifier == photo_identifier, \
+ "[%s] and [%s] don't seem to be matching. Aborting." % (segmap_path, photo_path)
+
+
+def process_cityscapes(gtFine_dir, leftImg8bit_dir, output_dir, phase):
+ save_phase = 'test' if phase == 'val' else 'train'
+ savedir = os.path.join(output_dir, save_phase)
+ os.makedirs(savedir, exist_ok=True)
+ os.makedirs(savedir + 'A', exist_ok=True)
+ os.makedirs(savedir + 'B', exist_ok=True)
+ print("Directory structure prepared at %s" % output_dir)
+
+ segmap_expr = os.path.join(gtFine_dir, phase) + "/*/*_color.png"
+ segmap_paths = glob.glob(segmap_expr)
+ segmap_paths = sorted(segmap_paths)
+
+ photo_expr = os.path.join(leftImg8bit_dir, phase) + "/*/*_leftImg8bit.png"
+ photo_paths = glob.glob(photo_expr)
+ photo_paths = sorted(photo_paths)
+
+ assert len(segmap_paths) == len(photo_paths), \
+ "%d images that match [%s], and %d images that match [%s]. Aborting." % (len(segmap_paths), segmap_expr, len(photo_paths), photo_expr)
+
+ for i, (segmap_path, photo_path) in enumerate(zip(segmap_paths, photo_paths)):
+ check_matching_pair(segmap_path, photo_path)
+ segmap = load_resized_img(segmap_path)
+ photo = load_resized_img(photo_path)
+
+ # data for pix2pix where the two images are placed side-by-side
+ sidebyside = Image.new('RGB', (512, 256))
+ sidebyside.paste(segmap, (256, 0))
+ sidebyside.paste(photo, (0, 0))
+ savepath = os.path.join(savedir, "%d.jpg" % i)
+ sidebyside.save(savepath, format='JPEG', subsampling=0, quality=100)
+
+ # data for cyclegan where the two images are stored at two distinct directories
+ savepath = os.path.join(savedir + 'A', "%d_A.jpg" % i)
+ photo.save(savepath, format='JPEG', subsampling=0, quality=100)
+ savepath = os.path.join(savedir + 'B', "%d_B.jpg" % i)
+ segmap.save(savepath, format='JPEG', subsampling=0, quality=100)
+
+ if i % (len(segmap_paths) // 10) == 0:
+ print("%d / %d: last image saved at %s, " % (i, len(segmap_paths), savepath))
+
+
+if __name__ == '__main__':
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--gtFine_dir', type=str, required=True,
+ help='Path to the Cityscapes gtFine directory.')
+ parser.add_argument('--leftImg8bit_dir', type=str, required=True,
+ help='Path to the Cityscapes leftImg8bit_trainvaltest directory.')
+ parser.add_argument('--output_dir', type=str, required=True,
+ default='./datasets/cityscapes',
+ help='Directory the output images will be written to.')
+ opt = parser.parse_args()
+
+ print(help_msg)
+
+ print('Preparing Cityscapes Dataset for val phase')
+ process_cityscapes(opt.gtFine_dir, opt.leftImg8bit_dir, opt.output_dir, "val")
+ print('Preparing Cityscapes Dataset for train phase')
+ process_cityscapes(opt.gtFine_dir, opt.leftImg8bit_dir, opt.output_dir, "train")
+
+ print('Done')
diff --git a/docs/Dockerfile b/docs/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..0184be942f208affb7c3f7e95070666064d0e2a3
--- /dev/null
+++ b/docs/Dockerfile
@@ -0,0 +1,16 @@
+FROM nvidia/cuda:10.1-base
+
+#Nvidia Public GPG Key
+RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
+
+RUN apt update && apt install -y wget unzip curl bzip2 git
+RUN curl -LO http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
+RUN bash Miniconda3-latest-Linux-x86_64.sh -p /miniconda -b
+RUN rm Miniconda3-latest-Linux-x86_64.sh
+ENV PATH=/miniconda/bin:${PATH}
+RUN conda update -y conda
+
+RUN conda install -y pytorch torchvision -c pytorch
+RUN mkdir /workspace/ && cd /workspace/ && git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix.git && cd pytorch-CycleGAN-and-pix2pix && pip install -r requirements.txt
+
+WORKDIR /workspace
diff --git a/docs/README_es.md b/docs/README_es.md
new file mode 100644
index 0000000000000000000000000000000000000000..171ac82d57d0278c146cba2315d7df0ecfd2ea29
--- /dev/null
+++ b/docs/README_es.md
@@ -0,0 +1,238 @@
+
+
+
+
+# CycleGAN y pix2pix en PyTorch
+
+Implementación en PyTorch de traducción de imagen a imagen, tanto emparejada (pix2pix) como no emparejada (CycleGAN).
+
+Este codigo fue escrito por [Jun-Yan Zhu](https://github.com/junyanz) y [Taesung Park](https://github.com/taesung), y con ayuda de [Tongzhou Wang](https://ssnl.github.io/).
+
+Esta implementación de PyTorch produce resultados comparables o mejores que nuestro software original de Torch. Si desea producir los mismos resultados que en el documento oficial, eche un vistazo al código original [CycleGAN Torch](https://github.com/junyanz/CycleGAN) y [pix2pix Torch](https://github.com/phillipi/pix2pix).
+
+**Aviso**: El software actual funciona correctamente en PyTorch 0.41+. Para soporte en PyTorch 0.1-0.3: [branch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/tree/pytorch0.3.1).
+
+Puede encontrar información útil en [training/test tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md) y [preguntas frecuentes](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md). Para implementar modelos y conjuntos de datos personalizados, consulte nuestro [templates](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/README_es.md#modelo-y-dataset-personalizado). Para ayudar a los usuarios a comprender y adaptar mejor nuestra base de código, proporcionamos un [overview](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/overview.md) de la estructura de código de este repositorio.
+
+**CycleGAN: [Proyecto](https://junyanz.github.io/CycleGAN/) | [PDF](https://arxiv.org/pdf/1703.10593.pdf) | [Torch](https://github.com/junyanz/CycleGAN) |
+[Guia de Tensorflow Core](https://www.tensorflow.org/tutorials/generative/cyclegan) | [PyTorch Colab](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb)**
+
+
+
+**Pix2pix: [Proyecto](https://phillipi.github.io/pix2pix/) | [PDF](https://arxiv.org/pdf/1611.07004.pdf) | [Torch](https://github.com/phillipi/pix2pix) |
+[Guia de Tensorflow Core](https://www.tensorflow.org/tutorials/generative/pix2pix) | [PyTorch Colab](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/pix2pix.ipynb)**
+
+
+
+
+**[EdgesCats Demo](https://affinelayer.com/pixsrv/) | [pix2pix-tensorflow](https://github.com/affinelayer/pix2pix-tensorflow) | por [Christopher Hesse](https://twitter.com/christophrhesse)**
+
+
+
+Si usa este código para su investigación, cite:
+
+Unpaired Image-to-Image Translation usando Cycle-Consistent Adversarial Networks.
+[Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/)\*, [Taesung Park](https://taesung.me/)\*, [Phillip Isola](https://people.eecs.berkeley.edu/~isola/), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros). In ICCV 2017. (* contribucion igualitaria) [[Bibtex]](https://junyanz.github.io/CycleGAN/CycleGAN.txt)
+
+
+Image-to-Image Translation usando Conditional Adversarial Networks.
+[Phillip Isola](https://people.eecs.berkeley.edu/~isola), [Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/), [Tinghui Zhou](https://people.eecs.berkeley.edu/~tinghuiz), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros). In CVPR 2017. [[Bibtex]](https://www.cs.cmu.edu/~junyanz/projects/pix2pix/pix2pix.bib)
+
+## Charlas y curso
+Presentacion en PowerPoint de Pix2pix: [keynote](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/pix2pix.key) | [pdf](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/pix2pix.pdf),
+Presentacion en PowerPoint de CycleGAN: [pptx](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/CycleGAN.pptx) | [pdf](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/CycleGAN.pdf)
+
+Asignación del curso CycleGAN: [codigo](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-code.zip) y [handout](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-handout.pdf), diseñados por el Prof. [Roger Grosse](http://www.cs.toronto.edu/~rgrosse/) para [CSC321](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/) "Intro to Neural Networks and Machine Learning" en la Universidad de Toronto. Póngase en contacto con el instructor si desea adoptarlo en su curso.
+
+## Colab Notebook
+TensorFlow Core CycleGAN Tutorial: [Google Colab](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/cyclegan.ipynb) | [Codigo](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/cyclegan.ipynb)
+
+Guia de TensorFlow Core pix2pix : [Google Colab](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/pix2pix.ipynb) | [Codigo](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/pix2pix.ipynb)
+
+PyTorch Colab notebook: [CycleGAN](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb) y [pix2pix](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/pix2pix.ipynb)
+
+## Otras implementaciones
+### CycleGAN
+ [Tensorflow] (por Harry Yang),
+[Tensorflow] (por Archit Rathore),
+[Tensorflow] (por Van Huy),
+[Tensorflow] (por Xiaowei Hu),
+ [Tensorflow-simple] (por Zhenliang He),
+ [TensorLayer] (por luoxier),
+[Chainer] (por Yanghua Jin),
+[Minimal PyTorch] (por yunjey),
+[Mxnet] (por Ldpe2G),
+[lasagne/Keras] (por tjwei),
+[Keras] (por Simon Karlsson)
+
+
+
+### pix2pix
+ [Tensorflow] (por Christopher Hesse),
+[Tensorflow] (por Eyyüb Sariu),
+ [Tensorflow (face2face)] (por Dat Tran),
+ [Tensorflow (film)] (por Arthur Juliani),
+[Tensorflow (zi2zi)] (por Yuchen Tian),
+[Chainer] (por mattya),
+[tf/torch/keras/lasagne] (por tjwei),
+[Pytorch] (por taey16)
+
+
+
+## Requerimientos
+- Linux o macOS
+- Python 3
+- CPU o NVIDIA GPU usando CUDA CuDNN
+
+## Inicio
+### Instalación
+
+- Clone este repositorio:
+```bash
+git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
+cd pytorch-CycleGAN-and-pix2pix
+```
+
+- Instale [PyTorch](http://pytorch.org) 0.4+ y sus otras dependencias (e.g., torchvision, [visdom](https://github.com/facebookresearch/visdom) y [dominate](https://github.com/Knio/dominate)).
+ - Para uso de pip, por favor escriba el comando `pip install -r requirements.txt`.
+ - Para uso de Conda, proporcionamos un script de instalación `./scripts/conda_deps.sh`. De forma alterna, puede crear un nuevo entorno Conda usando `conda env create -f environment.yml`.
+ - Para uso de Docker, Proporcionamos la imagen Docker y el archivo Docker preconstruidos. Por favor, consulte nuestra página
+ [Docker](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/docker.md).
+
+### CycleGAN entrenamiento/test
+- Descargar el dataset de CycleGAN (e.g. maps):
+```bash
+bash ./datasets/download_cyclegan_dataset.sh maps
+```
+- Para ver los resultados del entrenamiento y las gráficas de pérdidas, ejecute `python -m visdom.server` y haga clic en la URL
+ http://localhost:8097.
+- Entrenar el modelo:
+```bash
+#!./scripts/train_cyclegan.sh
+python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
+```
+Para ver más resultados intermedios, consulte `./checkpoints/maps_cyclegan/web/index.html`.
+- Pruebe el modelo:
+```bash
+#!./scripts/test_cyclegan.sh
+python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
+```
+- Los resultados de la prueba se guardarán en un archivo HTML aquí: `./results/maps_cyclegan/latest_test/index.html`.
+
+### pix2pix entrenamiento/test
+- Descargue el dataset de pix2pix (e.g.[facades](http://cmp.felk.cvut.cz/~tylecr1/facade/)):
+```bash
+bash ./datasets/download_pix2pix_dataset.sh facades
+```
+- Para ver los resultados del entrenamiento y las gráficas de pérdidas, ejecute `python -m visdom.server` y haga clic en la URL http://localhost:8097.
+- Para entrenar el modelo:
+```bash
+#!./scripts/train_pix2pix.sh
+python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
+```
+Para ver más resultados intermedios, consulte `./checkpoints/facades_pix2pix/web/index.html`.
+
+- Pruebe el modelo (`bash ./scripts/test_pix2pix.sh`):
+```bash
+#!./scripts/test_pix2pix.sh
+python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
+```
+- Los resultados de la prueba se guardarán en un archivo HTML aquí: `./results/facades_pix2pix/test_latest/index.html`. Puede encontrar más scripts en el directorio `scripts`.
+- Para entrenar y probar modelos de colorización basados en pix2pix, agregue las opciones `--model colorization` y `--dataset_mode colorization`. Para más detalles, consulte nuestros [tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md#notes-on-colorization) de entrenamiento.
+
+### Aplicar un modelo pre-entrenado (CycleGAN)
+- Puedes descargar un modelo previamente entrenado (e.g. horse2zebra) con el siguiente script:
+```bash
+bash ./scripts/download_cyclegan_model.sh horse2zebra
+```
+- El modelo pre-entrenado se guarda en `./checkpoints/{name}_pretrained/latest_net_G.pth`. Revise [aqui](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_cyclegan_model.sh#L3) para todos los modelos CycleGAN disponibles.
+
+- Para probar el modelo, también debe descargar el dataset horse2zebra:
+```bash
+bash ./datasets/download_cyclegan_dataset.sh horse2zebra
+```
+
+- Luego genere los resultados usando:
+```bash
+python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
+```
+- La opción `--model test` se usa para generar resultados de CycleGAN de un solo lado. Esta opción configurará automáticamente
+ `--dataset_mode single`, que carga solo las imágenes de un conjunto. Por el contrario, el uso de `--model cycle_gan` requiere cargar y generar resultados en ambas direcciones, lo que a veces es innecesario. Los resultados se guardarán en `./results/`. Use `--results_dir {directory_path_to_save_result}` para especificar el directorio de resultados.
+
+- Para sus propios experimentos, es posible que desee especificar `--netG`, `--norm`, `--no_dropout` para que coincida con la arquitectura del generador del modelo entrenado.
+
+### Aplicar un modelo pre-entrenado (pix2pix)
+Descargue un modelo pre-entrenado con `./scripts/download_pix2pix_model.sh`.
+
+- Revise [aqui](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_pix2pix_model.sh#L3) para todos los modelos pix2pix disponibles. Por ejemplo, si desea descargar el modelo label2photo entrenado en el dataset Facades:
+```bash
+bash ./scripts/download_pix2pix_model.sh facades_label2photo
+```
+- Descarga el dataset facades de pix2pix:
+```bash
+bash ./datasets/download_pix2pix_dataset.sh facades
+```
+- Luego genere los resultados usando:
+```bash
+python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained
+```
+- Tenga en cuenta que especificamos `--direction BtoA`, ya que en el dataset Facades la dirección de A a B va de fotos a etiquetas.
+
+- Si desea aplicar un modelo previamente entrenado a una colección de imágenes de entrada (en lugar de pares de imágenes), use la opción `--model test`. Vea `./scripts/test_single.sh` sobre cómo aplicar un modelo a los mapas de etiquetas de Facades (almacenados en el directorio `facades/testB`).
+
+- Vea una lista de los modelos disponibles actualmente en `./scripts/download_pix2pix_model.sh`
+
+## [Docker](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/docker.md)
+Proporcionamos la imagen Docker y el archivo Docker preconstruidos que pueden ejecutar este repositorio de código. Ver [docker](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/docker.md).
+
+## [Datasets](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/datasets.md)
+Descargue los conjuntos de datos pix2pix / CycleGAN y cree sus propios conjuntos de datos.
+
+## [Entrenamiento/Test Tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md)
+Las mejores prácticas para entrenar y probar sus modelos.
+
+## [Preguntas frecuentes](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md)
+Antes de publicar una nueva pregunta, primero mire las preguntas y respuestas anteriores y los problemas existentes de GitHub.
+
+## Modelo y Dataset personalizado
+Si planea implementar modelos y conjuntos de datos personalizados para sus nuevas aplicaciones, proporcionamos un conjunto de datos [template](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/data/template_dataset.py) y un modelo [template](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/template_model.py) como punto de partida.
+
+
+## [Estructura de codigo](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/overview.md)
+Para ayudar a los usuarios a comprender mejor y usar nuestro código, presentamos brevemente la funcionalidad e implementación de cada paquete y cada módulo.
+
+## Solicitud de Pull
+Siempre puede contribuir a este repositorio enviando un [pull request](https://help.github.com/articles/about-pull-requests/).
+Por favor, ejecute `flake8 --ignore E501 .` y `python ./scripts/test_before_push.py` antes de enviar el pull request, y asegúrese también de actualizar la estructura del código descrita en el [overview](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/overview.md) si agrega o elimina archivos.
+
+
+## Citación
+Si utiliza este código para su investigación, cite nuestros documentos.
+```
+@inproceedings{CycleGAN2017,
+  title={Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks},
+ author={Zhu, Jun-Yan and Park, Taesung and Isola, Phillip and Efros, Alexei A},
+ booktitle={Computer Vision (ICCV), 2017 IEEE International Conference on},
+ year={2017}
+}
+
+
+@inproceedings{isola2017image,
+ title={Image-to-Image Translation with Conditional Adversarial Networks},
+ author={Isola, Phillip and Zhu, Jun-Yan and Zhou, Tinghui and Efros, Alexei A},
+ booktitle={Computer Vision and Pattern Recognition (CVPR), 2017 IEEE Conference on},
+ year={2017}
+}
+```
+
+## Proyectos relacionados
+**[CycleGAN-Torch](https://github.com/junyanz/CycleGAN) |
+[pix2pix-Torch](https://github.com/phillipi/pix2pix) | [pix2pixHD](https://github.com/NVIDIA/pix2pixHD)|
+[BicycleGAN](https://github.com/junyanz/BicycleGAN) | [vid2vid](https://tcwang0509.github.io/vid2vid/) | [SPADE/GauGAN](https://github.com/NVlabs/SPADE)**
+**[iGAN](https://github.com/junyanz/iGAN) | [GAN Dissection](https://github.com/CSAILVision/GANDissect) | [GAN Paint](http://ganpaint.io/)**
+
+## Cat Paper Collection
+Si amas a los gatos y te encanta leer artículos interesantes sobre gráficos, visión por computadora y aprendizaje automático, echa un vistazo a la Cat Paper [Collection](https://github.com/junyanz/CatPapers).
+
+## Agradecimientos
+Nuestro código fue inspirado en [pytorch-DCGAN](https://github.com/pytorch/examples/tree/master/dcgan).
diff --git a/docs/datasets.md b/docs/datasets.md
new file mode 100644
index 0000000000000000000000000000000000000000..b53c7db4b6db1800c735e2e5bec93ebe73cc9a30
--- /dev/null
+++ b/docs/datasets.md
@@ -0,0 +1,44 @@
+
+
+### CycleGAN Datasets
+Download the CycleGAN datasets using the following script. Some of the datasets are collected by other researchers. Please cite their papers if you use the data.
+```bash
+bash ./datasets/download_cyclegan_dataset.sh dataset_name
+```
+- `facades`: 400 images from the [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](../datasets/bibtex/facades.tex)]
+- `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](../datasets/bibtex/cityscapes.tex)]. Note: Due to license issue, we cannot directly provide the Cityscapes dataset. Please download the Cityscapes dataset from [https://cityscapes-dataset.com](https://cityscapes-dataset.com) and use the script `./datasets/prepare_cityscapes_dataset.py`.
+- `maps`: 1096 training images scraped from Google Maps.
+- `horse2zebra`: 939 horse images and 1177 zebra images downloaded from [ImageNet](http://www.image-net.org) using keywords `wild horse` and `zebra`.
+- `apple2orange`: 996 apple images and 1020 orange images downloaded from [ImageNet](http://www.image-net.org) using keywords `apple` and `navel orange`.
+- `summer2winter_yosemite`: 1273 summer Yosemite images and 854 winter Yosemite images were downloaded using Flickr API. See more details in our paper.
+- `monet2photo`, `vangogh2photo`, `ukiyoe2photo`, `cezanne2photo`: The art images were downloaded from [Wikiart](https://www.wikiart.org/). The real photos are downloaded from Flickr using the combination of the tags *landscape* and *landscapephotography*. The training set size of each class is Monet:1074, Cezanne:584, Van Gogh:401, Ukiyo-e:1433, Photographs:6853.
+- `iphone2dslr_flower`: both classes of images were downloaded from Flickr. The training set size of each class is iPhone:1813, DSLR:3316. See more details in our paper.
+
+To train a model on your own datasets, you need to create a data folder with two subdirectories `trainA` and `trainB` that contain images from domain A and B. You can test your model on your training set by setting `--phase train` in `test.py`. You can also create subdirectories `testA` and `testB` if you have test data.
+
+You should **not** expect our method to work on just any random combination of input and output datasets (e.g. `cats<->keyboards`). From our experiments, we find it works better if two datasets share similar visual content. For example, `landscape painting<->landscape photographs` works much better than `portrait painting <-> landscape photographs`. `zebras<->horses` achieves compelling results while `cats<->dogs` completely fails.
+
+### pix2pix datasets
+Download the pix2pix datasets using the following script. Some of the datasets are collected by other researchers. Please cite their papers if you use the data.
+```bash
+bash ./datasets/download_pix2pix_dataset.sh dataset_name
+```
+- `facades`: 400 images from [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](../datasets/bibtex/facades.tex)]
+- `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](../datasets/bibtex/cityscapes.tex)]
+- `maps`: 1096 training images scraped from Google Maps
+- `edges2shoes`: 50k training images from the [UT Zappos50K dataset](http://vision.cs.utexas.edu/projects/finegrained/utzap50k). Edges are computed by the [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](../datasets/bibtex/shoes.tex)]
+- `edges2handbags`: 137K Amazon Handbag images from the [iGAN project](https://github.com/junyanz/iGAN). Edges are computed by the [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](../datasets/bibtex/handbags.tex)]
+- `night2day`: around 20K natural scene images from the [Transient Attributes dataset](http://transattr.cs.brown.edu/) [[Citation](../datasets/bibtex/transattr.tex)]. To train a `day2night` pix2pix model, you need to add `--direction BtoA`.
+
+We provide a python script to generate pix2pix training data in the form of pairs of images {A,B}, where A and B are two different depictions of the same underlying scene. For example, these might be pairs {label map, photo} or {bw image, color image}. Then we can learn to translate A to B or B to A:
+
+Create a folder `/path/to/data` with subfolders `A` and `B`. `A` and `B` should each have their own subfolders `train`, `val`, `test`, etc. In `/path/to/data/A/train`, put training images in style A. In `/path/to/data/B/train`, put the corresponding images in style B. Repeat the same for the other data splits (`val`, `test`, etc.).
+
+Corresponding images in a pair {A,B} must be the same size and have the same filename, e.g., `/path/to/data/A/train/1.jpg` is considered to correspond to `/path/to/data/B/train/1.jpg`.
+
+Once the data is formatted this way, call:
+```bash
+python datasets/combine_A_and_B.py --fold_A /path/to/data/A --fold_B /path/to/data/B --fold_AB /path/to/data
+```
+
+This will combine each pair of images (A,B) into a single image file, ready for training.
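+
+As a quick sanity check before running the command above, a small standalone script along the following lines (hypothetical, not part of this repository; the `/path/to/data` placeholders match the example above) can confirm that every image in `A` has a same-named, same-sized partner in `B`:
+
+```python
+# check_pairs.py -- hypothetical helper, not part of this repo.
+import os
+
+from PIL import Image
+
+fold_A = '/path/to/data/A'   # adjust to the --fold_A / --fold_B paths you plan to use
+fold_B = '/path/to/data/B'
+
+for split in sorted(os.listdir(fold_A)):          # train, val, test, ...
+    dir_A = os.path.join(fold_A, split)
+    dir_B = os.path.join(fold_B, split)
+    names_A = set(os.listdir(dir_A))
+    names_B = set(os.listdir(dir_B))
+    unpaired = sorted(names_A ^ names_B)          # files present on only one side
+    if unpaired:
+        print('%s: %d unpaired files, e.g. %s' % (split, len(unpaired), unpaired[:3]))
+    for name in sorted(names_A & names_B):
+        size_A = Image.open(os.path.join(dir_A, name)).size
+        size_B = Image.open(os.path.join(dir_B, name)).size
+        if size_A != size_B:
+            print('%s/%s: size mismatch %s vs %s' % (split, name, size_A, size_B))
+```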
diff --git a/docs/docker.md b/docs/docker.md
new file mode 100644
index 0000000000000000000000000000000000000000..74043e3a37e57fbf47bc37a4d489ca457ffd7858
--- /dev/null
+++ b/docs/docker.md
@@ -0,0 +1,38 @@
+# Docker image with pytorch-CycleGAN-and-pix2pix
+
+We provide both Dockerfile and pre-built Docker container that can run this code repo.
+
+## Prerequisite
+
+- Install [docker-ce](https://docs.docker.com/install/linux/docker-ce/ubuntu/)
+- Install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker#quickstart)
+
+## Running the pre-built Docker image
+
+- Pull the pre-built Docker image
+
+```bash
+docker pull taesungp/pytorch-cyclegan-and-pix2pix
+```
+
+- Start an interactive Docker session. The `-p 8097:8097` option is needed if you want to run the `visdom` server in the Docker container.
+
+```bash
+nvidia-docker run -it -p 8097:8097 taesungp/pytorch-cyclegan-and-pix2pix
+```
+
+- Now you are in the Docker environment. Go to our code repo and start running things.
+```bash
+cd /workspace/pytorch-CycleGAN-and-pix2pix
+bash datasets/download_pix2pix_dataset.sh facades
+python -m visdom.server &
+bash scripts/train_pix2pix.sh
+```
+
+## Running with Dockerfile
+
+We also provide the [Dockerfile](Dockerfile). To build the image yourself, download the Dockerfile in this directory and run
+```bash
+docker build -t [target_tag] .
+```
+in the directory that contains the Dockerfile.
diff --git a/docs/overview.md b/docs/overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..5db2ae9c1ecdc7b25e93d11c89b0170de38be98f
--- /dev/null
+++ b/docs/overview.md
@@ -0,0 +1,45 @@
+## Overview of Code Structure
+To help users better understand and use our codebase, we briefly overview the functionality and implementation of each package and each module. Please see the documentation in each file for more details. If you have questions, you may find useful information in [training/test tips](tips.md) and [frequently asked questions](qa.md).
+
+[train.py](../train.py) is a general-purpose training script. It works for various models (with option `--model`: e.g., `pix2pix`, `cyclegan`, `colorization`) and different datasets (with option `--dataset_mode`: e.g., `aligned`, `unaligned`, `single`, `colorization`). See the main [README](../README.md) and [training/test tips](tips.md) for more details.
+
+[test.py](../test.py) is a general-purpose test script. Once you have trained your model with `train.py`, you can use this script to test the model. It will load a saved model from `--checkpoints_dir` and save the results to `--results_dir`. See the main [README](../README.md) and [training/test tips](tips.md) for more details.
+
+
+[data](../data) directory contains all the modules related to data loading and preprocessing. To add a custom dataset class called `dummy`, you need to add a file called `dummy_dataset.py` and define a subclass `DummyDataset` inherited from `BaseDataset`. You need to implement four functions: `__init__` (initialize the class; you need to first call `BaseDataset.__init__(self, opt)`), `__len__` (return the size of the dataset), `__getitem__` (get a data point), and optionally `modify_commandline_options` (add dataset-specific options and set default options). Now you can use the dataset class by specifying the flag `--dataset_mode dummy`. See our template dataset [class](../data/template_dataset.py) for an example, and the short sketch after the list below. Below we explain each file in detail.
+
+* [\_\_init\_\_.py](../data/__init__.py) implements the interface between this package and training and test scripts. `train.py` and `test.py` call `from data import create_dataset` and `dataset = create_dataset(opt)` to create a dataset given the option `opt`.
+* [base_dataset.py](../data/base_dataset.py) implements an abstract base class ([ABC](https://docs.python.org/3/library/abc.html)) for datasets. It also includes common transformation functions (e.g., `get_transform`, `__scale_width`), which can be later used in subclasses.
+* [image_folder.py](../data/image_folder.py) implements an image folder class. We modify the official PyTorch image folder [code](https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) so that this class can load images from both the current directory and its subdirectories.
+* [template_dataset.py](../data/template_dataset.py) provides a dataset template with detailed documentation. Check out this file if you plan to implement your own dataset.
+* [aligned_dataset.py](../data/aligned_dataset.py) includes a dataset class that can load image pairs. It assumes a single image directory `/path/to/data/train`, which contains image pairs in the form of {A,B}. See [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md#prepare-your-own-datasets-for-pix2pix) on how to prepare aligned datasets. During test time, you need to prepare a directory `/path/to/data/test` as test data.
+* [unaligned_dataset.py](../data/unaligned_dataset.py) includes a dataset class that can load unaligned/unpaired datasets. It assumes two directories that host training images from domain A (`/path/to/data/trainA`) and from domain B (`/path/to/data/trainB`), respectively. You can then train the model with the dataset flag `--dataroot /path/to/data`. Similarly, you need to prepare two directories `/path/to/data/testA` and `/path/to/data/testB` at test time.
+* [single_dataset.py](../data/single_dataset.py) includes a dataset class that can load a set of single images specified by the path `--dataroot /path/to/data`. It can be used for generating CycleGAN results only for one side with the model option `--model test`.
+* [colorization_dataset.py](../data/colorization_dataset.py) implements a dataset class that can load a set of natural images in RGB, and convert the RGB format into (L, ab) pairs in [Lab](https://en.wikipedia.org/wiki/CIELAB_color_space) color space. It is required by the pix2pix-based colorization model (`--model colorization`).
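+
+To make the `dummy` example above concrete, the following is a condensed, purely illustrative sketch of such a `dummy_dataset.py` (the file and class names are the hypothetical example from the paragraph above; `data/template_dataset.py` remains the documented starting point, and the `make_dataset`/`get_transform` helpers come from the `image_folder.py` and `base_dataset.py` modules described in this list):
+
+```python
+# dummy_dataset.py -- illustrative sketch only; see data/template_dataset.py for the real template.
+from PIL import Image
+
+from data.base_dataset import BaseDataset, get_transform
+from data.image_folder import make_dataset
+
+
+class DummyDataset(BaseDataset):
+    @staticmethod
+    def modify_commandline_options(parser, is_train):
+        # optionally add dataset-specific options or change defaults here
+        return parser
+
+    def __init__(self, opt):
+        BaseDataset.__init__(self, opt)                   # always initialize the base class first
+        self.paths = sorted(make_dataset(opt.dataroot))   # collect image paths under --dataroot
+        self.transform = get_transform(opt)               # shared preprocessing from base_dataset.py
+
+    def __getitem__(self, index):
+        path = self.paths[index]
+        image = self.transform(Image.open(path).convert('RGB'))
+        return {'A': image, 'A_paths': path}
+
+    def __len__(self):
+        return len(self.paths)
+```
+
+With such a file in place, the dataset is selected via `--dataset_mode dummy`, exactly as described above.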
+
+
+[models](../models) directory contains modules related to objective functions, optimizations, and network architectures. To add a custom model class called `dummy`, you need to add a file called `dummy_model.py` and define a subclass `DummyModel` inherited from `BaseModel`. You need to implement four functions: `__init__` (initialize the class; you need to first call `BaseModel.__init__(self, opt)`), `set_input` (unpack data from the dataset and apply preprocessing), `forward` (generate intermediate results), `optimize_parameters` (calculate losses, gradients, and update network weights), and optionally `modify_commandline_options` (add model-specific options and set default options). Now you can use the model class by specifying the flag `--model dummy`. See our template model [class](../models/template_model.py) for an example, and the short sketch after the list below. Below we explain each file in detail.
+
+* [\_\_init\_\_.py](../models/__init__.py) implements the interface between this package and training and test scripts. `train.py` and `test.py` call `from models import create_model` and `model = create_model(opt)` to create a model given the option `opt`. You also need to call `model.setup(opt)` to properly initialize the model.
+* [base_model.py](../models/base_model.py) implements an abstract base class ([ABC](https://docs.python.org/3/library/abc.html)) for models. It also includes commonly used helper functions (e.g., `setup`, `test`, `update_learning_rate`, `save_networks`, `load_networks`), which can be later used in subclasses.
+* [template_model.py](../models/template_model.py) provides a model template with detailed documentation. Check out this file if you plan to implement your own model.
+* [pix2pix_model.py](../models/pix2pix_model.py) implements the pix2pix [model](https://phillipi.github.io/pix2pix/), for learning a mapping from input images to output images given paired data. The model training requires `--dataset_mode aligned` dataset. By default, it uses a `--netG unet256` [U-Net](https://arxiv.org/pdf/1505.04597.pdf) generator, a `--netD basic` discriminator (PatchGAN), and a `--gan_mode vanilla` GAN loss (standard cross-entropy objective).
+* [colorization_model.py](../models/colorization_model.py) implements a subclass of `Pix2PixModel` for image colorization (black & white image to colorful image). The model training requires the `--dataset_mode colorization` dataset. It trains a pix2pix model, mapping from the L channel to the ab channels in [Lab](https://en.wikipedia.org/wiki/CIELAB_color_space) color space. By default, the `colorization` dataset will automatically set `--input_nc 1` and `--output_nc 2`.
+* [cycle_gan_model.py](../models/cycle_gan_model.py) implements the CycleGAN [model](https://junyanz.github.io/CycleGAN/), for learning image-to-image translation without paired data. The model training requires `--dataset_mode unaligned` dataset. By default, it uses a `--netG resnet_9blocks` ResNet generator, a `--netD basic` discriminator (PatchGAN introduced by pix2pix), and a least-square GANs [objective](https://arxiv.org/abs/1611.04076) (`--gan_mode lsgan`).
+* [networks.py](../models/networks.py) module implements network architectures (both generators and discriminators), as well as normalization layers, initialization methods, optimization scheduler (i.e., learning rate policy), and GAN objective function (`vanilla`, `lsgan`, `wgangp`).
+* [test_model.py](../models/test_model.py) implements a model that can be used to generate CycleGAN results for only one direction. This model will automatically set `--dataset_mode single`, which only loads the images from one set. See the test [instruction](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix#apply-a-pre-trained-model-cyclegan) for more details.
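+
+As a companion to the dataset sketch above, here is an equally condensed, purely illustrative `dummy_model.py` showing the four-method interface just described (the L1 loss and the `networks.define_G` call are placeholder choices for this sketch; `models/template_model.py` is the documented starting point):
+
+```python
+# dummy_model.py -- illustrative sketch only; see models/template_model.py for the real template.
+import torch
+
+from models.base_model import BaseModel
+from models import networks
+
+
+class DummyModel(BaseModel):
+    def __init__(self, opt):
+        BaseModel.__init__(self, opt)   # always initialize the base class first
+        self.loss_names = ['G_L1']      # losses printed/plotted by the visualizer
+        self.model_names = ['G']        # networks saved/loaded by save_networks/load_networks
+        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
+                                      gpu_ids=self.gpu_ids)
+        if self.isTrain:
+            self.criterionL1 = torch.nn.L1Loss()
+            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr)
+            self.optimizers = [self.optimizer_G]
+
+    def set_input(self, input):
+        # unpack the dictionary produced by the dataset class and move it to the right device
+        self.real_A = input['A'].to(self.device)
+        self.real_B = input['B'].to(self.device)
+
+    def forward(self):
+        self.fake_B = self.netG(self.real_A)   # generate intermediate results
+
+    def optimize_parameters(self):
+        self.forward()
+        self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B)
+        self.optimizer_G.zero_grad()
+        self.loss_G_L1.backward()
+        self.optimizer_G.step()
+```
+
+Saved as `models/dummy_model.py`, it would be selected with `--model dummy` and would expect a dataset that returns `'A'` and `'B'` tensors.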
+
+[options](../options) directory includes our option modules: training options, test options, and basic options (used in both training and test). `TrainOptions` and `TestOptions` are both subclasses of `BaseOptions`. They will reuse the options defined in `BaseOptions`.
+* [\_\_init\_\_.py](../options/__init__.py) is required to make Python treat the directory `options` as containing packages,
+* [base_options.py](../options/base_options.py) includes options that are used in both training and test. It also implements a few helper functions such as parsing, printing, and saving the options. It also gathers additional options defined in `modify_commandline_options` functions in both dataset class and model class.
+* [train_options.py](../options/train_options.py) includes options that are only used during training time.
+* [test_options.py](../options/test_options.py) includes options that are only used during test time.
+
+
+[util](../util) directory includes a miscellaneous collection of useful helper functions.
+ * [\_\_init\_\_.py](../util/__init__.py) is required to make Python treat the directory `util` as containing packages.
+ * [get_data.py](../util/get_data.py) provides a Python script for downloading CycleGAN and pix2pix datasets. Alternatively, you can use bash scripts such as [download_pix2pix_model.sh](../scripts/download_pix2pix_model.sh) and [download_cyclegan_model.sh](../scripts/download_cyclegan_model.sh).
+ * [html.py](../util/html.py) implements a module that saves images into a single HTML file. It consists of functions such as `add_header` (add a text header to the HTML file), `add_images` (add a row of images to the HTML file), and `save` (save the HTML to disk). It is based on `dominate`, a Python library for creating and manipulating HTML documents using a DOM API.
+ * [image_pool.py](../util/image_pool.py) implements an image buffer that stores previously generated images. This buffer enables us to update discriminators using a history of generated images rather than the ones produced by the latest generators. The original idea was discussed in this [paper](http://openaccess.thecvf.com/content_cvpr_2017/papers/Shrivastava_Learning_From_Simulated_CVPR_2017_paper.pdf). The size of the buffer is controlled by the flag `--pool_size`. A short illustrative sketch of this buffering idea appears at the end of this overview.
+ * [visualizer.py](../util/visualizer.py) includes several functions that can display/save images and print/save logging information. It uses a Python library `visdom` for display and a Python library `dominate` (wrapped in `HTML`) for creating HTML files with images.
+ * [util.py](../util/util.py) consists of simple helper functions such as `tensor2im` (convert a tensor array to a numpy image array), `diagnose_network` (calculate and print the mean of average absolute value of gradients), and `mkdirs` (create multiple directories).
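+
+The sketch below is purely illustrative (it is not the repository's `ImagePool` code; the fixed pool size of 50 and the 50% swap probability are the sketch's own choices). It only shows the buffering idea: once the pool is full, the discriminator sees a previously generated image half of the time.
+
+```python
+# Simplified, illustrative sketch of the image-buffer idea (not the actual util/image_pool.py).
+import random
+
+
+class NaiveImagePool:
+    def __init__(self, pool_size=50):
+        self.pool_size = pool_size      # in the repo the buffer size is set by --pool_size
+        self.images = []
+
+    def query(self, image):
+        """Return either `image` or a previously stored image (assumed to be a torch.Tensor)."""
+        if self.pool_size == 0:                       # a size of 0 disables buffering
+            return image
+        if len(self.images) < self.pool_size:         # fill the buffer first
+            self.images.append(image.detach().clone())
+            return image
+        if random.random() > 0.5:                     # half of the time, return an older image
+            idx = random.randrange(self.pool_size)
+            older = self.images[idx]
+            self.images[idx] = image.detach().clone()
+            return older
+        return image                                  # otherwise, use the freshly generated one
+```
+
+The discriminator update then uses `pool.query(fake)` in place of `fake`, which is the behaviour the CycleGAN model obtains from the real `ImagePool` class.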
diff --git a/docs/qa.md b/docs/qa.md
new file mode 100644
index 0000000000000000000000000000000000000000..519adb666b05d04a425b3de95c40ca79e3509f04
--- /dev/null
+++ b/docs/qa.md
@@ -0,0 +1,148 @@
+## Frequently Asked Questions
+Before you post a new question, please first look at the following Q & A and existing GitHub issues. You may also want to read [Training/Test tips](tips.md) for more suggestions.
+
+#### Connection Error:HTTPConnectionPool ([#230](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/230), [#24](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/24), [#38](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/38))
+Similar error messages include “Failed to establish a new connection/Connection refused”.
+
+Please start the visdom server before starting the training:
+```bash
+python -m visdom.server
+```
+To install visdom, you can use the following command:
+```bash
+pip install visdom
+```
+You can also disable visdom by setting `--display_id 0`.
+
+#### My PyTorch errors on CUDA related code.
+Try to run the following code snippet to make sure that CUDA is working (assuming using PyTorch >= 0.4):
+```python
+import torch
+torch.cuda.init()
+print(torch.randn(1, device='cuda'))
+```
+
+If you get an error, it is likely that your PyTorch build does not work with CUDA, e.g., it was installed from the official macOS binary, or you have a GPU that is too old and no longer supported. You can run the code on the CPU using `--gpu_ids -1`.
+
+#### TypeError: Object of type 'Tensor' is not JSON serializable ([#258](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/258))
+Similar errors: AttributeError: module 'torch' has no attribute 'device' ([#314](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/314))
+
+The current code only works with PyTorch 0.4+. An earlier PyTorch version can often cause the above errors.
+
+#### ValueError: empty range for randrange() ([#390](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/390), [#376](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/376), [#194](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/194))
+Similar error messages include "ConnectionRefusedError: [Errno 111] Connection refused"
+
+It is related to the data augmentation step. It often happens when you use `--preprocess crop`. The program will crop random `crop_size x crop_size` patches out of the input training images. But if some of your image sizes (e.g., `256x384`) are smaller than the `crop_size` (e.g., 512), you will get this error. A simple fix is to use other data augmentation methods such as `resize_and_crop` or `scale_width_and_crop`. Our program will automatically resize the images according to `load_size` before applying the `crop_size x crop_size` cropping. Make sure that `load_size >= crop_size`.
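+
+If you are unsure which images are the culprit, a throwaway snippet along the following lines (hypothetical, not part of the repository; adjust the hard-coded folder and `crop_size` to your own flags) lists every training image that is smaller than `--crop_size`:
+
+```python
+import os
+
+from PIL import Image
+
+dataroot = './datasets/maps/trainA'   # your training image folder
+crop_size = 256                       # the value you pass as --crop_size
+
+for name in sorted(os.listdir(dataroot)):
+    if not name.lower().endswith(('.jpg', '.jpeg', '.png')):
+        continue
+    w, h = Image.open(os.path.join(dataroot, name)).size
+    if w < crop_size or h < crop_size:
+        print('%s is %dx%d, smaller than crop_size=%d' % (name, w, h, crop_size))
+```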
+
+
+#### Can I continue/resume my training? ([#350](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/350), [#275](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/275), [#234](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/234), [#87](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/87))
+You can use the option `--continue_train`. Also set `--epoch_count` to specify a different starting epoch count. See more discussion in [training/test tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md#trainingtest-tips).
+
+#### Why does my training loss not converge? ([#335](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/335), [#164](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/164), [#30](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/30))
+Many GAN losses do not converge (exceptions: WGAN, WGAN-GP, etc.) due to the nature of minimax optimization. For the DCGAN and LSGAN objectives, it is quite normal for the G and D losses to go up and down. It should be fine as long as they do not blow up.
+
+#### How can I make it work for my own data (e.g., 16-bit png, tiff, hyperspectral images)? ([#309](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/309), [#320](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/), [#202](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/202))
+The current code only supports RGB and grayscale images. If you would like to train the model on other data types, please follow these steps:
+
+- change the parameters `--input_nc` and `--output_nc` to the number of channels in your input/output images.
+- Write your own custom data loader (it is easy as long as you know how to load your data with Python). If you write a new data loader class, you need to change the flag `--dataset_mode` accordingly. Alternatively, you can modify the existing data loaders. For aligned datasets, change this [line](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/data/aligned_dataset.py#L41); for unaligned datasets, change these two [lines](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/data/unaligned_dataset.py#L57).
+
+- If you use visdom and HTML to visualize the results, you may also need to change the visualization code.
+
+#### Multi-GPU Training ([#327](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/327), [#292](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/292), [#137](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/137), [#35](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/35))
+You can use Multi-GPU training by setting `--gpu_ids` (e.g., `--gpu_ids 0,1,2,3` for the first four GPUs on your machine.) To fully utilize all the GPUs, you need to increase your batch size. Try `--batch_size 4`, `--batch_size 16`, or even a larger batch_size. Each GPU will process batch_size/#GPUs images. The optimal batch size depends on the number of GPUs you have, GPU memory per GPU, and the resolution of your training images.
+
+We also recommend using instance normalization for multi-GPU training by setting `--norm instance`. The current batch normalization might not work well for multiple GPUs, as the batchnorm parameters are not shared across GPUs. Advanced users can try [synchronized batchnorm](https://github.com/vacancy/Synchronized-BatchNorm-PyTorch).
+
+
+#### Can I run the model on CPU? ([#310](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/310))
+Yes, you can set `--gpu_ids -1`. See [training/test tips](tips.md) for more details.
+
+
+#### Are pre-trained models available? ([#10](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/10))
+Yes, you can download pretrained models with the bash script `./scripts/download_cyclegan_model.sh`. See [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix#apply-a-pre-trained-model-cyclegan) for more details. We are slowly adding more models to the repo.
+
+#### Out of memory ([#174](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/174))
+CycleGAN is more memory-intensive than pix2pix as it requires two generators and two discriminators. If you would like to produce high-resolution images, you can do the following.
+
+- During training, train CycleGAN on cropped images of the training set. Please be careful not to change the aspect ratio or the scale of the original image, as this can lead to the training/test gap. You can usually do this by using `--preprocess crop` option, or `--preprocess scale_width_and_crop`.
+
+- Then at test time, you can load only one generator to produce the results in a single direction. This greatly saves GPU memory as you are not loading the discriminators and the other generator in the opposite direction. You can probably take the whole image as input. You can do this using `--model test --dataroot [path to the directory that contains your test images (e.g., ./datasets/horse2zebra/trainA)] --model_suffix _A --preprocess none`. You can use either `--preprocess none` or `--preprocess scale_width --crop_size [your_desired_image_width]`. Please see the [model_suffix](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/test_model.py#L16) and [preprocess](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/data/base_dataset.py#L24) for more details.
+
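+As a concrete sketch of the two steps above (paths and the experiment name are placeholders), you could train on crops and then test a single generator on whole images:
+
+```bash
+# train on random 256x256 crops of images scaled to width 1024
+python train.py --dataroot ./datasets/horse2zebra --name h2z_highres --model cycle_gan \
+    --preprocess scale_width_and_crop --load_size 1024 --crop_size 256
+
+# test only one direction (G_A) on whole images; the discriminators and G_B are not loaded
+python test.py --dataroot ./datasets/horse2zebra/testA --name h2z_highres --model test \
+    --model_suffix _A --netG resnet_9blocks --norm instance --no_dropout --preprocess none
+```
+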
+#### RuntimeError: Error(s) in loading state_dict ([#812](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/812), [#671](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/671),[#461](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/461), [#296](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/296))
+If you get the above errors when loading the generator at test time, you have probably used different network configurations for training and test. There are a few things to check: (1) the network architecture `--netG`: you will get an error if you use `--netG unet256` during training and `--netG resnet_6blocks` during test. Make sure that the flag is the same. (2) the normalization parameters `--norm`: we use different default `--norm` parameters for `--model cycle_gan`, `--model pix2pix`, and `--model test`. They might differ from the ones you used at training time. Make sure that you add the `--norm` flag to your test command. (3) dropout: if you used dropout during training, make sure that you use the same dropout setting at test time. Check the flag `--no_dropout`.
+
+Note that we use different default generators, normalization layers, and dropout options for different models. The model file can overwrite the default arguments and add new arguments. For example, this [line](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/pix2pix_model.py#L32) adds and changes default arguments for pix2pix. For CycleGAN, the default is `--netG resnet_9blocks --no_dropout --norm instance --dataset_mode unaligned`. For pix2pix, the default is `--netG unet_256 --norm batch --dataset_mode aligned`. For single-direction model testing (`--model test`), the default is `--netG resnet_9blocks --norm instance --dataset_mode single`. To make sure that your training and test follow the same setting, you are encouraged to explicitly specify `--netG`, `--norm`, `--dataset_mode`, and `--no_dropout` (or not) in your scripts.
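+
+For instance (hypothetical dataset path and experiment name), a matched pair of pix2pix training and test commands that spell out these flags explicitly could look like:
+
+```bash
+python train.py --dataroot /path/to/data --name myexp_pix2pix --model pix2pix \
+    --netG unet_256 --norm batch --dataset_mode aligned
+
+python test.py  --dataroot /path/to/data --name myexp_pix2pix --model pix2pix \
+    --netG unet_256 --norm batch --dataset_mode aligned
+```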
+
+#### NotSupportedError ([#829](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/829))
+The error message states that `slicing multiple dimensions at the same time isn't supported yet proposals (Tensor): boxes to be encoded`. It is not related to our repo. It is often caused by an incompatibility between your `torchvision` version and your `pytorch` version. You need to re-install or upgrade `torchvision` so that it is compatible with your `pytorch` version.
+
+
+#### What is the identity loss? ([#322](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/322), [#373](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/373), [#362](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/pull/362))
+We use the identity loss for our photo-to-painting application. The identity loss can regularize the generator to be close to an identity mapping when fed with real samples from the *target* domain. If something already looks like it comes from the target domain, the generator should preserve the image without making additional changes. A generator trained with this loss will often be more conservative for unknown content. Please see more details in Sec 5.2 ''Photo generation from paintings'' and Figure 12 in the CycleGAN [paper](https://arxiv.org/pdf/1703.10593.pdf). The loss was first proposed in Equation 6 of the prior work [[Taigman et al., 2017]](https://arxiv.org/pdf/1611.02200.pdf).
+
+#### The color gets inverted from the beginning of training ([#249](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/249))
+The authors also observe that the generator unnecessarily inverts the color of the input image early in training, and then never learns to undo the inversion. In this case, you can try two things.
+
+- First, try using the identity loss `--lambda_identity 1.0` or `--lambda_identity 0.1`. We observe that the identity loss makes the generator more conservative so that it makes fewer unnecessary changes. However, because of this, the change may not be as dramatic.
+
+- Second, try a smaller variance when initializing weights by changing `--init_gain`. We observe that a smaller variance in weight initialization results in less color inversion (see the example command after this list).
+
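+Combining the two suggestions above (placeholder path and name; the exact values are worth tuning):
+
+```bash
+python train.py --dataroot /path/to/data --name myexp --model cycle_gan \
+    --lambda_identity 0.1 --init_gain 0.01
+```
+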
+#### For labels2photo Cityscapes evaluation, why does the pretrained FCN-8s model not work well on the original Cityscapes input images? ([#150](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/150))
+The model was trained on 256x256 images that are resized/upsampled to 1024x2048, so the expected input images to the network are very blurry. The purpose of the resizing was to 1) keep the label maps untouched in their original high resolution and 2) avoid the need to change the standard FCN training code for Cityscapes.
+
+#### How do I get the `ground-truth` numbers on the labels2photo Cityscapes evaluation? ([#150](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/150))
+You need to resize the original Cityscapes images to 256x256 before running the evaluation code.
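+
+One possible way to do the resizing (this assumes ImageMagick is installed; it is not part of this repository, and the paths are placeholders):
+
+```bash
+mkdir -p ./cityscapes_val_256
+# resize the real validation images (not the labels) to exactly 256x256
+for f in /path/to/cityscapes/leftImg8bit/val/*/*_leftImg8bit.png; do
+    convert "$f" -resize '256x256!' ./cityscapes_val_256/"$(basename "$f")"
+done
+```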
+
+#### What is a good evaluation metric for CycleGAN? ([#730](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/730), [#716](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/716), [#166](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/166))
+The evaluation metric highly depends on your specific task and dataset. There is no single metric that works for all the datasets and tasks.
+
+There are a few popular choices: (1) we often evaluate CycleGAN on paired datasets (e.g., Cityscapes dataset and the meanIOU metric used in the CycleGAN paper), in which the model was trained without pairs. (2) Many researchers have adopted standard GAN metrics such as FID. Note that FID only evaluates the output images, while it ignores the correspondence between output and input. (3) A user study regarding photorealism might be helpful. Please check out the details of a user study in the CycleGAN paper (Section 5.1.1).
+
+In summary, how to automatically evaluate the results is an open problem in GAN research. For many creative applications, the results are subjective and hard to quantify without humans in the loop.
+
+
+#### What does the CycleGAN loss look like if training goes well? ([#1096](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/1096), [#1086](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/1086), [#288](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/288), [#30](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/30))
+Typically, the cycle-consistency loss and identity loss decrease during training, while the GAN losses oscillate. To evaluate the quality of your results, you need to apply additional evaluation metrics to your training and test images. See the Q & A above.
+
+
+#### Using resize-conv to reduce checkerboard artifacts ([#190](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/190), [#64](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/64))
+This Distill [blog](https://distill.pub/2016/deconv-checkerboard/) discusses one of the potential causes of checkerboard artifacts. You can often fix the issue by replacing "deconvolution" (transposed convolution) with upsampling (e.g., nearest-neighbor or bilinear) followed by a regular convolution. Here is one implementation provided by [@SsnL](https://github.com/SsnL). You can replace the `ConvTranspose2d` with the following layers.
+```python
+nn.Upsample(scale_factor = 2, mode='bilinear'),
+nn.ReflectionPad2d(1),
+nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0),
+```
+We have also noticed that the checkerboard artifacts sometimes go away if you train long enough, so you may want to try training your model a bit longer.
+
+#### pix2pix/CycleGAN has no random noise z ([#152](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/152))
+The current pix2pix/CycleGAN model does not take z as input. In both pix2pix and CycleGAN, we tried to add z to the generator: e.g., adding z to a latent state, concatenating with a latent state, applying dropout, etc., but often found the output did not vary significantly as a function of z. Conditional GANs do not need noise as long as the input is sufficiently complex so that the input can kind of play the role of noise. Without noise, the mapping is deterministic.
+
+Please check out the following papers that show ways of getting z to actually have a substantial effect: e.g., [BicycleGAN](https://github.com/junyanz/BicycleGAN), [AugmentedCycleGAN](https://arxiv.org/abs/1802.10151), [MUNIT](https://arxiv.org/abs/1804.04732), [DRIT](https://arxiv.org/pdf/1808.00948.pdf), etc.
+
+#### Experiment details (e.g., BW->color) ([#306](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/306))
+You can find more training details and hyperparameter settings in the appendix of [CycleGAN](https://arxiv.org/abs/1703.10593) and [pix2pix](https://arxiv.org/abs/1611.07004) papers.
+
+#### Results with [Cycada](https://arxiv.org/pdf/1711.03213.pdf)
+We generated the [result of translating GTA images to Cityscapes-style images](https://junyanz.github.io/CycleGAN/) using our Torch repo. Our PyTorch and Torch implementations seem to produce slightly different results, although we have not measured the FCN score using the PyTorch-trained model. To reproduce the Cycada result, please use the Torch repo for now.
+
+#### Loading and using the saved model in your code
+You can easily use the saved model in your own code with the snippet below:
+
+```python
+import torch
+from models.networks import define_G
+from collections import OrderedDict
+
+model_dict = torch.load("checkpoints/stars_pix2pix/latest_net_G.pth")
+new_dict = OrderedDict()
+for k, v in model_dict.items():
+ # load_state_dict expects keys with prefix 'module.'
+ new_dict["module." + k] = v
+
+# make sure you pass the correct parameters to the define_G method
+generator_model = define_G(input_nc=1, output_nc=1, ngf=64, netG="resnet_9blocks",
+ norm="batch", use_dropout=True, init_gain=0.02, gpu_ids=[0])
+generator_model.load_state_dict(new_dict)
+```
+If everything goes well, printing the return value of `load_state_dict` should show an `<All keys matched successfully>` message.
diff --git a/docs/tips.md b/docs/tips.md
new file mode 100644
index 0000000000000000000000000000000000000000..b5959349006d19bee178b5402c8b4106b1d1b956
--- /dev/null
+++ b/docs/tips.md
@@ -0,0 +1,74 @@
+## Training/test Tips
+#### Training/test options
+Please see `options/train_options.py` and `options/base_options.py` for the training flags; see `options/test_options.py` and `options/base_options.py` for the test flags. There are some model-specific flags as well, which are added in the model files, such as the `--lambda_A` option in `models/cycle_gan_model.py`. The default values of these options are also adjusted in the model files.
+#### CPU/GPU (default `--gpu_ids 0`)
+Please set `--gpu_ids -1` to use CPU mode and `--gpu_ids 0,1,2` for multi-GPU mode. You need a large batch size (e.g., `--batch_size 32`) to benefit from multiple GPUs.
+
+#### Visualization
+During training, the current results can be viewed using two methods. First, if you set `--display_id` > 0, the results and loss plot will appear on a local graphics web server launched by [visdom](https://github.com/facebookresearch/visdom). To do this, you should have `visdom` installed and a server running via the command `python -m visdom.server`. The default server URL is `http://localhost:8097`. `display_id` corresponds to the window ID that is displayed on the `visdom` server. The `visdom` display functionality is turned on by default. To avoid the extra overhead of communicating with `visdom`, set `--display_id -1`. Second, the intermediate results are saved to `[opt.checkpoints_dir]/[opt.name]/web/` as an HTML file. To avoid this, set `--no_html`.
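+
+For example (placeholder dataset path and experiment name):
+
+```bash
+# start the visdom server in one terminal
+python -m visdom.server
+
+# train in another terminal; plots appear at http://localhost:8097
+python train.py --dataroot /path/to/data --name myexp --model cycle_gan --display_id 1
+
+# or skip visdom entirely and keep only the HTML results
+python train.py --dataroot /path/to/data --name myexp --model cycle_gan --display_id -1
+```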
+
+#### Preprocessing
+Images can be resized and cropped in different ways using the `--preprocess` option. The default option `'resize_and_crop'` resizes the image to size `(opt.load_size, opt.load_size)` and then takes a random crop of size `(opt.crop_size, opt.crop_size)`. `'crop'` skips the resizing step and only performs random cropping. `'scale_width'` resizes the image to have width `opt.crop_size` while keeping the aspect ratio. `'scale_width_and_crop'` first resizes the image to have width `opt.load_size` and then takes a random crop of size `(opt.crop_size, opt.crop_size)`. `'none'` tries to skip all these preprocessing steps. However, if the image size is not a multiple of some number that depends on the number of downsampling layers in the generator, you will get an error because the size of the output image may differ from the size of the input image. Therefore, the `'none'` option still adjusts the image size to be a multiple of 4. You might need a bigger adjustment if you change the generator architecture. Please see `data/base_dataset.py` to see how all of this is implemented.
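+
+For illustration (placeholder path and name; the sizes are only examples), two common settings are:
+
+```bash
+# resize to load_size x load_size, then take a random crop_size x crop_size crop
+python train.py --dataroot /path/to/data --name myexp --model cycle_gan \
+    --preprocess resize_and_crop --load_size 286 --crop_size 256
+
+# keep the aspect ratio: scale the width to load_size, then random-crop
+python train.py --dataroot /path/to/data --name myexp --model cycle_gan \
+    --preprocess scale_width_and_crop --load_size 1024 --crop_size 256
+```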
+
+#### Fine-tuning/resume training
+To fine-tune a pre-trained model, or resume previous training, use the `--continue_train` flag. The program will then load the model based on `epoch`. By default, the program will initialize the epoch count as 1. Set `--epoch_count <int>` to specify a different starting epoch count.
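+
+For example, to resume an earlier run (placeholder path and name), something like the following should work:
+
+```bash
+# load the latest checkpoints of the experiment `myexp` and keep counting epochs from 10
+python train.py --dataroot /path/to/data --name myexp --model cycle_gan \
+    --continue_train --epoch_count 10
+```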
+
+
+#### Prepare your own datasets for CycleGAN
+You need to create two directories to host images from domain A `/path/to/data/trainA` and from domain B `/path/to/data/trainB`. Then you can train the model with the dataset flag `--dataroot /path/to/data`. Optionally, you can create hold-out test datasets at `/path/to/data/testA` and `/path/to/data/testB` to test your model on unseen images.
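+
+A minimal sketch of the expected layout and a training command (paths and the experiment name are placeholders):
+
+```bash
+mkdir -p /path/to/data/trainA /path/to/data/trainB /path/to/data/testA /path/to/data/testB
+# put domain-A images in trainA/ and domain-B images in trainB/, then:
+python train.py --dataroot /path/to/data --name myexp --model cycle_gan
+```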
+
+#### Prepare your own datasets for pix2pix
+Pix2pix's training requires paired data. We provide a python script to generate training data in the form of pairs of images {A,B}, where A and B are two different depictions of the same underlying scene. For example, these might be pairs {label map, photo} or {bw image, color image}. Then we can learn to translate A to B or B to A:
+
+Create folder `/path/to/data` with subdirectories `A` and `B`. `A` and `B` should each have their own subdirectories `train`, `val`, `test`, etc. In `/path/to/data/A/train`, put training images in style A. In `/path/to/data/B/train`, put the corresponding images in style B. Repeat the same for the other data splits (`val`, `test`, etc.).
+
+Corresponding images in a pair {A,B} must be the same size and have the same filename, e.g., `/path/to/data/A/train/1.jpg` is considered to correspond to `/path/to/data/B/train/1.jpg`.
+
+Once the data is formatted this way, call:
+```bash
+python datasets/combine_A_and_B.py --fold_A /path/to/data/A --fold_B /path/to/data/B --fold_AB /path/to/data
+```
+
+This will combine each pair of images (A,B) into a single image file, ready for training.
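+
+After that, a pix2pix training command on the combined data could look like (placeholder experiment name):
+
+```bash
+python train.py --dataroot /path/to/data --name myexp_pix2pix --model pix2pix --direction AtoB
+```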
+
+
+#### About image size
+Since the generator architecture in CycleGAN involves a series of downsampling/upsampling operations, the sizes of the input and output images may not match if the input image size is not a multiple of 4. As a result, you may get a runtime error because the L1 identity loss cannot be enforced with images of different sizes. Therefore, we slightly resize the image to a multiple of 4 even with the `--preprocess none` option. For the same reason, `--crop_size` needs to be a multiple of 4.
+
+#### Training/Testing with high res images
+CycleGAN is quite memory-intensive as four networks (two generators and two discriminators) need to be loaded on one GPU, so a large image cannot be entirely loaded. In this case, we recommend training with cropped images. For example, to generate 1024px results, you can train with `--preprocess scale_width_and_crop --load_size 1024 --crop_size 360`, and test with `--preprocess scale_width --load_size 1024`. This makes sure that training and test are performed at the same scale. At test time, you can afford a higher resolution because you don't need to load all the networks.
+
+#### Training/Testing with rectangular images
+Both pix2pix and CycleGAN can work with rectangular images. To make them work, you need to use different preprocessing flags. Let's say that you are working with `360x256` images. During training, you can specify `--preprocess crop` and `--crop_size 256`. This will train your model on randomly cropped `256x256` patches. At test time, you can apply the model to full `360x256` images with the flag `--preprocess none`.
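+
+Concretely, for the `360x256` example above (placeholder path and name):
+
+```bash
+# train on random 256x256 crops of the 360x256 images
+python train.py --dataroot /path/to/data --name myexp --model cycle_gan \
+    --preprocess crop --crop_size 256
+
+# test on the full 360x256 images
+python test.py --dataroot /path/to/data --name myexp --model cycle_gan --preprocess none
+```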
+
+There are practical restrictions regarding image sizes for each generator architecture. `unet_256` only supports images whose width and height are divisible by 256, while for `unet_128` the width and height need to be divisible by 128. For `resnet_6blocks` and `resnet_9blocks`, the width and height need to be divisible by 4.
+
+#### About loss curve
+Unfortunately, the loss curve does not reveal much information in training GANs, and CycleGAN is no exception. To check whether the training has converged or not, we recommend periodically generating a few samples and looking at them.
+
+#### About batch size
+For all experiments in the paper, we set the batch size to 1. If you have enough GPU memory, you can use a larger batch size with batch norm or instance norm. (Note that the default batchnorm does not work well with multi-GPU training. You may consider using [synchronized batchnorm](https://github.com/vacancy/Synchronized-BatchNorm-PyTorch) instead.) But please be aware that the batch size can affect training. In particular, even with instance normalization, different batch sizes can lead to different results. Moreover, increasing `--crop_size` may be a good alternative to increasing the batch size.
+
+
+#### Notes on Colorization
+There is no need to run `combine_A_and_B.py` for colorization. Instead, you need to prepare natural images and set `--dataset_mode colorization` and `--model colorization` in the script. The program will automatically convert each RGB image into Lab color space and create an `L -> ab` image pair during training. Also set `--input_nc 1` and `--output_nc 2`. The training and test directories should be organized as `/your/data/train` and `/your/data/test`. See the example scripts `scripts/train_colorization.sh` and `scripts/test_colorization.sh` for more details.
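+
+An example training command following these notes (placeholder experiment name):
+
+```bash
+python train.py --dataroot /your/data --name myexp_color --model colorization \
+    --dataset_mode colorization --input_nc 1 --output_nc 2
+```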
+
+#### Notes on Extracting Edges
+We provide python and Matlab scripts to extract coarse edges from photos. Run `scripts/edges/batch_hed.py` to compute [HED](https://github.com/s9xie/hed) edges. Run `scripts/edges/PostprocessHED.m` to simplify edges with additional post-processing steps. Check the code documentation for more details.
+
+#### Evaluating Labels2Photos on Cityscapes
+We provide scripts for running the evaluation of the Labels2Photos task on the Cityscapes **validation** set. We assume that you have installed `caffe` (and `pycaffe`) in your system. If not, see the [official website](http://caffe.berkeleyvision.org/installation.html) for installation instructions. Once `caffe` is successfully installed, download the pre-trained FCN-8s semantic segmentation model (512MB) by running
+```bash
+bash ./scripts/eval_cityscapes/download_fcn8s.sh
+```
+Then make sure `./scripts/eval_cityscapes/` is in your system's python path. If not, run the following command to add it
+```bash
+export PYTHONPATH=${PYTHONPATH}:./scripts/eval_cityscapes/
+```
+Now you can run the following command to evaluate your predictions:
+```bash
+python ./scripts/eval_cityscapes/evaluate.py --cityscapes_dir /path/to/original/cityscapes/dataset/ --result_dir /path/to/your/predictions/ --output_dir /path/to/output/directory/
+```
+Images stored under `--result_dir` should contain your model predictions on the Cityscapes **validation** split, and have the original Cityscapes naming convention (e.g., `frankfurt_000001_038418_leftImg8bit.png`). The script will output a text file under `--output_dir` containing the metric.
+
+**Further notes**: Our pre-trained FCN model is **not** supposed to work on Cityscapes at the original resolution (1024x2048), as it was trained on 256x256 images that are upsampled to 1024x2048 during training. The purpose of the resizing during training was to 1) keep the label maps untouched in their original high resolution and 2) avoid the need to change the standard FCN training code and architecture for Cityscapes. At test time, you need to synthesize 256x256 results. Our test code will automatically upsample your results to 1024x2048 before feeding them to the pre-trained FCN model. The output is at 1024x2048 resolution and will be compared to 1024x2048 ground truth labels. You do not need to resize the ground truth labels. The best way to verify that everything is correct is to first reproduce the numbers for real images reported in the paper. To do so, you need to resize the original/real Cityscapes images (**not** the labels) to 256x256 and feed them to the evaluation code.
diff --git a/environment.yml b/environment.yml
new file mode 100644
index 0000000000000000000000000000000000000000..62b3b130af646be01c77f0292058bde4bba7b169
--- /dev/null
+++ b/environment.yml
@@ -0,0 +1,17 @@
+name: pytorch-CycleGAN-and-pix2pix
+channels:
+- pytorch
+- defaults
+dependencies:
+- python=3.8
+- pytorch=1.8.1
+- scipy
+- pip
+- pip:
+ - dominate==2.6.0
+ - torchvision==0.9.1
+ - Pillow==8.0.1
+ - numpy==1.19.2
+ - visdom==0.1.8
+ - wandb==0.12.18
+
diff --git a/generate.py b/generate.py
new file mode 100644
index 0000000000000000000000000000000000000000..b29582be921067eb419b1df3fff42e1f75d08a63
--- /dev/null
+++ b/generate.py
@@ -0,0 +1,53 @@
+"""
+usage:
+python3 generate.py --image_path ./apple_test.jpg --name apple2orange --model cycle_gan --gpu_ids -1
+
+gpu_ids: -1 for cpu inference
+"""
+from options.test_options import TestOptions
+opt = TestOptions().parse()
+from models.one_direction_test_model import OneDirectionTestModel
+from data.unaligned_data_loader import load_image_for_prediction
+import os
+import numpy as np
+from PIL import Image
+
+opt.nThreads = 1
+opt.batchSize = 1
+opt.serial_batches = True
+
+
+def generate():
+ """
+ generate a single image specified by --image_path, and show the generated result
+ :return:
+ """
+ image_path = opt.image_path
+ print('generate from {}'.format(image_path))
+
+ data = load_image_for_prediction(opt, image_path)
+
+ model = OneDirectionTestModel()
+ model.initialize(opt=opt)
+ model.set_input(data)
+ model.test()
+
+ visuals = model.get_current_visuals()
+ generated_a = visuals['fake_B']
+
+ image_generated = Image.fromarray(generated_a)
+ image_generated.save(str(os.path.basename(image_path).split('.')[0]) + '_fake_b.jpg')
+
+ combined_result = np.concatenate([img for _, img in visuals.items()], 1)
+ image_combined = Image.fromarray(combined_result)
+ image_combined.save(str(os.path.basename(image_path).split('.')[0]) + '_combined.jpg')
+ image_combined.show()
+ print('generated image saved.')
+
+
+
+if __name__ == '__main__':
+ generate()
+
diff --git a/imgs/edges2cats.jpg b/imgs/edges2cats.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..be5c1412eb0decea00d7f62e609bab555fa6b0e2
Binary files /dev/null and b/imgs/edges2cats.jpg differ
diff --git a/imgs/horse2zebra.gif b/imgs/horse2zebra.gif
new file mode 100644
index 0000000000000000000000000000000000000000..4ded4d1ec2f5438765418c8a32d5e6f401b7d5cf
--- /dev/null
+++ b/imgs/horse2zebra.gif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16a76adedd309c46ba6ed63f89b14130c4a671fd6febc26fb0372a1ccf16c7aa
+size 7686299
diff --git a/models/__init__.py b/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc01113da66ff042bd1807b5bfdb70c4bce8d14c
--- /dev/null
+++ b/models/__init__.py
@@ -0,0 +1,67 @@
+"""This package contains modules related to objective functions, optimizations, and network architectures.
+
+To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
+You need to implement the following five functions:
+ -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
+ -- <set_input>: unpack data from dataset and apply preprocessing.
+ -- <forward>: produce intermediate results.
+ -- <optimize_parameters>: calculate loss, gradients, and update network weights.
+ -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
+
+In the function <__init__>, you need to define four lists:
+ -- self.loss_names (str list): specify the training losses that you want to plot and save.
+ -- self.model_names (str list): define networks used in our training.
+ -- self.visual_names (str list): specify the images that you want to display and save.
+ -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+
+Now you can use the model class by specifying flag '--model dummy'.
+See our template model class 'template_model.py' for more details.
+"""
+
+import importlib
+from models.base_model import BaseModel
+
+
+def find_model_using_name(model_name):
+ """Import the module "models/[model_name]_model.py".
+
+ In the file, the class called DatasetNameModel() will
+ be instantiated. It has to be a subclass of BaseModel,
+ and it is case-insensitive.
+ """
+ model_filename = "models." + model_name + "_model"
+ modellib = importlib.import_module(model_filename)
+ model = None
+ target_model_name = model_name.replace('_', '') + 'model'
+ for name, cls in modellib.__dict__.items():
+ if name.lower() == target_model_name.lower() \
+ and issubclass(cls, BaseModel):
+ model = cls
+
+ if model is None:
+ print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
+ exit(0)
+
+ return model
+
+
+def get_option_setter(model_name):
+ """Return the static method of the model class."""
+ model_class = find_model_using_name(model_name)
+ return model_class.modify_commandline_options
+
+
+def create_model(opt):
+ """Create a model given the option.
+
+ This function wraps the model class.
+ This is the main interface between this package and 'train.py'/'test.py'
+
+ Example:
+ >>> from models import create_model
+ >>> model = create_model(opt)
+ """
+ model = find_model_using_name(opt.model)
+ instance = model(opt)
+ print("model [%s] was created" % type(instance).__name__)
+ return instance
diff --git a/models/__pycache__/__init__.cpython-39.pyc b/models/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47a11b63cb10143e5adc90babc01103886826e15
Binary files /dev/null and b/models/__pycache__/__init__.cpython-39.pyc differ
diff --git a/models/__pycache__/base_model.cpython-39.pyc b/models/__pycache__/base_model.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0cc3432f4ab13cb5029a050cc3c63e457eb53f1b
Binary files /dev/null and b/models/__pycache__/base_model.cpython-39.pyc differ
diff --git a/models/__pycache__/colorization_model.cpython-39.pyc b/models/__pycache__/colorization_model.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3cc04ca36e9474ce0690d9671c886030b2d02552
Binary files /dev/null and b/models/__pycache__/colorization_model.cpython-39.pyc differ
diff --git a/models/__pycache__/networks.cpython-39.pyc b/models/__pycache__/networks.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fef66119597b6b1327d83707aaae01759560f54b
Binary files /dev/null and b/models/__pycache__/networks.cpython-39.pyc differ
diff --git a/models/__pycache__/pix2pix_model.cpython-39.pyc b/models/__pycache__/pix2pix_model.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c4eadacbc9c815dc18ddc67e29542bbdd24bed7
Binary files /dev/null and b/models/__pycache__/pix2pix_model.cpython-39.pyc differ
diff --git a/models/base_model.py b/models/base_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..6de961b51a2cc12634fc40c39f63b017c6262b4c
--- /dev/null
+++ b/models/base_model.py
@@ -0,0 +1,230 @@
+import os
+import torch
+from collections import OrderedDict
+from abc import ABC, abstractmethod
+from . import networks
+
+
+class BaseModel(ABC):
+ """This class is an abstract base class (ABC) for models.
+ To create a subclass, you need to implement the following five functions:
+ -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
+ -- <set_input>: unpack data from dataset and apply preprocessing.
+ -- <forward>: produce intermediate results.
+ -- <optimize_parameters>: calculate losses, gradients, and update network weights.
+ -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
+ """
+
+ def __init__(self, opt):
+ """Initialize the BaseModel class.
+
+ Parameters:
+ opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
+
+ When creating your custom class, you need to implement your own initialization.
+ In this function, you should first call <BaseModel.__init__(self, opt)>
+ Then, you need to define four lists:
+ -- self.loss_names (str list): specify the training losses that you want to plot and save.
+ -- self.model_names (str list): define networks used in our training.
+ -- self.visual_names (str list): specify the images that you want to display and save.
+ -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+ """
+ self.opt = opt
+ self.gpu_ids = opt.gpu_ids
+ self.isTrain = opt.isTrain
+ self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
+ self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
+ if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
+ torch.backends.cudnn.benchmark = True
+ self.loss_names = []
+ self.model_names = []
+ self.visual_names = []
+ self.optimizers = []
+ self.image_paths = []
+ self.metric = 0 # used for learning rate policy 'plateau'
+
+ @staticmethod
+ def modify_commandline_options(parser, is_train):
+ """Add new model-specific options, and rewrite default values for existing options.
+
+ Parameters:
+ parser -- original option parser
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+ Returns:
+ the modified parser.
+ """
+ return parser
+
+ @abstractmethod
+ def set_input(self, input):
+ """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+ Parameters:
+ input (dict): includes the data itself and its metadata information.
+ """
+ pass
+
+ @abstractmethod
+ def forward(self):
+ """Run forward pass; called by both functions <optimize_parameters> and <test>."""
+ pass
+
+ @abstractmethod
+ def optimize_parameters(self):
+ """Calculate losses, gradients, and update network weights; called in every training iteration"""
+ pass
+
+ def setup(self, opt):
+ """Load and print networks; create schedulers
+
+ Parameters:
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+ """
+ if self.isTrain:
+ self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
+ if not self.isTrain or opt.continue_train:
+ load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
+ self.load_networks(load_suffix)
+ self.print_networks(opt.verbose)
+
+ def eval(self):
+ """Make models eval mode during test time"""
+ for name in self.model_names:
+ if isinstance(name, str):
+ net = getattr(self, 'net' + name)
+ net.eval()
+
+ def test(self):
+ """Forward function used in test time.
+
+ This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
+ It also calls <compute_visuals> to produce additional visualization results
+ """
+ with torch.no_grad():
+ self.forward()
+ self.compute_visuals()
+
+ def compute_visuals(self):
+ """Calculate additional output images for visdom and HTML visualization"""
+ pass
+
+ def get_image_paths(self):
+ """ Return image paths that are used to load current data"""
+ return self.image_paths
+
+ def update_learning_rate(self):
+ """Update learning rates for all the networks; called at the end of every epoch"""
+ old_lr = self.optimizers[0].param_groups[0]['lr']
+ for scheduler in self.schedulers:
+ if self.opt.lr_policy == 'plateau':
+ scheduler.step(self.metric)
+ else:
+ scheduler.step()
+
+ lr = self.optimizers[0].param_groups[0]['lr']
+ print('learning rate %.7f -> %.7f' % (old_lr, lr))
+
+ def get_current_visuals(self):
+ """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
+ visual_ret = OrderedDict()
+ for name in self.visual_names:
+ if isinstance(name, str):
+ visual_ret[name] = getattr(self, name)
+ return visual_ret
+
+ def get_current_losses(self):
+ """Return training losses / errors. train.py will print out these errors on console, and save them to a file"""
+ errors_ret = OrderedDict()
+ for name in self.loss_names:
+ if isinstance(name, str):
+ errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
+ return errors_ret
+
+ def save_networks(self, epoch):
+ """Save all the networks to the disk.
+
+ Parameters:
+ epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
+ """
+ for name in self.model_names:
+ if isinstance(name, str):
+ save_filename = '%s_net_%s.pth' % (epoch, name)
+ save_path = os.path.join(self.save_dir, save_filename)
+ net = getattr(self, 'net' + name)
+
+ if len(self.gpu_ids) > 0 and torch.cuda.is_available():
+ torch.save(net.module.cpu().state_dict(), save_path)
+ net.cuda(self.gpu_ids[0])
+ else:
+ torch.save(net.cpu().state_dict(), save_path)
+
+ def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
+ """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
+ key = keys[i]
+ if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
+ if module.__class__.__name__.startswith('InstanceNorm') and \
+ (key == 'running_mean' or key == 'running_var'):
+ if getattr(module, key) is None:
+ state_dict.pop('.'.join(keys))
+ if module.__class__.__name__.startswith('InstanceNorm') and \
+ (key == 'num_batches_tracked'):
+ state_dict.pop('.'.join(keys))
+ else:
+ self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
+
+ def load_networks(self, epoch):
+ """Load all the networks from the disk.
+
+ Parameters:
+ epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
+ """
+ for name in self.model_names:
+ if isinstance(name, str):
+ load_filename = '%s_net_%s.pth' % (epoch, name)
+ load_path = os.path.join(self.save_dir, load_filename)
+ net = getattr(self, 'net' + name)
+ if isinstance(net, torch.nn.DataParallel):
+ net = net.module
+ print('loading the model from %s' % load_path)
+ # if you are using PyTorch newer than 0.4 (e.g., built from
+ # GitHub source), you can remove str() on self.device
+ state_dict = torch.load(load_path, map_location=str(self.device))
+ if hasattr(state_dict, '_metadata'):
+ del state_dict._metadata
+
+ # patch InstanceNorm checkpoints prior to 0.4
+ for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
+ self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
+ net.load_state_dict(state_dict)
+
+ def print_networks(self, verbose):
+ """Print the total number of parameters in the network and (if verbose) network architecture
+
+ Parameters:
+ verbose (bool) -- if verbose: print the network architecture
+ """
+ print('---------- Networks initialized -------------')
+ for name in self.model_names:
+ if isinstance(name, str):
+ net = getattr(self, 'net' + name)
+ num_params = 0
+ for param in net.parameters():
+ num_params += param.numel()
+ if verbose:
+ print(net)
+ print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
+ print('-----------------------------------------------')
+
+ def set_requires_grad(self, nets, requires_grad=False):
+ """Set requires_grad=False for all the networks to avoid unnecessary computations
+ Parameters:
+ nets (network list) -- a list of networks
+ requires_grad (bool) -- whether the networks require gradients or not
+ """
+ if not isinstance(nets, list):
+ nets = [nets]
+ for net in nets:
+ if net is not None:
+ for param in net.parameters():
+ param.requires_grad = requires_grad
diff --git a/models/colorization_model.py b/models/colorization_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b4a12722e52cf93b85504bbe9a078f7b396d28b
--- /dev/null
+++ b/models/colorization_model.py
@@ -0,0 +1,68 @@
+from .pix2pix_model import Pix2PixModel
+import torch
+from skimage import color # used for lab2rgb
+import numpy as np
+
+
+class ColorizationModel(Pix2PixModel):
+ """This is a subclass of Pix2PixModel for image colorization (black & white image -> colorful images).
+
+ The model training requires the '--dataset_mode colorization' dataset.
+ It trains a pix2pix model, mapping from L channel to ab channels in Lab color space.
+ By default, the colorization dataset will automatically set '--input_nc 1' and '--output_nc 2'.
+ """
+ @staticmethod
+ def modify_commandline_options(parser, is_train=True):
+ """Add new dataset-specific options, and rewrite default values for existing options.
+
+ Parameters:
+ parser -- original option parser
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+ Returns:
+ the modified parser.
+
+ By default, we use 'colorization' dataset for this model.
+ See the original pix2pix paper (https://arxiv.org/pdf/1611.07004.pdf) and colorization results (Figure 9 in the paper)
+ """
+ Pix2PixModel.modify_commandline_options(parser, is_train)
+ parser.set_defaults(dataset_mode='colorization')
+ return parser
+
+ def __init__(self, opt):
+ """Initialize the class.
+
+ Parameters:
+ opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
+
+ For visualization, we set 'visual_names' as 'real_A' (input real image),
+ 'real_B_rgb' (ground truth RGB image), and 'fake_B_rgb' (predicted RGB image)
+ We convert the Lab image 'real_B' (inherited from Pix2PixModel) to an RGB image 'real_B_rgb'.
+ We convert the Lab image 'fake_B' (inherited from Pix2PixModel) to an RGB image 'fake_B_rgb'.
+ """
+ # reuse the pix2pix model
+ Pix2PixModel.__init__(self, opt)
+ # specify the images to be visualized.
+ self.visual_names = ['real_A', 'real_B_rgb', 'fake_B_rgb']
+
+ def lab2rgb(self, L, AB):
+ """Convert an Lab tensor image to a RGB numpy output
+ Parameters:
+ L (1-channel tensor array): L channel images (range: [-1, 1], torch tensor array)
+ AB (2-channel tensor array): ab channel images (range: [-1, 1], torch tensor array)
+
+ Returns:
+ rgb (RGB numpy image): rgb output images (range: [0, 255], numpy array)
+ """
+ AB2 = AB * 110.0
+ L2 = (L + 1.0) * 50.0
+ Lab = torch.cat([L2, AB2], dim=1)
+ Lab = Lab[0].data.cpu().float().numpy()
+ Lab = np.transpose(Lab.astype(np.float64), (1, 2, 0))
+ rgb = color.lab2rgb(Lab) * 255
+ return rgb
+
+ def compute_visuals(self):
+ """Calculate additional output images for visdom and HTML visualization"""
+ self.real_B_rgb = self.lab2rgb(self.real_A, self.real_B)
+ self.fake_B_rgb = self.lab2rgb(self.real_A, self.fake_B)
diff --git a/models/cycle_gan_model.py b/models/cycle_gan_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..15bb72d8ddc4e7891b643578025037c34f047aec
--- /dev/null
+++ b/models/cycle_gan_model.py
@@ -0,0 +1,194 @@
+import torch
+import itertools
+from util.image_pool import ImagePool
+from .base_model import BaseModel
+from . import networks
+
+
+class CycleGANModel(BaseModel):
+ """
+ This class implements the CycleGAN model, for learning image-to-image translation without paired data.
+
+ The model training requires '--dataset_mode unaligned' dataset.
+ By default, it uses a '--netG resnet_9blocks' ResNet generator,
+ a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
+ and a least-square GANs objective ('--gan_mode lsgan').
+
+ CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
+ """
+ @staticmethod
+ def modify_commandline_options(parser, is_train=True):
+ """Add new dataset-specific options, and rewrite default values for existing options.
+
+ Parameters:
+ parser -- original option parser
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+ Returns:
+ the modified parser.
+
+ For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
+ A (source domain), B (target domain).
+ Generators: G_A: A -> B; G_B: B -> A.
+ Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
+ Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
+ Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
+ Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
+ Dropout is not used in the original CycleGAN paper.
+ """
+ parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout
+ if is_train:
+ parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
+ parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
+ parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
+
+ return parser
+
+ def __init__(self, opt):
+ """Initialize the CycleGAN class.
+
+ Parameters:
+ opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
+ """
+ BaseModel.__init__(self, opt)
+ # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
+ self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
+ # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
+ visual_names_A = ['real_A', 'fake_B', 'rec_A']
+ visual_names_B = ['real_B', 'fake_A', 'rec_B']
+ if self.isTrain and self.opt.lambda_identity > 0.0: # if identity loss is used, we also visualize idt_B=G_B(A) and idt_A=G_A(B)
+ visual_names_A.append('idt_B')
+ visual_names_B.append('idt_A')
+
+ self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B
+ # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
+ if self.isTrain:
+ self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
+ else: # during test time, only load Gs
+ self.model_names = ['G_A', 'G_B']
+
+ # define networks (both Generators and discriminators)
+ # The naming is different from those used in the paper.
+ # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
+ self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
+ not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
+ self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
+ not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
+
+ if self.isTrain: # define discriminators
+ self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
+ opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
+ self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
+ opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
+
+ if self.isTrain:
+ if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels
+ assert(opt.input_nc == opt.output_nc)
+ self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
+ self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
+ # define loss functions
+ self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.
+ self.criterionCycle = torch.nn.L1Loss()
+ self.criterionIdt = torch.nn.L1Loss()
+ # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
+ self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
+ self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
+ self.optimizers.append(self.optimizer_G)
+ self.optimizers.append(self.optimizer_D)
+
+ def set_input(self, input):
+ """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+ Parameters:
+ input (dict): include the data itself and its metadata information.
+
+ The option 'direction' can be used to swap domain A and domain B.
+ """
+ AtoB = self.opt.direction == 'AtoB'
+ self.real_A = input['A' if AtoB else 'B'].to(self.device)
+ self.real_B = input['B' if AtoB else 'A'].to(self.device)
+ self.image_paths = input['A_paths' if AtoB else 'B_paths']
+
+ def forward(self):
+ """Run forward pass; called by both functions <optimize_parameters> and <test>."""
+ self.fake_B = self.netG_A(self.real_A) # G_A(A)
+ self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A))
+ self.fake_A = self.netG_B(self.real_B) # G_B(B)
+ self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B))
+
+ def backward_D_basic(self, netD, real, fake):
+ """Calculate GAN loss for the discriminator
+
+ Parameters:
+ netD (network) -- the discriminator D
+ real (tensor array) -- real images
+ fake (tensor array) -- images generated by a generator
+
+ Return the discriminator loss.
+ We also call loss_D.backward() to calculate the gradients.
+ """
+ # Real
+ pred_real = netD(real)
+ loss_D_real = self.criterionGAN(pred_real, True)
+ # Fake
+ pred_fake = netD(fake.detach())
+ loss_D_fake = self.criterionGAN(pred_fake, False)
+ # Combined loss and calculate gradients
+ loss_D = (loss_D_real + loss_D_fake) * 0.5
+ loss_D.backward()
+ return loss_D
+
+ def backward_D_A(self):
+ """Calculate GAN loss for discriminator D_A"""
+ fake_B = self.fake_B_pool.query(self.fake_B)
+ self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
+
+ def backward_D_B(self):
+ """Calculate GAN loss for discriminator D_B"""
+ fake_A = self.fake_A_pool.query(self.fake_A)
+ self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
+
+ def backward_G(self):
+ """Calculate the loss for generators G_A and G_B"""
+ lambda_idt = self.opt.lambda_identity
+ lambda_A = self.opt.lambda_A
+ lambda_B = self.opt.lambda_B
+ # Identity loss
+ if lambda_idt > 0:
+ # G_A should be identity if real_B is fed: ||G_A(B) - B||
+ self.idt_A = self.netG_A(self.real_B)
+ self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
+ # G_B should be identity if real_A is fed: ||G_B(A) - A||
+ self.idt_B = self.netG_B(self.real_A)
+ self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
+ else:
+ self.loss_idt_A = 0
+ self.loss_idt_B = 0
+
+ # GAN loss D_A(G_A(A))
+ self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
+ # GAN loss D_B(G_B(B))
+ self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
+ # Forward cycle loss || G_B(G_A(A)) - A||
+ self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
+ # Backward cycle loss || G_A(G_B(B)) - B||
+ self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
+ # combined loss and calculate gradients
+ self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
+ self.loss_G.backward()
+
+ def optimize_parameters(self):
+ """Calculate losses, gradients, and update network weights; called in every training iteration"""
+ # forward
+ self.forward() # compute fake images and reconstruction images.
+ # G_A and G_B
+ self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs
+ self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
+ self.backward_G() # calculate gradients for G_A and G_B
+ self.optimizer_G.step() # update G_A and G_B's weights
+ # D_A and D_B
+ self.set_requires_grad([self.netD_A, self.netD_B], True)
+ self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
+ self.backward_D_A() # calculate gradients for D_A
+ self.backward_D_B() # calculate gradients for D_B
+ self.optimizer_D.step() # update D_A and D_B's weights
diff --git a/models/networks.py b/models/networks.py
new file mode 100644
index 0000000000000000000000000000000000000000..f46237142a8629f344aeb3a1e3c3fb16a7392341
--- /dev/null
+++ b/models/networks.py
@@ -0,0 +1,616 @@
+import torch
+import torch.nn as nn
+from torch.nn import init
+import functools
+from torch.optim import lr_scheduler
+
+
+###############################################################################
+# Helper Functions
+###############################################################################
+
+
+class Identity(nn.Module):
+ def forward(self, x):
+ return x
+
+
+def get_norm_layer(norm_type='instance'):
+ """Return a normalization layer
+
+ Parameters:
+ norm_type (str) -- the name of the normalization layer: batch | instance | none
+
+ For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
+ For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
+ """
+ if norm_type == 'batch':
+ norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
+ elif norm_type == 'instance':
+ norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
+ elif norm_type == 'none':
+ def norm_layer(x):
+ return Identity()
+ else:
+ raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
+ return norm_layer
+
+
+def get_scheduler(optimizer, opt):
+ """Return a learning rate scheduler
+
+ Parameters:
+ optimizer -- the optimizer of the network
+ opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
+ opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
+
+ For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
+ and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
+ For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
+ See https://pytorch.org/docs/stable/optim.html for more details.
+ """
+ if opt.lr_policy == 'linear':
+ def lambda_rule(epoch):
+ lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
+ return lr_l
+ scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
+ elif opt.lr_policy == 'step':
+ scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
+ elif opt.lr_policy == 'plateau':
+ scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
+ elif opt.lr_policy == 'cosine':
+ scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
+ else:
+ raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
+ return scheduler
+
+
+def init_weights(net, init_type='normal', init_gain=0.02):
+ """Initialize network weights.
+
+ Parameters:
+ net (network) -- network to be initialized
+ init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
+ init_gain (float) -- scaling factor for normal, xavier and orthogonal.
+
+ We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
+ work better for some applications. Feel free to try yourself.
+ """
+ def init_func(m): # define the initialization function
+ classname = m.__class__.__name__
+ if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
+ if init_type == 'normal':
+ init.normal_(m.weight.data, 0.0, init_gain)
+ elif init_type == 'xavier':
+ init.xavier_normal_(m.weight.data, gain=init_gain)
+ elif init_type == 'kaiming':
+ init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
+ elif init_type == 'orthogonal':
+ init.orthogonal_(m.weight.data, gain=init_gain)
+ else:
+ raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
+ if hasattr(m, 'bias') and m.bias is not None:
+ init.constant_(m.bias.data, 0.0)
+ elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
+ init.normal_(m.weight.data, 1.0, init_gain)
+ init.constant_(m.bias.data, 0.0)
+
+ print('initialize network with %s' % init_type)
+ net.apply(init_func) # apply the initialization function
+
+
+def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
+ """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
+ Parameters:
+ net (network) -- the network to be initialized
+ init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
+ gain (float) -- scaling factor for normal, xavier and orthogonal.
+ gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
+
+ Return an initialized network.
+ """
+ if len(gpu_ids) > 0:
+ assert(torch.cuda.is_available())
+ net.to(gpu_ids[0])
+ net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
+ init_weights(net, init_type, init_gain=init_gain)
+ return net
+
+
+def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
+ """Create a generator
+
+ Parameters:
+ input_nc (int) -- the number of channels in input images
+ output_nc (int) -- the number of channels in output images
+ ngf (int) -- the number of filters in the last conv layer
+ netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
+ norm (str) -- the name of normalization layers used in the network: batch | instance | none
+ use_dropout (bool) -- if use dropout layers.
+ init_type (str) -- the name of our initialization method.
+ init_gain (float) -- scaling factor for normal, xavier and orthogonal.
+ gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
+
+ Returns a generator
+
+ Our current implementation provides two types of generators:
+ U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
+ The original U-Net paper: https://arxiv.org/abs/1505.04597
+
+ Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
+ Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
+ We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
+
+
+ The generator has been initialized by <init_net>. It uses RELU for non-linearity.
+ """
+ net = None
+ norm_layer = get_norm_layer(norm_type=norm)
+
+ if netG == 'resnet_9blocks':
+ net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
+ elif netG == 'resnet_6blocks':
+ net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
+ elif netG == 'unet_128':
+ net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
+ elif netG == 'unet_256':
+ net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
+ else:
+ raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
+ return init_net(net, init_type, init_gain, gpu_ids)
+
+
+def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
+ """Create a discriminator
+
+ Parameters:
+ input_nc (int) -- the number of channels in input images
+ ndf (int) -- the number of filters in the first conv layer
+ netD (str) -- the architecture's name: basic | n_layers | pixel
+ n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
+ norm (str) -- the type of normalization layers used in the network.
+ init_type (str) -- the name of the initialization method.
+ init_gain (float) -- scaling factor for normal, xavier and orthogonal.
+ gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
+
+ Returns a discriminator
+
+ Our current implementation provides three types of discriminators:
+ [basic]: 'PatchGAN' classifier described in the original pix2pix paper.
+ It can classify whether 70×70 overlapping patches are real or fake.
+ Such a patch-level discriminator architecture has fewer parameters
+ than a full-image discriminator and can work on arbitrarily-sized images
+ in a fully convolutional fashion.
+
+ [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
+ with the parameter n_layers_D (default=3, as used in [basic] (PatchGAN)).
+
+ [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
+ It encourages greater color diversity but has no effect on spatial statistics.
+
+ The discriminator has been initialized by init_net. It uses Leaky ReLU for non-linearity.
+ """
+ net = None
+ norm_layer = get_norm_layer(norm_type=norm)
+
+ if netD == 'basic': # default PatchGAN classifier
+ net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
+ elif netD == 'n_layers': # more options
+ net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
+ elif netD == 'pixel': # classify if each pixel is real or fake
+ net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
+ else:
+ raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
+ return init_net(net, init_type, init_gain, gpu_ids)
+
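+# Usage sketch for a conditional (pix2pix-style) discriminator; real_A and fake_B are
+# placeholders for an input image and a generated image, and the values are illustrative:
+#     netD = define_D(input_nc=3 + 3, ndf=64, netD='basic', norm='batch', gpu_ids=[])
+#     pred = netD(torch.cat([real_A, fake_B], 1))  # one-channel patch prediction map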
+
+##############################################################################
+# Classes
+##############################################################################
+class GANLoss(nn.Module):
+ """Define different GAN objectives.
+
+ The GANLoss class abstracts away the need to create the target label tensor
+ that has the same size as the input.
+ """
+
+ def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
+ """ Initialize the GANLoss class.
+
+ Parameters:
+ gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
+ target_real_label (float) - - label for a real image
+ target_fake_label (float) - - label for a fake image
+
+ Note: Do not use sigmoid as the last layer of Discriminator.
+ LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
+ """
+ super(GANLoss, self).__init__()
+ self.register_buffer('real_label', torch.tensor(target_real_label))
+ self.register_buffer('fake_label', torch.tensor(target_fake_label))
+ self.gan_mode = gan_mode
+ if gan_mode == 'lsgan':
+ self.loss = nn.MSELoss()
+ elif gan_mode == 'vanilla':
+ self.loss = nn.BCEWithLogitsLoss()
+ elif gan_mode in ['wgangp']:
+ self.loss = None
+ else:
+ raise NotImplementedError('gan mode %s not implemented' % gan_mode)
+
+ def get_target_tensor(self, prediction, target_is_real):
+ """Create label tensors with the same size as the input.
+
+ Parameters:
+ prediction (tensor) - - typically the prediction from a discriminator
+ target_is_real (bool) - - if the ground truth label is for real images or fake images
+
+ Returns:
+ A label tensor filled with ground truth label, and with the size of the input
+ """
+
+ if target_is_real:
+ target_tensor = self.real_label
+ else:
+ target_tensor = self.fake_label
+ return target_tensor.expand_as(prediction)
+
+ def __call__(self, prediction, target_is_real):
+ """Calculate loss given Discriminator's output and grount truth labels.
+
+ Parameters:
+ prediction (tensor) - - typically the prediction output from a discriminator
+ target_is_real (bool) - - if the ground truth label is for real images or fake images
+
+ Returns:
+ the calculated loss.
+ """
+ if self.gan_mode in ['lsgan', 'vanilla']:
+ target_tensor = self.get_target_tensor(prediction, target_is_real)
+ loss = self.loss(prediction, target_tensor)
+ elif self.gan_mode == 'wgangp':
+ if target_is_real:
+ loss = -prediction.mean()
+ else:
+ loss = prediction.mean()
+ return loss
+
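+# Usage sketch (assumes netD outputs raw, un-sigmoided logits, as noted above; names are illustrative):
+#     criterionGAN = GANLoss('vanilla')
+#     loss_D_real = criterionGAN(netD(real_AB), True)
+#     loss_D_fake = criterionGAN(netD(fake_AB.detach()), False)
+#     loss_D = 0.5 * (loss_D_real + loss_D_fake)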
+
+def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
+ """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
+
+ Arguments:
+ netD (network) -- discriminator network
+ real_data (tensor array) -- real images
+ fake_data (tensor array) -- generated images from the generator
+ device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
+ type (str) -- if we mix real and fake data or not [real | fake | mixed].
+ constant (float) -- the constant used in formula ( ||gradient||_2 - constant)^2
+ lambda_gp (float) -- weight for this loss
+
+ Returns the gradient penalty loss
+ """
+ if lambda_gp > 0.0:
+ if type == 'real': # either use real images, fake images, or a linear interpolation of two.
+ interpolatesv = real_data
+ elif type == 'fake':
+ interpolatesv = fake_data
+ elif type == 'mixed':
+ alpha = torch.rand(real_data.shape[0], 1, device=device)
+ alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
+ interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
+ else:
+ raise NotImplementedError('{} not implemented'.format(type))
+ interpolatesv.requires_grad_(True)
+ disc_interpolates = netD(interpolatesv)
+ gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
+ grad_outputs=torch.ones(disc_interpolates.size()).to(device),
+ create_graph=True, retain_graph=True, only_inputs=True)
+ gradients = gradients[0].view(real_data.size(0), -1) # flatten the data
+ gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
+ return gradient_penalty, gradients
+ else:
+ return 0.0, None
+
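+# WGAN-GP usage sketch for one discriminator step (real/fake are placeholder batches):
+#     gp, _ = cal_gradient_penalty(netD, real, fake.detach(), device, type='mixed')
+#     loss_D = netD(fake.detach()).mean() - netD(real).mean() + gp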
+
+class ResnetGenerator(nn.Module):
+ """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
+
+ We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
+ """
+
+ def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
+ """Construct a Resnet-based generator
+
+ Parameters:
+ input_nc (int) -- the number of channels in input images
+ output_nc (int) -- the number of channels in output images
+ ngf (int) -- the number of filters in the last conv layer
+ norm_layer -- normalization layer
+ use_dropout (bool) -- if use dropout layers
+ n_blocks (int) -- the number of ResNet blocks
+ padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
+ """
+ assert(n_blocks >= 0)
+ super(ResnetGenerator, self).__init__()
+ if type(norm_layer) == functools.partial:
+ use_bias = norm_layer.func == nn.InstanceNorm2d
+ else:
+ use_bias = norm_layer == nn.InstanceNorm2d
+
+ model = [nn.ReflectionPad2d(3),
+ nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
+ norm_layer(ngf),
+ nn.ReLU(True)]
+
+ n_downsampling = 2
+ for i in range(n_downsampling): # add downsampling layers
+ mult = 2 ** i
+ model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
+ norm_layer(ngf * mult * 2),
+ nn.ReLU(True)]
+
+ mult = 2 ** n_downsampling
+ for i in range(n_blocks): # add ResNet blocks
+
+ model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
+
+ for i in range(n_downsampling): # add upsampling layers
+ mult = 2 ** (n_downsampling - i)
+ model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
+ kernel_size=3, stride=2,
+ padding=1, output_padding=1,
+ bias=use_bias),
+ norm_layer(int(ngf * mult / 2)),
+ nn.ReLU(True)]
+ model += [nn.ReflectionPad2d(3)]
+ model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
+ model += [nn.Tanh()]
+
+ self.model = nn.Sequential(*model)
+
+ def forward(self, input):
+ """Standard forward"""
+ return self.model(input)
+
+
+class ResnetBlock(nn.Module):
+ """Define a Resnet block"""
+
+ def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
+ """Initialize the Resnet block
+
+ A resnet block is a conv block with skip connections
+ We construct a conv block with build_conv_block function,
+ and implement skip connections in the forward function.
+ Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
+ """
+ super(ResnetBlock, self).__init__()
+ self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
+
+ def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
+ """Construct a convolutional block.
+
+ Parameters:
+ dim (int) -- the number of channels in the conv layer.
+ padding_type (str) -- the name of padding layer: reflect | replicate | zero
+ norm_layer -- normalization layer
+ use_dropout (bool) -- if use dropout layers.
+ use_bias (bool) -- if the conv layer uses bias or not
+
+ Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
+ """
+ conv_block = []
+ p = 0
+ if padding_type == 'reflect':
+ conv_block += [nn.ReflectionPad2d(1)]
+ elif padding_type == 'replicate':
+ conv_block += [nn.ReplicationPad2d(1)]
+ elif padding_type == 'zero':
+ p = 1
+ else:
+ raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+
+ conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
+ if use_dropout:
+ conv_block += [nn.Dropout(0.5)]
+
+ p = 0
+ if padding_type == 'reflect':
+ conv_block += [nn.ReflectionPad2d(1)]
+ elif padding_type == 'replicate':
+ conv_block += [nn.ReplicationPad2d(1)]
+ elif padding_type == 'zero':
+ p = 1
+ else:
+ raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+ conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
+
+ return nn.Sequential(*conv_block)
+
+ def forward(self, x):
+ """Forward function (with skip connections)"""
+ out = x + self.conv_block(x) # add skip connections
+ return out
+
+
+class UnetGenerator(nn.Module):
+ """Create a Unet-based generator"""
+
+ def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
+ """Construct a Unet generator
+ Parameters:
+ input_nc (int) -- the number of channels in input images
+ output_nc (int) -- the number of channels in output images
+ num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
+ an image of size 128x128 will become of size 1x1 at the bottleneck
+ ngf (int) -- the number of filters in the last conv layer
+ norm_layer -- normalization layer
+
+ We construct the U-Net from the innermost layer to the outermost layer.
+ It is a recursive process.
+ """
+ super(UnetGenerator, self).__init__()
+ # construct unet structure
+ unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
+ for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
+ unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
+ # gradually reduce the number of filters from ngf * 8 to ngf
+ unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
+ unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
+ unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
+ self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
+
+ def forward(self, input):
+ """Standard forward"""
+ return self.model(input)
+
+
+class UnetSkipConnectionBlock(nn.Module):
+ """Defines the Unet submodule with skip connection.
+ X -------------------identity----------------------
+ |-- downsampling -- |submodule| -- upsampling --|
+ """
+
+ def __init__(self, outer_nc, inner_nc, input_nc=None,
+ submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
+ """Construct a Unet submodule with skip connections.
+
+ Parameters:
+ outer_nc (int) -- the number of filters in the outer conv layer
+ inner_nc (int) -- the number of filters in the inner conv layer
+ input_nc (int) -- the number of channels in input images/features
+ submodule (UnetSkipConnectionBlock) -- previously defined submodules
+ outermost (bool) -- if this module is the outermost module
+ innermost (bool) -- if this module is the innermost module
+ norm_layer -- normalization layer
+ use_dropout (bool) -- if use dropout layers.
+ """
+ super(UnetSkipConnectionBlock, self).__init__()
+ self.outermost = outermost
+ if type(norm_layer) == functools.partial:
+ use_bias = norm_layer.func == nn.InstanceNorm2d
+ else:
+ use_bias = norm_layer == nn.InstanceNorm2d
+ if input_nc is None:
+ input_nc = outer_nc
+ downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
+ stride=2, padding=1, bias=use_bias)
+ downrelu = nn.LeakyReLU(0.2, True)
+ downnorm = norm_layer(inner_nc)
+ uprelu = nn.ReLU(True)
+ upnorm = norm_layer(outer_nc)
+
+ if outermost:
+ upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
+ kernel_size=4, stride=2,
+ padding=1)
+ down = [downconv]
+ up = [uprelu, upconv, nn.Tanh()]
+ model = down + [submodule] + up
+ elif innermost:
+ upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
+ kernel_size=4, stride=2,
+ padding=1, bias=use_bias)
+ down = [downrelu, downconv]
+ up = [uprelu, upconv, upnorm]
+ model = down + up
+ else:
+ upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
+ kernel_size=4, stride=2,
+ padding=1, bias=use_bias)
+ down = [downrelu, downconv, downnorm]
+ up = [uprelu, upconv, upnorm]
+
+ if use_dropout:
+ model = down + [submodule] + up + [nn.Dropout(0.5)]
+ else:
+ model = down + [submodule] + up
+
+ self.model = nn.Sequential(*model)
+
+ def forward(self, x):
+ if self.outermost:
+ return self.model(x)
+ else: # add skip connections
+ return torch.cat([x, self.model(x)], 1)
+
+
+class NLayerDiscriminator(nn.Module):
+ """Defines a PatchGAN discriminator"""
+
+ def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
+ """Construct a PatchGAN discriminator
+
+ Parameters:
+ input_nc (int) -- the number of channels in input images
+ ndf (int) -- the number of filters in the last conv layer
+ n_layers (int) -- the number of conv layers in the discriminator
+ norm_layer -- normalization layer
+ """
+ super(NLayerDiscriminator, self).__init__()
+ if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
+ use_bias = norm_layer.func == nn.InstanceNorm2d
+ else:
+ use_bias = norm_layer == nn.InstanceNorm2d
+
+ kw = 4
+ padw = 1
+ sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
+ nf_mult = 1
+ nf_mult_prev = 1
+ for n in range(1, n_layers): # gradually increase the number of filters
+ nf_mult_prev = nf_mult
+ nf_mult = min(2 ** n, 8)
+ sequence += [
+ nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
+ norm_layer(ndf * nf_mult),
+ nn.LeakyReLU(0.2, True)
+ ]
+
+ nf_mult_prev = nf_mult
+ nf_mult = min(2 ** n_layers, 8)
+ sequence += [
+ nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
+ norm_layer(ndf * nf_mult),
+ nn.LeakyReLU(0.2, True)
+ ]
+
+ sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
+ self.model = nn.Sequential(*sequence)
+
+ def forward(self, input):
+ """Standard forward."""
+ return self.model(input)
+
+
+class PixelDiscriminator(nn.Module):
+ """Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
+
+ def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
+ """Construct a 1x1 PatchGAN discriminator
+
+ Parameters:
+ input_nc (int) -- the number of channels in input images
+ ndf (int) -- the number of filters in the last conv layer
+ norm_layer -- normalization layer
+ """
+ super(PixelDiscriminator, self).__init__()
+ if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
+ use_bias = norm_layer.func == nn.InstanceNorm2d
+ else:
+ use_bias = norm_layer == nn.InstanceNorm2d
+
+ self.net = [
+ nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
+ nn.LeakyReLU(0.2, True),
+ nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
+ norm_layer(ndf * 2),
+ nn.LeakyReLU(0.2, True),
+ nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
+
+ self.net = nn.Sequential(*self.net)
+
+ def forward(self, input):
+ """Standard forward."""
+ return self.net(input)
diff --git a/models/pix2pix_model.py b/models/pix2pix_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..86a2b15826438f15966bf73fa631c9748d91e207
--- /dev/null
+++ b/models/pix2pix_model.py
@@ -0,0 +1,127 @@
+import torch
+from .base_model import BaseModel
+from . import networks
+
+
+class Pix2PixModel(BaseModel):
+ """ This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
+
+ The model training requires '--dataset_mode aligned' dataset.
+ By default, it uses a '--netG unet_256' U-Net generator,
+ a '--netD basic' discriminator (PatchGAN),
+ and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).
+
+ pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
+ """
+ @staticmethod
+ def modify_commandline_options(parser, is_train=True):
+ """Add new dataset-specific options, and rewrite default values for existing options.
+
+ Parameters:
+ parser -- original option parser
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+ Returns:
+ the modified parser.
+
+ For pix2pix, we do not use image buffer
+ The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
+ By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
+ """
+ # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
+ parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
+ if is_train:
+ parser.set_defaults(pool_size=0, gan_mode='vanilla')
+ parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
+
+ return parser
+
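+ # With these defaults, a typical training invocation looks like (paths and names are placeholders):
+ #     python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
+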
+ def __init__(self, opt):
+ """Initialize the pix2pix class.
+
+ Parameters:
+ opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
+ """
+ BaseModel.__init__(self, opt)
+ # specify the training losses you want to print out. The training/test scripts will call base_model.get_current_losses
+ self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
+ # specify the images you want to save/display. The training/test scripts will call base_model.get_current_visuals
+ self.visual_names = ['real_A', 'fake_B', 'real_B']
+ # specify the models you want to save to the disk. The training/test scripts will call base_model.save_networks and base_model.load_networks
+ if self.isTrain:
+ self.model_names = ['G', 'D']
+ else: # during test time, only load G
+ self.model_names = ['G']
+ # define networks (both generator and discriminator)
+ self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
+ not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
+
+ if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc
+ self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
+ opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
+
+ if self.isTrain:
+ # define loss functions
+ self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
+ self.criterionL1 = torch.nn.L1Loss()
+ # initialize optimizers; schedulers will be automatically created by BaseModel.setup.
+ self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
+ self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
+ self.optimizers.append(self.optimizer_G)
+ self.optimizers.append(self.optimizer_D)
+
+ def set_input(self, input):
+ """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+ Parameters:
+ input (dict): include the data itself and its metadata information.
+
+ The option 'direction' can be used to swap images in domain A and domain B.
+ """
+ AtoB = self.opt.direction == 'AtoB'
+ self.real_A = input['A' if AtoB else 'B'].to(self.device)
+ self.real_B = input['B' if AtoB else 'A'].to(self.device)
+ self.image_paths = input['A_paths' if AtoB else 'B_paths']
+
+ def forward(self):
+ """Run forward pass; called by both functions and ."""
+ self.fake_B = self.netG(self.real_A) # G(A)
+
+ def backward_D(self):
+ """Calculate GAN loss for the discriminator"""
+ # Fake; stop backprop to the generator by detaching fake_B
+ fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator
+ pred_fake = self.netD(fake_AB.detach())
+ self.loss_D_fake = self.criterionGAN(pred_fake, False)
+ # Real
+ real_AB = torch.cat((self.real_A, self.real_B), 1)
+ pred_real = self.netD(real_AB)
+ self.loss_D_real = self.criterionGAN(pred_real, True)
+ # combine loss and calculate gradients
+ self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
+ self.loss_D.backward()
+
+ def backward_G(self):
+ """Calculate GAN and L1 loss for the generator"""
+ # First, G(A) should fake the discriminator
+ fake_AB = torch.cat((self.real_A, self.fake_B), 1)
+ pred_fake = self.netD(fake_AB)
+ self.loss_G_GAN = self.criterionGAN(pred_fake, True)
+ # Second, G(A) = B
+ self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
+ # combine loss and calculate gradients
+ self.loss_G = self.loss_G_GAN + self.loss_G_L1
+ self.loss_G.backward()
+
+ def optimize_parameters(self):
+ self.forward() # compute fake images: G(A)
+ # update D
+ self.set_requires_grad(self.netD, True) # enable backprop for D
+ self.optimizer_D.zero_grad() # set D's gradients to zero
+ self.backward_D() # calculate gradients for D
+ self.optimizer_D.step() # update D's weights
+ # update G
+ self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
+ self.optimizer_G.zero_grad() # set G's gradients to zero
+ self.backward_G() # calculate gradients for G
+ self.optimizer_G.step() # update G's weights
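+
+ # Training-loop sketch (how a training script typically drives this model; data/opt wiring omitted):
+ #     model.set_input(data)        # unpack a batch from the dataloader
+ #     model.optimize_parameters()  # forward, backward_D, backward_G, optimizer steps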
diff --git a/models/template_model.py b/models/template_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..68cdaf6a9a2cb321ff2a01949b38adc6fa22e97c
--- /dev/null
+++ b/models/template_model.py
@@ -0,0 +1,99 @@
+"""Model class template
+
+This module provides a template for users to implement custom models.
+You can specify '--model template' to use this model.
+The class name should be consistent with both the filename and its model option.
+The filename should be [model]_model.py
+The class name should be [Model]Model
+It implements a simple image-to-image translation baseline based on regression loss.
+Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
+ min_netG ||netG(data_A) - data_B||_1
+You need to implement the following functions:
+ modify_commandline_options: Add model-specific options and rewrite default values for existing options.
+ __init__: Initialize this model class.
+ set_input: Unpack input data and perform data pre-processing.
+ forward: Run forward pass. This will be called by both optimize_parameters and test.
+ optimize_parameters: Update network weights; it will be called in every training iteration.
+"""
+import torch
+from .base_model import BaseModel
+from . import networks
+
+
+class TemplateModel(BaseModel):
+ @staticmethod
+ def modify_commandline_options(parser, is_train=True):
+ """Add new model-specific options and rewrite default values for existing options.
+
+ Parameters:
+ parser -- the option parser
+ is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+ Returns:
+ the modified parser.
+ """
+ parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset.
+ if is_train:
+ parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model.
+
+ return parser
+
+ def __init__(self, opt):
+ """Initialize this model class.
+
+ Parameters:
+ opt -- training/test options
+
+ A few things can be done here.
+ - (required) call the initialization function of BaseModel
+ - define loss function, visualization images, model names, and optimizers
+ """
+ BaseModel.__init__(self, opt) # call the initialization method of BaseModel
+ # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
+ self.loss_names = ['loss_G']
+ # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
+ self.visual_names = ['data_A', 'data_B', 'output']
+ # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
+ # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
+ self.model_names = ['G']
+ # define networks; you can use opt.isTrain to specify different behaviors for training and test.
+ self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
+ if self.isTrain: # only defined during training time
+ # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
+ # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
+ self.criterionLoss = torch.nn.L1Loss()
+ # define and initialize optimizers. You can define one optimizer for each network.
+ # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+ self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
+ self.optimizers = [self.optimizer]
+
+ # Our program will automatically call BaseModel.setup to define schedulers, load networks, and print networks
+
+ def set_input(self, input):
+ """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+ Parameters:
+ input: a dictionary that contains the data itself and its metadata information.
+ """
+ AtoB = self.opt.direction == 'AtoB' # use the direction option to swap data_A and data_B
+ self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A
+ self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B
+ self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths
+
+ def forward(self):
+ """Run forward pass. This will be called by both functions and ."""
+ self.output = self.netG(self.data_A) # generate output image given the input data_A
+
+ def backward(self):
+ """Calculate losses, gradients, and update network weights; called in every training iteration"""
+ # calculate the intermediate results if necessary; here self.output has been computed during the forward function
+ # calculate loss given the input and intermediate results
+ self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
+ self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G
+
+ def optimize_parameters(self):
+ """Update network weights; it will be called in every training iteration."""
+ self.forward() # first call forward to calculate intermediate results
+ self.optimizer.zero_grad() # clear network G's existing gradients
+ self.backward() # calculate gradients for network G
+ self.optimizer.step() # update network G's weights
diff --git a/models/test_model.py b/models/test_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe15f40176e421270de5cb6b6c711fbc55e933e8
--- /dev/null
+++ b/models/test_model.py
@@ -0,0 +1,69 @@
+from .base_model import BaseModel
+from . import networks
+
+
+class TestModel(BaseModel):
+ """ This TesteModel can be used to generate CycleGAN results for only one direction.
+ This model will automatically set '--dataset_mode single', which only loads the images from one collection.
+
+ See the test instruction for more details.
+ """
+ @staticmethod
+ def modify_commandline_options(parser, is_train=True):
+ """Add new dataset-specific options, and rewrite default values for existing options.
+
+ Parameters:
+ parser -- original option parser
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+ Returns:
+ the modified parser.
+
+ The model can only be used during test time. It requires '--dataset_mode single'.
+ You need to specify the network using the option '--model_suffix'.
+ """
+ assert not is_train, 'TestModel cannot be used during training time'
+ parser.set_defaults(dataset_mode='single')
+ parser.add_argument('--model_suffix', type=str, default='', help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will be loaded as the generator.')
+
+ return parser
+
+ def __init__(self, opt):
+ """Initialize the pix2pix class.
+
+ Parameters:
+ opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
+ """
+ assert(not opt.isTrain)
+ BaseModel.__init__(self, opt)
+ # specify the training losses you want to print out. The training/test scripts will call base_model.get_current_losses
+ self.loss_names = []
+ # specify the images you want to save/display. The training/test scripts will call base_model.get_current_visuals
+ self.visual_names = ['real', 'fake']
+ # specify the models you want to save to the disk. The training/test scripts will call base_model.save_networks and base_model.load_networks
+ self.model_names = ['G' + opt.model_suffix] # only generator is needed.
+ self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
+ opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
+
+ # assigns the model to self.netG_[suffix] so that it can be loaded
+ # please see BaseModel.load_networks
+ setattr(self, 'netG' + opt.model_suffix, self.netG) # store netG in self.
+
+ def set_input(self, input):
+ """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+ Parameters:
+ input: a dictionary that contains the data itself and its metadata information.
+
+ We need to use the 'single_dataset' dataset mode. It only loads images from one domain.
+ """
+ self.real = input['A'].to(self.device)
+ self.image_paths = input['A_paths']
+
+ def forward(self):
+ """Run forward pass."""
+ self.fake = self.netG(self.real) # G(real)
+
+ def optimize_parameters(self):
+ """No optimization for test model."""
+ pass
diff --git a/options/__init__.py b/options/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7eedebe54aa70169fd25951b3034d819e396c90
--- /dev/null
+++ b/options/__init__.py
@@ -0,0 +1 @@
+"""This package options includes option modules: training options, test options, and basic options (used in both training and test)."""
diff --git a/options/__pycache__/__init__.cpython-39.pyc b/options/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fbe682bbd3281f896a3a2e4194a07d0696795d47
Binary files /dev/null and b/options/__pycache__/__init__.cpython-39.pyc differ
diff --git a/options/__pycache__/base_options.cpython-39.pyc b/options/__pycache__/base_options.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e301880a63681461294c693a2339f9689f20c500
Binary files /dev/null and b/options/__pycache__/base_options.cpython-39.pyc differ
diff --git a/options/__pycache__/test_options.cpython-39.pyc b/options/__pycache__/test_options.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b245f1507b14a5fe6611e940fffb169903a7985c
Binary files /dev/null and b/options/__pycache__/test_options.cpython-39.pyc differ
diff --git a/options/base_options.py b/options/base_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..696a488a53491d9cc41756986d8a5eb52342e49b
--- /dev/null
+++ b/options/base_options.py
@@ -0,0 +1,93 @@
+import argparse
+import os
+from util import util
+import torch
+import models
+import data
+
+
+class BaseOptions():
+ """This class defines options used during both training and test time.
+
+ It also implements several helper functions such as parsing, printing, and saving the options.
+ It also gathers additional options defined in functions in both dataset class and model class.
+ """
+
+ def __init__(self):
+ """Reset the class; indicates the class hasn't been initailized"""
+ self.initialized = False
+
+ def initialize(self, parser):
+ """Define the common options that are used in both training and test."""
+ # basic parameters
+ parser.add_argument('--dataroot', required=False, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
+ parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
+ parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
+ parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
+ # model parameters
+ parser.add_argument('--model', type=str, default='colorization', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
+ parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
+ parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
+ parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
+ parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
+ parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
+ parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
+ parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
+ parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
+ parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
+ parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
+ parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
+ # dataset parameters
+ parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
+ parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
+ parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
+ parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
+ parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
+ parser.add_argument('--load_size', type=int, default=None, help='scale images to this size')
+ parser.add_argument('--crop_size', type=int, default=None, help='then crop to this size')
+ parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
+ parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
+ parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
+ parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
+ # additional parameters
+ parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
+ parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
+ parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
+ parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
+ # wandb parameters
+ parser.add_argument('--use_wandb', action='store_true', help='if specified, then init wandb logging')
+ parser.add_argument('--wandb_project_name', type=str, default='CycleGAN-and-pix2pix', help='specify wandb project name')
+ self.initialized = True
+ return parser
+
+ def parse(self):
+ """Parse the options, configure gpu_ids, and print/save them to the disk."""
+ if not self.initialized:
+ parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ self.parser = self.initialize(parser)
+ self.opt = self.parser.parse_args()
+ self.opt.isTrain = self.isTrain # train or test
+
+ # parse gpu ids into a list of ints
+ str_ids = self.opt.gpu_ids.split(',')
+ self.opt.gpu_ids = []
+ for str_id in str_ids:
+ id = int(str_id)
+ if id >= 0:
+ self.opt.gpu_ids.append(id)
+
+ args = vars(self.opt)
+
+ print('------------ Options -------------')
+ for k, v in sorted(args.items()):
+ print('%s: %s' % (str(k), str(v)))
+ print('-------------- End ----------------')
+
+ # save options to the disk
+ expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
+ util.mkdirs(expr_dir)
+ file_name = os.path.join(expr_dir, 'opt.txt')
+ with open(file_name, 'wt') as opt_file:
+ opt_file.write('------------ Options -------------\n')
+ for k, v in sorted(args.items()):
+ opt_file.write('%s: %s\n' % (str(k), str(v)))
+ opt_file.write('-------------- End ----------------\n')
+ return self.opt
+
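+# Usage sketch (how the option classes are typically consumed by the training script):
+#     from options.train_options import TrainOptions
+#     opt = TrainOptions().parse()  # parses argv, prints the options, saves opt.txt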
diff --git a/options/test_options.py b/options/test_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d6295d154ad4af1c03efb973c458380fe9e2a0f
--- /dev/null
+++ b/options/test_options.py
@@ -0,0 +1,92 @@
+import os
+
+import numpy as np
+from PIL import Image
+
+from .base_options import BaseOptions
+
+
+class TestOptions(BaseOptions):
+ """This class includes test options.
+
+ It also includes shared options defined in BaseOptions.
+ """
+
+ def initialize(self, parser):
+ parser = BaseOptions.initialize(self, parser) # define shared options
+ parser.add_argument('--image_path', type=str, help='path to the image to generate from.')
+ parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
+ parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
+ parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
+ parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
+ parser.add_argument('--how_many', type=int, default=50, help='how many test images to run')
+ parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
+ # Dropout and BatchNorm have different behavior during training and test.
+ parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
+ parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
+ # rewrite default values
+ # To avoid cropping, the load_size should be the same as crop_size
+ parser.set_defaults(load_size=parser.get_default('crop_size'))
+ self.isTrain = False
+ return parser
+
+
+
+def generate(opt):
+ """Generate a single image specified by opt.image_path and display the generated result.
+
+ Note: load_image_for_prediction and OneDirectionTestModel are assumed to be provided
+ elsewhere in the project; they are not defined in this module.
+ """
+ image_path = opt.image_path
+ print('generate from {}'.format(image_path))
+
+ data = load_image_for_prediction(opt, image_path)
+
+ model = OneDirectionTestModel()
+ model.initialize(opt=opt)
+ model.set_input(data)
+ model.test()
+
+ visuals = model.get_current_visuals()
+ generated_a = visuals['fake_B']
+
+ image_generated = Image.fromarray(generated_a)
+ image_generated.save(str(os.path.basename(image_path).split('.')[0]) + '_fake_b.jpg')
+
+ combined_result = np.concatenate([img for _, img in visuals.items()], 1)
+ image_combined = Image.fromarray(combined_result)
+ image_combined.save(str(os.path.basename(image_path).split('.')[0]) + '_combined.jpg')
+ image_combined.show()
+ print('generated image saved.')
+
+
+if __name__ == '__main__':
+ generate(TestOptions().parse())
+
new file mode 100644
index 0000000000000000000000000000000000000000..c8d5d2a92a916b385da08fa29a864547e114fb07
--- /dev/null
+++ b/options/train_options.py
@@ -0,0 +1,40 @@
+from .base_options import BaseOptions
+
+
+class TrainOptions(BaseOptions):
+ """This class includes training options.
+
+ It also includes shared options defined in BaseOptions.
+ """
+
+ def initialize(self, parser):
+ parser = BaseOptions.initialize(self, parser)
+ # visdom and HTML visualization parameters
+ parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
+ parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
+ parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
+ parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
+ parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
+ parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
+ parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
+ parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
+ parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
+ # network saving and loading parameters
+ parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
+ parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
+ parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
+ parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
+ parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count; we save the model by epoch_count, epoch_count+save_latest_freq, ...')
+ parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
+ # training parameters
+ parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
+ parser.add_argument('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
+ parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
+ parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
+ parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla | lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
+ parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
+ parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
+ parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
+
+ self.isTrain = True
+ return parser
diff --git a/pix2pix.ipynb b/pix2pix.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..b5d9d85913ba39ecae71d73d9d02656ab174c40f
--- /dev/null
+++ b/pix2pix.ipynb
@@ -0,0 +1,283 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "view-in-github"
+ },
+ "source": [
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "7wNjDKdQy35h"
+ },
+ "source": [
+ "# Install"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "TRm-USlsHgEV"
+ },
+ "outputs": [],
+ "source": [
+ "!git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "Pt3igws3eiVp"
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "os.chdir('pytorch-CycleGAN-and-pix2pix/')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "z1EySlOXwwoa"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -r requirements.txt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "8daqlgVhw29P"
+ },
+ "source": [
+ "# Datasets\n",
+ "\n",
+ "Download one of the official datasets with:\n",
+ "\n",
+ "- `bash ./datasets/download_pix2pix_dataset.sh [cityscapes, night2day, edges2handbags, edges2shoes, facades, maps]`\n",
+ "\n",
+ "Or use your own dataset by creating the appropriate folders and adding in the images. Follow the instructions [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/datasets.md#pix2pix-datasets)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "vrdOettJxaCc"
+ },
+ "outputs": [],
+ "source": [
+ "!bash ./datasets/download_pix2pix_dataset.sh facades"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "gdUz4116xhpm"
+ },
+ "source": [
+ "# Pretrained models\n",
+ "\n",
+ "Download one of the official pretrained models with:\n",
+ "\n",
+ "- `bash ./scripts/download_pix2pix_model.sh [edges2shoes, sat2map, map2sat, facades_label2photo, and day2night]`\n",
+ "\n",
+ "Or add your own pretrained model to `./checkpoints/{NAME}_pretrained/latest_net_G.pt`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "GC2DEP4M0OsS"
+ },
+ "outputs": [],
+ "source": [
+ "!bash ./scripts/download_pix2pix_model.sh facades_label2photo"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "yFw1kDQBx3LN"
+ },
+ "source": [
+ "# Training\n",
+ "\n",
+ "- `python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA`\n",
+ "\n",
+ "Change the `--dataroot` and `--name` to your own dataset's path and model's name. Use `--gpu_ids 0,1,..` to train on multiple GPUs and `--batch_size` to change the batch size. Add `--direction BtoA` if you want to train a model to transfrom from class B to A."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "0sp7TCT2x9dB"
+ },
+ "outputs": [],
+ "source": [
+ "!python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA --display_id -1"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "9UkcaFZiyASl"
+ },
+ "source": [
+ "# Testing\n",
+ "\n",
+ "- `python test.py --dataroot ./datasets/facades --direction BtoA --model pix2pix --name facades_pix2pix`\n",
+ "\n",
+ "Change the `--dataroot`, `--name`, and `--direction` to be consistent with your trained model's configuration and how you want to transform images.\n",
+ "\n",
+ "> from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix:\n",
+ "> Note that we specified --direction BtoA as Facades dataset's A to B direction is photos to labels.\n",
+ "\n",
+ "> If you would like to apply a pre-trained model to a collection of input images (rather than image pairs), please use --model test option. See ./scripts/test_single.sh for how to apply a model to Facade label maps (stored in the directory facades/testB).\n",
+ "\n",
+ "> See a list of currently available models at ./scripts/download_pix2pix_model.sh"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "mey7o6j-0368"
+ },
+ "outputs": [],
+ "source": [
+ "!ls checkpoints/"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "uCsKkEq0yGh0"
+ },
+ "outputs": [],
+ "source": [
+ "!python test.py --dataroot ./datasets/facades --direction BtoA --model pix2pix --name facades_label2photo_pretrained --use_wandb"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "OzSKIPUByfiN"
+ },
+ "source": [
+ "# Visualize"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "9Mgg8raPyizq"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "img = plt.imread('./results/facades_label2photo_pretrained/test_latest/images/100_fake_B.png')\n",
+ "plt.imshow(img)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "0G3oVH9DyqLQ"
+ },
+ "outputs": [],
+ "source": [
+ "img = plt.imread('./results/facades_label2photo_pretrained/test_latest/images/100_real_A.png')\n",
+ "plt.imshow(img)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "ErK5OC1j1LH4"
+ },
+ "outputs": [],
+ "source": [
+ "img = plt.imread('./results/facades_label2photo_pretrained/test_latest/images/100_real_B.png')\n",
+ "plt.imshow(img)"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "collapsed_sections": [],
+ "include_colab_link": true,
+ "name": "pix2pix",
+ "provenance": []
+ },
+ "environment": {
+ "name": "tf2-gpu.2-3.m74",
+ "type": "gcloud",
+ "uri": "gcr.io/deeplearning-platform-release/tf2-gpu.2-3:m74"
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.10"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..082a9b57f2190810ac87267ef749335932a237aa
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,5 @@
+torch>=1.4.0
+torchvision>=0.5.0
+dominate>=2.4.0
+visdom>=0.1.8.8
+wandb
diff --git a/scripts/conda_deps.sh b/scripts/conda_deps.sh
new file mode 100644
index 0000000000000000000000000000000000000000..72df436f0e9f3d0419a3e9e818c5be87b3a07fbb
--- /dev/null
+++ b/scripts/conda_deps.sh
@@ -0,0 +1,4 @@
+set -ex
+conda install numpy pyyaml mkl mkl-include setuptools cmake cffi typing
+conda install pytorch torchvision -c pytorch # add cuda90 if CUDA 9
+conda install visdom dominate -c conda-forge # install visdom and dominate
diff --git a/scripts/download_cyclegan_model.sh b/scripts/download_cyclegan_model.sh
new file mode 100644
index 0000000000000000000000000000000000000000..26e198a44aa5f788b7c6268005570b0e916d430c
--- /dev/null
+++ b/scripts/download_cyclegan_model.sh
@@ -0,0 +1,11 @@
+FILE=$1
+
+echo "Note: available models are apple2orange, orange2apple, summer2winter_yosemite, winter2summer_yosemite, horse2zebra, zebra2horse, monet2photo, style_monet, style_cezanne, style_ukiyoe, style_vangogh, sat2map, map2sat, cityscapes_photo2label, cityscapes_label2photo, facades_photo2label, facades_label2photo, iphone2dslr_flower"
+
+echo "Specified [$FILE]"
+
+mkdir -p ./checkpoints/${FILE}_pretrained
+MODEL_FILE=./checkpoints/${FILE}_pretrained/latest_net_G.pth
+URL=http://efrosgans.eecs.berkeley.edu/cyclegan/pretrained_models/$FILE.pth
+
+wget -N $URL -O $MODEL_FILE
diff --git a/scripts/download_pix2pix_model.sh b/scripts/download_pix2pix_model.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6b21232f074c7b9ee05bc04b93a803d1c0030ed1
--- /dev/null
+++ b/scripts/download_pix2pix_model.sh
@@ -0,0 +1,10 @@
+FILE=$1
+
+echo "Note: available models are edges2shoes, sat2map, map2sat, facades_label2photo, and day2night"
+echo "Specified [$FILE]"
+
+mkdir -p ./checkpoints/${FILE}_pretrained
+MODEL_FILE=./checkpoints/${FILE}_pretrained/latest_net_G.pth
+URL=http://efrosgans.eecs.berkeley.edu/pix2pix/models-pytorch/$FILE.pth
+
+wget -N $URL -O $MODEL_FILE
diff --git a/scripts/edges/PostprocessHED.m b/scripts/edges/PostprocessHED.m
new file mode 100644
index 0000000000000000000000000000000000000000..78a99106ea6d769f271acbfc992d1b5965fc5031
--- /dev/null
+++ b/scripts/edges/PostprocessHED.m
@@ -0,0 +1,77 @@
+%%% Prerequisites
+% You need to get the cpp file edgesNmsMex.cpp from https://raw.githubusercontent.com/pdollar/edges/master/private/edgesNmsMex.cpp
+% and compile it in Matlab: mex edgesNmsMex.cpp
+% You also need to download and install Piotr's Computer Vision Matlab Toolbox: https://pdollar.github.io/toolbox/
+
+%%% parameters
+% hed_mat_dir: the hed mat file directory (the output of 'batch_hed.py')
+% edge_dir: the output HED edges directory
+% image_width: resize the edge map to [image_width, image_width]
+% threshold: threshold for image binarization (default 25.0/255.0)
+% small_edge: remove small edges (default 5)
+
+function [] = PostprocessHED(hed_mat_dir, edge_dir, image_width, threshold, small_edge)
+
+if ~exist(edge_dir, 'dir')
+ mkdir(edge_dir);
+end
+fileList = dir(fullfile(hed_mat_dir, '*.mat'));
+nFiles = numel(fileList);
+fprintf('find %d mat files\n', nFiles);
+
+for n = 1 : nFiles
+ if mod(n, 1000) == 0
+ fprintf('process %d/%d images\n', n, nFiles);
+ end
+ fileName = fileList(n).name;
+ filePath = fullfile(hed_mat_dir, fileName);
+ jpgName = strrep(fileName, '.mat', '.jpg');
+ edge_path = fullfile(edge_dir, jpgName);
+
+ if ~exist(edge_path, 'file')
+ E = GetEdge(filePath);
+ E = imresize(E,[image_width,image_width]);
+ E_simple = SimpleEdge(E, threshold, small_edge);
+ E_simple = uint8(E_simple*255);
+ imwrite(E_simple, edge_path, 'Quality',100);
+ end
+end
+end
+
+
+
+
+function [E] = GetEdge(filePath)
+load(filePath);
+E = 1-edge_predict;
+end
+
+function [E4] = SimpleEdge(E, threshold, small_edge)
+if nargin <= 1
+ threshold = 25.0/255.0;
+end
+
+if nargin <= 2
+ small_edge = 5;
+end
+
+if ndims(E) == 3
+ E = E(:,:,1);
+end
+
+E1 = 1 - E;
+E2 = EdgeNMS(E1);
+E3 = double(E2>=max(eps,threshold));
+E3 = bwmorph(E3,'thin',inf);
+E4 = bwareaopen(E3, small_edge);
+E4=1-E4;
+end
+
+function [E_nms] = EdgeNMS( E )
+E=single(E);
+[Ox,Oy] = gradient2(convTri(E,4));
+[Oxx,~] = gradient2(Ox);
+[Oxy,Oyy] = gradient2(Oy);
+O = mod(atan(Oyy.*sign(-Oxy)./(Oxx+1e-5)),pi);
+E_nms = edgesNmsMex(E,O,1,5,1.01,1);
+end
diff --git a/scripts/edges/batch_hed.py b/scripts/edges/batch_hed.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc611e1c1db9e5b4a6648a4605a93a9a9754a784
--- /dev/null
+++ b/scripts/edges/batch_hed.py
@@ -0,0 +1,81 @@
+# HED batch processing script; modified from https://github.com/s9xie/hed/blob/master/examples/hed/HED-tutorial.ipynb
+# Step 1: download the hed repo: https://github.com/s9xie/hed
+# Step 2: download the models and protoxt, and put them under {caffe_root}/examples/hed/
+# Step 3: put this script under {caffe_root}/examples/hed/
+# Step 4: run the following script:
+# python batch_hed.py --images_dir=/data/to/path/photos/ --hed_mat_dir=/data/to/path/hed_mat_files/
+# The code sometimes crashes after the computation is done; the error looks like "Check failed: ... driver shutting down". You can safely kill the job at that point.
+# Large images can exhaust GPU memory, so resize the images before running this script if needed.
+# Step 5: run the MATLAB post-processing script "PostprocessHED.m"
+
+
+import caffe
+import numpy as np
+from PIL import Image
+import os
+import argparse
+import sys
+import scipy.io as sio
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='batch proccesing: photos->edges')
+ parser.add_argument('--caffe_root', dest='caffe_root', help='caffe root', default='../../', type=str)
+ parser.add_argument('--caffemodel', dest='caffemodel', help='caffemodel', default='./hed_pretrained_bsds.caffemodel', type=str)
+ parser.add_argument('--prototxt', dest='prototxt', help='caffe prototxt file', default='./deploy.prototxt', type=str)
+ parser.add_argument('--images_dir', dest='images_dir', help='directory to store input photos', type=str)
+ parser.add_argument('--hed_mat_dir', dest='hed_mat_dir', help='directory to store output hed edges in mat file', type=str)
+ parser.add_argument('--border', dest='border', help='padding border', type=int, default=128)
+ parser.add_argument('--gpu_id', dest='gpu_id', help='gpu id', type=int, default=1)
+ args = parser.parse_args()
+ return args
+
+
+args = parse_args()
+for arg in vars(args):
+ print('[%s] =' % arg, getattr(args, arg))
+# Make sure that caffe is on the python path:
+caffe_root = args.caffe_root # this file is expected to be in {caffe_root}/examples/hed/
+sys.path.insert(0, caffe_root + 'python')
+
+
+if not os.path.exists(args.hed_mat_dir):
+ print('create output directory %s' % args.hed_mat_dir)
+ os.makedirs(args.hed_mat_dir)
+
+imgList = os.listdir(args.images_dir)
+nImgs = len(imgList)
+print('#images = %d' % nImgs)
+
+caffe.set_mode_gpu()
+caffe.set_device(args.gpu_id)
+# load net
+net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
+# pad border
+border = args.border
+
+for i in range(nImgs):
+ if i % 500 == 0:
+ print('processing image %d/%d' % (i, nImgs))
+ im = Image.open(os.path.join(args.images_dir, imgList[i]))
+
+ in_ = np.array(im, dtype=np.float32)
+ in_ = np.pad(in_, ((border, border), (border, border), (0, 0)), 'reflect')
+
+ in_ = in_[:, :, 0:3]
+ in_ = in_[:, :, ::-1]
+ in_ -= np.array((104.00698793, 116.66876762, 122.67891434))
+ in_ = in_.transpose((2, 0, 1))
+ # to test on the CPU instead, remove the caffe.set_mode_gpu() and caffe.set_device() calls above
+
+ # shape for input (data blob is N x C x H x W), set data
+ net.blobs['data'].reshape(1, *in_.shape)
+ net.blobs['data'].data[...] = in_
+ # run net and take argmax for prediction
+ net.forward()
+ fuse = net.blobs['sigmoid-fuse'].data[0][0, :, :]
+ # get rid of the border
+ fuse = fuse[(border + 35):(-border + 35), (border + 35):(-border + 35)]
+ # save hed file to the disk
+ name, ext = os.path.splitext(imgList[i])
+ sio.savemat(os.path.join(args.hed_mat_dir, name + '.mat'), {'edge_predict': fuse})
diff --git a/scripts/eval_cityscapes/caffemodel/deploy.prototxt b/scripts/eval_cityscapes/caffemodel/deploy.prototxt
new file mode 100644
index 0000000000000000000000000000000000000000..f4d7e71e9243e41b36544424add6007b1eafc849
--- /dev/null
+++ b/scripts/eval_cityscapes/caffemodel/deploy.prototxt
@@ -0,0 +1,769 @@
+layer {
+ name: "data"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape {
+ dim: 1
+ dim: 3
+ dim: 500
+ dim: 500
+ }
+ }
+}
+layer {
+ name: "conv1_1"
+ type: "Convolution"
+ bottom: "data"
+ top: "conv1_1"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 64
+ pad: 100
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu1_1"
+ type: "ReLU"
+ bottom: "conv1_1"
+ top: "conv1_1"
+}
+layer {
+ name: "conv1_2"
+ type: "Convolution"
+ bottom: "conv1_1"
+ top: "conv1_2"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 64
+ pad: 1
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu1_2"
+ type: "ReLU"
+ bottom: "conv1_2"
+ top: "conv1_2"
+}
+layer {
+ name: "pool1"
+ type: "Pooling"
+ bottom: "conv1_2"
+ top: "pool1"
+ pooling_param {
+ pool: MAX
+ kernel_size: 2
+ stride: 2
+ }
+}
+layer {
+ name: "conv2_1"
+ type: "Convolution"
+ bottom: "pool1"
+ top: "conv2_1"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 128
+ pad: 1
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu2_1"
+ type: "ReLU"
+ bottom: "conv2_1"
+ top: "conv2_1"
+}
+layer {
+ name: "conv2_2"
+ type: "Convolution"
+ bottom: "conv2_1"
+ top: "conv2_2"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 128
+ pad: 1
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu2_2"
+ type: "ReLU"
+ bottom: "conv2_2"
+ top: "conv2_2"
+}
+layer {
+ name: "pool2"
+ type: "Pooling"
+ bottom: "conv2_2"
+ top: "pool2"
+ pooling_param {
+ pool: MAX
+ kernel_size: 2
+ stride: 2
+ }
+}
+layer {
+ name: "conv3_1"
+ type: "Convolution"
+ bottom: "pool2"
+ top: "conv3_1"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 256
+ pad: 1
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu3_1"
+ type: "ReLU"
+ bottom: "conv3_1"
+ top: "conv3_1"
+}
+layer {
+ name: "conv3_2"
+ type: "Convolution"
+ bottom: "conv3_1"
+ top: "conv3_2"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 256
+ pad: 1
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu3_2"
+ type: "ReLU"
+ bottom: "conv3_2"
+ top: "conv3_2"
+}
+layer {
+ name: "conv3_3"
+ type: "Convolution"
+ bottom: "conv3_2"
+ top: "conv3_3"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 256
+ pad: 1
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu3_3"
+ type: "ReLU"
+ bottom: "conv3_3"
+ top: "conv3_3"
+}
+layer {
+ name: "pool3"
+ type: "Pooling"
+ bottom: "conv3_3"
+ top: "pool3"
+ pooling_param {
+ pool: MAX
+ kernel_size: 2
+ stride: 2
+ }
+}
+layer {
+ name: "conv4_1"
+ type: "Convolution"
+ bottom: "pool3"
+ top: "conv4_1"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 512
+ pad: 1
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu4_1"
+ type: "ReLU"
+ bottom: "conv4_1"
+ top: "conv4_1"
+}
+layer {
+ name: "conv4_2"
+ type: "Convolution"
+ bottom: "conv4_1"
+ top: "conv4_2"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 512
+ pad: 1
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu4_2"
+ type: "ReLU"
+ bottom: "conv4_2"
+ top: "conv4_2"
+}
+layer {
+ name: "conv4_3"
+ type: "Convolution"
+ bottom: "conv4_2"
+ top: "conv4_3"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 512
+ pad: 1
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu4_3"
+ type: "ReLU"
+ bottom: "conv4_3"
+ top: "conv4_3"
+}
+layer {
+ name: "pool4"
+ type: "Pooling"
+ bottom: "conv4_3"
+ top: "pool4"
+ pooling_param {
+ pool: MAX
+ kernel_size: 2
+ stride: 2
+ }
+}
+layer {
+ name: "conv5_1"
+ type: "Convolution"
+ bottom: "pool4"
+ top: "conv5_1"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 512
+ pad: 1
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu5_1"
+ type: "ReLU"
+ bottom: "conv5_1"
+ top: "conv5_1"
+}
+layer {
+ name: "conv5_2"
+ type: "Convolution"
+ bottom: "conv5_1"
+ top: "conv5_2"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 512
+ pad: 1
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu5_2"
+ type: "ReLU"
+ bottom: "conv5_2"
+ top: "conv5_2"
+}
+layer {
+ name: "conv5_3"
+ type: "Convolution"
+ bottom: "conv5_2"
+ top: "conv5_3"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 512
+ pad: 1
+ kernel_size: 3
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu5_3"
+ type: "ReLU"
+ bottom: "conv5_3"
+ top: "conv5_3"
+}
+layer {
+ name: "pool5"
+ type: "Pooling"
+ bottom: "conv5_3"
+ top: "pool5"
+ pooling_param {
+ pool: MAX
+ kernel_size: 2
+ stride: 2
+ }
+}
+layer {
+ name: "fc6_cs"
+ type: "Convolution"
+ bottom: "pool5"
+ top: "fc6_cs"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 4096
+ pad: 0
+ kernel_size: 7
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu6_cs"
+ type: "ReLU"
+ bottom: "fc6_cs"
+ top: "fc6_cs"
+}
+layer {
+ name: "fc7_cs"
+ type: "Convolution"
+ bottom: "fc6_cs"
+ top: "fc7_cs"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 4096
+ pad: 0
+ kernel_size: 1
+ stride: 1
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layer {
+ name: "relu7_cs"
+ type: "ReLU"
+ bottom: "fc7_cs"
+ top: "fc7_cs"
+}
+layer {
+ name: "score_fr"
+ type: "Convolution"
+ bottom: "fc7_cs"
+ top: "score_fr"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 20
+ pad: 0
+ kernel_size: 1
+ weight_filler {
+ type: "xavier"
+ }
+ bias_filler {
+ type: "constant"
+ }
+ }
+}
+layer {
+ name: "upscore2"
+ type: "Deconvolution"
+ bottom: "score_fr"
+ top: "upscore2"
+ param {
+ lr_mult: 1
+ }
+ convolution_param {
+ num_output: 20
+ bias_term: false
+ kernel_size: 4
+ stride: 2
+ weight_filler {
+ type: "xavier"
+ }
+ bias_filler {
+ type: "constant"
+ }
+ }
+}
+layer {
+ name: "score_pool4"
+ type: "Convolution"
+ bottom: "pool4"
+ top: "score_pool4"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 20
+ pad: 0
+ kernel_size: 1
+ weight_filler {
+ type: "xavier"
+ }
+ bias_filler {
+ type: "constant"
+ }
+ }
+}
+layer {
+ name: "score_pool4c"
+ type: "Crop"
+ bottom: "score_pool4"
+ bottom: "upscore2"
+ top: "score_pool4c"
+ crop_param {
+ axis: 2
+ offset: 5
+ }
+}
+layer {
+ name: "fuse_pool4"
+ type: "Eltwise"
+ bottom: "upscore2"
+ bottom: "score_pool4c"
+ top: "fuse_pool4"
+ eltwise_param {
+ operation: SUM
+ }
+}
+layer {
+ name: "upscore_pool4"
+ type: "Deconvolution"
+ bottom: "fuse_pool4"
+ top: "upscore_pool4"
+ param {
+ lr_mult: 1
+ }
+ convolution_param {
+ num_output: 20
+ bias_term: false
+ kernel_size: 4
+ stride: 2
+ weight_filler {
+ type: "xavier"
+ }
+ bias_filler {
+ type: "constant"
+ }
+ }
+}
+layer {
+ name: "score_pool3"
+ type: "Convolution"
+ bottom: "pool3"
+ top: "score_pool3"
+ param {
+ lr_mult: 1
+ decay_mult: 1
+ }
+ param {
+ lr_mult: 2
+ decay_mult: 0
+ }
+ convolution_param {
+ num_output: 20
+ pad: 0
+ kernel_size: 1
+ weight_filler {
+ type: "xavier"
+ }
+ bias_filler {
+ type: "constant"
+ }
+ }
+}
+layer {
+ name: "score_pool3c"
+ type: "Crop"
+ bottom: "score_pool3"
+ bottom: "upscore_pool4"
+ top: "score_pool3c"
+ crop_param {
+ axis: 2
+ offset: 9
+ }
+}
+layer {
+ name: "fuse_pool3"
+ type: "Eltwise"
+ bottom: "upscore_pool4"
+ bottom: "score_pool3c"
+ top: "fuse_pool3"
+ eltwise_param {
+ operation: SUM
+ }
+}
+layer {
+ name: "upscore8"
+ type: "Deconvolution"
+ bottom: "fuse_pool3"
+ top: "upscore8"
+ param {
+ lr_mult: 1
+ }
+ convolution_param {
+ num_output: 20
+ bias_term: false
+ kernel_size: 16
+ stride: 8
+ weight_filler {
+ type: "xavier"
+ }
+ bias_filler {
+ type: "constant"
+ }
+ }
+}
+layer {
+ name: "score"
+ type: "Crop"
+ bottom: "upscore8"
+ bottom: "data"
+ top: "score"
+ crop_param {
+ axis: 2
+ offset: 31
+ }
+}
diff --git a/scripts/eval_cityscapes/cityscapes.py b/scripts/eval_cityscapes/cityscapes.py
new file mode 100644
index 0000000000000000000000000000000000000000..05b14715d3bc265e56836f84118582360f237d99
--- /dev/null
+++ b/scripts/eval_cityscapes/cityscapes.py
@@ -0,0 +1,141 @@
+# The following code is modified from https://github.com/shelhamer/clockwork-fcn
+import sys
+import os
+import glob
+import numpy as np
+from PIL import Image
+
+
+class cityscapes:
+ def __init__(self, data_path):
+ # data_path something like /data2/cityscapes
+ self.dir = data_path
+ self.classes = ['road', 'sidewalk', 'building', 'wall', 'fence',
+ 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain',
+ 'sky', 'person', 'rider', 'car', 'truck',
+ 'bus', 'train', 'motorcycle', 'bicycle']
+ self.mean = np.array((72.78044, 83.21195, 73.45286), dtype=np.float32)
+ # import cityscapes label helper and set up label mappings
+ sys.path.insert(0, '{}/scripts/helpers/'.format(self.dir))
+ labels = __import__('labels')
+ self.id2trainId = {label.id: label.trainId for label in labels.labels} # dictionary mapping from raw IDs to train IDs
+ self.trainId2color = {label.trainId: label.color for label in labels.labels} # dictionary mapping train IDs to colors as 3-tuples
+
+ def get_dset(self, split):
+ '''
+ List images as (city, id) for the specified split
+
+ TODO(shelhamer) generate splits from cityscapes itself, instead of
+ relying on these separately made text files.
+ '''
+ if split == 'train':
+ dataset = open('{}/ImageSets/segFine/train.txt'.format(self.dir)).read().splitlines()
+ else:
+ dataset = open('{}/ImageSets/segFine/val.txt'.format(self.dir)).read().splitlines()
+ return [(item.split('/')[0], item.split('/')[1]) for item in dataset]
+
+ def load_image(self, split, city, idx):
+ im = Image.open('{}/leftImg8bit_sequence/{}/{}/{}_leftImg8bit.png'.format(self.dir, split, city, idx))
+ return im
+
+ def assign_trainIds(self, label):
+ """
+ Map the given label IDs to the train IDs appropriate for training
+ Use the label mapping provided in labels.py from the cityscapes scripts
+ """
+ label = np.array(label, dtype=np.float32)
+ if sys.version_info[0] < 3:
+ for k, v in self.id2trainId.iteritems():
+ label[label == k] = v
+ else:
+ for k, v in self.id2trainId.items():
+ label[label == k] = v
+ return label
+
+ def load_label(self, split, city, idx):
+ """
+ Load label image as 1 x height x width integer array of label indices.
+ The leading singleton dimension is required by the loss.
+ """
+ label = Image.open('{}/gtFine/{}/{}/{}_gtFine_labelIds.png'.format(self.dir, split, city, idx))
+ label = self.assign_trainIds(label) # get proper labels for eval
+ label = np.array(label, dtype=np.uint8)
+ label = label[np.newaxis, ...]
+ return label
+
+ def preprocess(self, im):
+ """
+ Preprocess loaded image (by load_image) for Caffe:
+ - cast to float
+ - switch channels RGB -> BGR
+ - subtract mean
+ - transpose to channel x height x width order
+ """
+ in_ = np.array(im, dtype=np.float32)
+ in_ = in_[:, :, ::-1]
+ in_ -= self.mean
+ in_ = in_.transpose((2, 0, 1))
+ return in_
+
+ def palette(self, label):
+ '''
+ Map trainIds to colors as specified in labels.py
+ '''
+ if label.ndim == 3:
+ label = label[0]
+ color = np.empty((label.shape[0], label.shape[1], 3))
+ if sys.version_info[0] < 3:
+ for k, v in self.trainId2color.iteritems():
+ color[label == k, :] = v
+ else:
+ for k, v in self.trainId2color.items():
+ color[label == k, :] = v
+ return color
+
+ @staticmethod
+ def make_boundaries(label, thickness=None):
+ """
+ Input is an image label, output is a numpy array mask encoding the boundaries of the objects
+ Extract pixels at the true boundary by dilation - erosion of label.
+ Don't just pick the void label as it is not exclusive to the boundaries.
+ """
+ assert(thickness is not None)
+ import skimage.morphology as skm
+ void = 255
+ mask = np.logical_and(label > 0, label != void)[0]
+ selem = skm.disk(thickness)
+ boundaries = np.logical_xor(skm.dilation(mask, selem),
+ skm.erosion(mask, selem))
+ return boundaries
+
+ def list_label_frames(self, split):
+ """
+ Select labeled frames from a split for evaluation
+ collected as (city, shot, idx) tuples
+ """
+ def file2idx(f):
+ """Helper to convert file path into frame ID"""
+ city, shot, frame = (os.path.basename(f).split('_')[:3])
+ return "_".join([city, shot, frame])
+ frames = []
+ cities = [os.path.basename(f) for f in glob.glob('{}/gtFine/{}/*'.format(self.dir, split))]
+ for c in cities:
+ files = sorted(glob.glob('{}/gtFine/{}/{}/*labelIds.png'.format(self.dir, split, c)))
+ frames.extend([file2idx(f) for f in files])
+ return frames
+
+ def collect_frame_sequence(self, split, idx, length):
+ """
+ Collect sequence of frames preceding (and including) a labeled frame
+ as a list of Images.
+
+ Note: 19 preceding frames are provided for each labeled frame.
+ """
+ SEQ_LEN = length
+ city, shot, frame = idx.split('_')
+ frame = int(frame)
+ frame_seq = []
+ for i in range(frame - SEQ_LEN, frame + 1):
+ frame_path = '{0}/leftImg8bit_sequence/val/{1}/{1}_{2}_{3:0>6d}_leftImg8bit.png'.format(
+ self.dir, city, shot, i)
+ frame_seq.append(Image.open(frame_path))
+ return frame_seq
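+
+# Illustrative usage sketch (not part of the original module; the dataset path and frame ID
+# are hypothetical). evaluate.py drives this class roughly as follows:
+#   CS = cityscapes('/path/to/cityscapes')
+#   frames = CS.list_label_frames('val')            # e.g. ['frankfurt_000000_000294', ...]
+#   city = frames[0].split('_')[0]
+#   label = CS.load_label('val', city, frames[0])   # 1 x H x W array of train IDs
+#   in_ = CS.preprocess(CS.load_image('val', city, frames[0]))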
diff --git a/scripts/eval_cityscapes/download_fcn8s.sh b/scripts/eval_cityscapes/download_fcn8s.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d769d8960bda1d85359aa125363e6f71980ff610
--- /dev/null
+++ b/scripts/eval_cityscapes/download_fcn8s.sh
@@ -0,0 +1,3 @@
+URL=http://efrosgans.eecs.berkeley.edu/pix2pix_extra/fcn-8s-cityscapes.caffemodel
+OUTPUT_FILE=./scripts/eval_cityscapes/caffemodel/fcn-8s-cityscapes.caffemodel
+wget -N $URL -O $OUTPUT_FILE
diff --git a/scripts/eval_cityscapes/evaluate.py b/scripts/eval_cityscapes/evaluate.py
new file mode 100644
index 0000000000000000000000000000000000000000..c53b43934ee833dd6b8bbc5b5615198daad244a0
--- /dev/null
+++ b/scripts/eval_cityscapes/evaluate.py
@@ -0,0 +1,69 @@
+import os
+import caffe
+import argparse
+import numpy as np
+import scipy.misc
+from PIL import Image
+from util import segrun, fast_hist, get_scores
+from cityscapes import cityscapes
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--cityscapes_dir", type=str, required=True, help="Path to the original cityscapes dataset")
+parser.add_argument("--result_dir", type=str, required=True, help="Path to the generated images to be evaluated")
+parser.add_argument("--output_dir", type=str, required=True, help="Where to save the evaluation results")
+parser.add_argument("--caffemodel_dir", type=str, default='./scripts/eval_cityscapes/caffemodel/', help="Where the FCN-8s caffemodel stored")
+parser.add_argument("--gpu_id", type=int, default=0, help="Which gpu id to use")
+parser.add_argument("--split", type=str, default='val', help="Data split to be evaluated")
+parser.add_argument("--save_output_images", type=int, default=0, help="Whether to save the FCN output images")
+args = parser.parse_args()
+
+
+def main():
+ if not os.path.isdir(args.output_dir):
+ os.makedirs(args.output_dir)
+ if args.save_output_images > 0:
+ output_image_dir = os.path.join(args.output_dir, 'image_outputs/')
+ if not os.path.isdir(output_image_dir):
+ os.makedirs(output_image_dir)
+ CS = cityscapes(args.cityscapes_dir)
+ n_cl = len(CS.classes)
+ label_frames = CS.list_label_frames(args.split)
+ caffe.set_device(args.gpu_id)
+ caffe.set_mode_gpu()
+ net = caffe.Net(args.caffemodel_dir + '/deploy.prototxt',
+ args.caffemodel_dir + 'fcn-8s-cityscapes.caffemodel',
+ caffe.TEST)
+
+ hist_perframe = np.zeros((n_cl, n_cl))
+ for i, idx in enumerate(label_frames):
+ if i % 10 == 0:
+ print('Evaluating: %d/%d' % (i, len(label_frames)))
+ city = idx.split('_')[0]
+ # idx is city_shot_frame
+ label = CS.load_label(args.split, city, idx)
+ im_file = args.result_dir + '/' + idx + '_leftImg8bit.png'
+ im = np.array(Image.open(im_file))
+ im = scipy.misc.imresize(im, (label.shape[1], label.shape[2]))
+ # im = np.array(Image.fromarray(im).resize((label.shape[1], label.shape[2]))) # Note: scipy.misc.imresize is deprecated, but we still use it for reproducibility.
+ out = segrun(net, CS.preprocess(im))
+ hist_perframe += fast_hist(label.flatten(), out.flatten(), n_cl)
+ if args.save_output_images > 0:
+ label_im = CS.palette(label)
+ pred_im = CS.palette(out)
+ scipy.misc.imsave(output_image_dir + '/' + str(i) + '_pred.jpg', pred_im)
+ scipy.misc.imsave(output_image_dir + '/' + str(i) + '_gt.jpg', label_im)
+ scipy.misc.imsave(output_image_dir + '/' + str(i) + '_input.jpg', im)
+
+ mean_pixel_acc, mean_class_acc, mean_class_iou, per_class_acc, per_class_iou = get_scores(hist_perframe)
+ with open(args.output_dir + '/evaluation_results.txt', 'w') as f:
+ f.write('Mean pixel accuracy: %f\n' % mean_pixel_acc)
+ f.write('Mean class accuracy: %f\n' % mean_class_acc)
+ f.write('Mean class IoU: %f\n' % mean_class_iou)
+ f.write('************ Per class numbers below ************\n')
+ for i, cl in enumerate(CS.classes):
+ cl = cl.ljust(15)  # pad the class name so the per-class lines align
+ f.write('%s: acc = %f, iou = %f\n' % (cl, per_class_acc[i], per_class_iou[i]))
+
+
+main()
diff --git a/scripts/eval_cityscapes/util.py b/scripts/eval_cityscapes/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fce27fd6eba907d4caaa185d15884aef82e8a87
--- /dev/null
+++ b/scripts/eval_cityscapes/util.py
@@ -0,0 +1,42 @@
+# The following code is modified from https://github.com/shelhamer/clockwork-fcn
+import numpy as np
+
+
+def get_out_scoremap(net):
+ return net.blobs['score'].data[0].argmax(axis=0).astype(np.uint8)
+
+
+def feed_net(net, in_):
+ """
+ Load prepared input into net.
+ """
+ net.blobs['data'].reshape(1, *in_.shape)
+ net.blobs['data'].data[...] = in_
+
+
+def segrun(net, in_):
+ feed_net(net, in_)
+ net.forward()
+ return get_out_scoremap(net)
+
+
+def fast_hist(a, b, n):
+ k = np.where((a >= 0) & (a < n))[0]
+ bc = np.bincount(n * a[k].astype(int) + b[k], minlength=n**2)
+ if len(bc) != n**2:
+ # ignore this example if dimension mismatch
+ return 0
+ return bc.reshape(n, n)
+
+
+def get_scores(hist):
+ # Mean pixel accuracy
+ acc = np.diag(hist).sum() / (hist.sum() + 1e-12)
+
+ # Per class accuracy
+ cl_acc = np.diag(hist) / (hist.sum(1) + 1e-12)
+
+ # Per class IoU
+ iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-12)
+
+ return acc, np.nanmean(cl_acc), np.nanmean(iu), cl_acc, iu
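+
+
+# Minimal sanity-check sketch (not part of the original script): fast_hist accumulates an
+# n x n confusion matrix from ground-truth/prediction label vectors, and get_scores derives
+# pixel accuracy, per-class accuracy, and per-class IoU from it.
+if __name__ == '__main__':
+    gt = np.array([0, 0, 1, 1])
+    pred = np.array([0, 1, 1, 1])
+    hist = fast_hist(gt, pred, 2)            # [[1, 1], [0, 2]]
+    acc, mean_cl_acc, mean_iou, _, _ = get_scores(hist)
+    print(acc, mean_cl_acc, mean_iou)        # ~0.75, ~0.75, ~0.583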
diff --git a/scripts/install_deps.sh b/scripts/install_deps.sh
new file mode 100644
index 0000000000000000000000000000000000000000..801c7dded7a27cf850f2671b020bcdc82641a7c8
--- /dev/null
+++ b/scripts/install_deps.sh
@@ -0,0 +1,3 @@
+set -ex
+pip install visdom
+pip install dominate
diff --git a/scripts/test_before_push.py b/scripts/test_before_push.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccd8788fd8a6c73526701450390b0ef5fd3f7efa
--- /dev/null
+++ b/scripts/test_before_push.py
@@ -0,0 +1,51 @@
+# Simple script to make sure basic usage
+# such as training, testing, saving and loading
+# runs without errors.
+import os
+
+
+def run(command):
+ print(command)
+ exit_status = os.system(command)
+ if exit_status > 0:
+ exit(1)
+
+
+if __name__ == '__main__':
+ # download mini datasets
+ if not os.path.exists('./datasets/mini'):
+ run('bash ./datasets/download_cyclegan_dataset.sh mini')
+
+ if not os.path.exists('./datasets/mini_pix2pix'):
+ run('bash ./datasets/download_cyclegan_dataset.sh mini_pix2pix')
+
+ # pretrained cyclegan model
+ if not os.path.exists('./checkpoints/horse2zebra_pretrained/latest_net_G.pth'):
+ run('bash ./scripts/download_cyclegan_model.sh horse2zebra')
+ run('python test.py --model test --dataroot ./datasets/mini --name horse2zebra_pretrained --no_dropout --num_test 1')
+
+ # pretrained pix2pix model
+ if not os.path.exists('./checkpoints/facades_label2photo_pretrained/latest_net_G.pth'):
+ run('bash ./scripts/download_pix2pix_model.sh facades_label2photo')
+ if not os.path.exists('./datasets/facades'):
+ run('bash ./datasets/download_pix2pix_dataset.sh facades')
+ run('python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained --num_test 1')
+
+ # cyclegan train/test
+ run('python train.py --model cycle_gan --name temp_cyclegan --dataroot ./datasets/mini --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 10 --print_freq 1 --display_id -1')
+ run('python test.py --model test --name temp_cyclegan --dataroot ./datasets/mini --num_test 1 --model_suffix "_A" --no_dropout')
+
+ # pix2pix train/test
+ run('python train.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --n_epochs 1 --n_epochs_decay 5 --save_latest_freq 10 --display_id -1')
+ run('python test.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --num_test 1')
+
+ # template train/test
+ run('python train.py --model template --name temp2 --dataroot ./datasets/mini_pix2pix --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 10 --display_id -1')
+ run('python test.py --model template --name temp2 --dataroot ./datasets/mini_pix2pix --num_test 1')
+
+ # colorization train/test (optional)
+ if not os.path.exists('./datasets/mini_colorization'):
+ run('bash ./datasets/download_cyclegan_dataset.sh mini_colorization')
+
+ run('python train.py --model colorization --name temp_color --dataroot ./datasets/mini_colorization --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 5 --display_id -1')
+ run('python test.py --model colorization --name temp_color --dataroot ./datasets/mini_colorization --num_test 1')
diff --git a/scripts/test_colorization.sh b/scripts/test_colorization.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9837fd5fffae32ca2d72101edb1926b2c552f8ef
--- /dev/null
+++ b/scripts/test_colorization.sh
@@ -0,0 +1,2 @@
+set -ex
+python test.py --dataroot ./datasets/colorization --name color_pix2pix --model colorization
diff --git a/scripts/test_cyclegan.sh b/scripts/test_cyclegan.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9036bf8676aba628161a2db69a89fde3c23fb16f
--- /dev/null
+++ b/scripts/test_cyclegan.sh
@@ -0,0 +1,2 @@
+set -ex
+python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan --phase test --no_dropout
diff --git a/scripts/test_pix2pix.sh b/scripts/test_pix2pix.sh
new file mode 100644
index 0000000000000000000000000000000000000000..589599b4c16b1777b8f44e1ee1aadda127de969a
--- /dev/null
+++ b/scripts/test_pix2pix.sh
@@ -0,0 +1,2 @@
+set -ex
+python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --netG unet_256 --direction BtoA --dataset_mode aligned --norm batch
diff --git a/scripts/test_single.sh b/scripts/test_single.sh
new file mode 100644
index 0000000000000000000000000000000000000000..eada640276bd31bdd141c88f13dea3d1379e684d
--- /dev/null
+++ b/scripts/test_single.sh
@@ -0,0 +1,2 @@
+set -ex
+python test.py --dataroot ./datasets/facades/testB/ --name facades_pix2pix --model test --netG unet_256 --direction BtoA --dataset_mode single --norm batch
diff --git a/scripts/train_colorization.sh b/scripts/train_colorization.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e6c06801209f69705aabde30df090c26c73801ff
--- /dev/null
+++ b/scripts/train_colorization.sh
@@ -0,0 +1,2 @@
+set -ex
+python train.py --dataroot ./datasets/colorization --name color_pix2pix --model colorization
diff --git a/scripts/train_cyclegan.sh b/scripts/train_cyclegan.sh
new file mode 100644
index 0000000000000000000000000000000000000000..567721e977d407e1693a47ac1aa738e58f39dbd1
--- /dev/null
+++ b/scripts/train_cyclegan.sh
@@ -0,0 +1,2 @@
+set -ex
+python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan --pool_size 50 --no_dropout
diff --git a/scripts/train_pix2pix.sh b/scripts/train_pix2pix.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0171001c5545d96210caa34449fd6ac674eaeb28
--- /dev/null
+++ b/scripts/train_pix2pix.sh
@@ -0,0 +1,2 @@
+set -ex
+python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --netG unet_256 --direction BtoA --lambda_L1 100 --dataset_mode aligned --norm batch --pool_size 0
diff --git a/test.py b/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..b91d302fa3669f589aa75c6770efadcfc33a4497
--- /dev/null
+++ b/test.py
@@ -0,0 +1,80 @@
+"""General-purpose test script for image-to-image translation.
+
+Once you have trained your model with train.py, you can use this script to test the model.
+It will load a saved model from '--checkpoints_dir' and save the results to '--results_dir'.
+
+It first creates the model and dataset given the options. It will hard-code some parameters.
+It then runs inference for '--num_test' images and saves the results to an HTML file.
+
+Example (You need to train models first or download pre-trained models from our website):
+ Test a CycleGAN model (both sides):
+ python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
+
+ Test a CycleGAN model (one side only):
+ python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
+
+ The option '--model test' is used for generating CycleGAN results only for one side.
+ This option will automatically set '--dataset_mode single', which only loads the images from one set.
+ In contrast, using '--model cycle_gan' requires loading and generating results in both directions,
+ which is sometimes unnecessary. The results will be saved at ./results/.
+ Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
+
+ Test a pix2pix model:
+ python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
+
+See options/base_options.py and options/test_options.py for more test options.
+See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
+See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
+"""
+import os
+from options.test_options import TestOptions
+from data import create_dataset
+from models import create_model
+from util.visualizer import save_images
+from util import html
+
+try:
+ import wandb
+except ImportError:
+ print('Warning: wandb package cannot be found. The option "--use_wandb" will result in error.')
+
+
+if __name__ == '__main__':
+ opt = TestOptions().parse() # get test options
+ # hard-code some parameters for test
+ opt.num_threads = 0 # test code only supports num_threads = 0
+ opt.batch_size = 1 # test code only supports batch_size = 1
+ opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
+ opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
+ opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
+ dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
+ model = create_model(opt) # create a model given opt.model and other options
+ model.setup(opt) # regular setup: load and print networks; create schedulers
+
+ # initialize logger
+ if opt.use_wandb:
+ wandb_run = wandb.init(project=opt.wandb_project_name, name=opt.name, config=opt) if not wandb.run else wandb.run
+ wandb_run._label(repo='CycleGAN-and-pix2pix')
+
+ # create a website
+ web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch)) # define the website directory
+ if opt.load_iter > 0: # load_iter is 0 by default
+ web_dir = '{:s}_iter{:d}'.format(web_dir, opt.load_iter)
+ print('creating web directory', web_dir)
+ webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
+ # test with eval mode. This only affects layers like batchnorm and dropout.
+ # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment with and without eval() mode.
+ # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
+ if opt.eval:
+ model.eval()
+ for i, data in enumerate(dataset):
+ if i >= opt.num_test: # only apply our model to opt.num_test images.
+ break
+ model.set_input(data) # unpack data from data loader
+ model.test() # run inference
+ visuals = model.get_current_visuals() # get image results
+ img_path = model.get_image_paths() # get image paths
+ if i % 5 == 0: # save images to an HTML file
+ print('processing (%04d)-th image... %s' % (i, img_path))
+ save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize, use_wandb=opt.use_wandb)
+ webpage.save() # save the HTML
diff --git a/train.py b/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..2852652df82abe91807e69285ae39fceae0fe651
--- /dev/null
+++ b/train.py
@@ -0,0 +1,77 @@
+"""General-purpose training script for image-to-image translation.
+
+This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and
+different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
+You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').
+
+It first creates the model, dataset, and visualizer given the options.
+It then does standard network training. During training, it also visualizes/saves the images, prints/saves the loss plot, and saves models.
+The script supports continue/resume training. Use '--continue_train' to resume your previous training.
+
+Example:
+ Train a CycleGAN model:
+ python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
+ Train a pix2pix model:
+ python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
+
+See options/base_options.py and options/train_options.py for more training options.
+See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
+See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
+"""
+import time
+from options.train_options import TrainOptions
+from data import create_dataset
+from models import create_model
+from util.visualizer import Visualizer
+
+if __name__ == '__main__':
+ opt = TrainOptions().parse() # get training options
+ dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
+ dataset_size = len(dataset) # get the number of images in the dataset.
+ print('The number of training images = %d' % dataset_size)
+
+ model = create_model(opt) # create a model given opt.model and other options
+ model.setup(opt) # regular setup: load and print networks; create schedulers
+ visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
+ total_iters = 0 # the total number of training iterations
+
+ for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):    # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
+ epoch_start_time = time.time() # timer for entire epoch
+ iter_data_time = time.time() # timer for data loading per iteration
+ epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
+ visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch
+ model.update_learning_rate()  # update learning rates at the beginning of every epoch.
+ for i, data in enumerate(dataset): # inner loop within one epoch
+ iter_start_time = time.time() # timer for computation per iteration
+ if total_iters % opt.print_freq == 0:
+ t_data = iter_start_time - iter_data_time
+
+ total_iters += opt.batch_size
+ epoch_iter += opt.batch_size
+ model.set_input(data) # unpack data from dataset and apply preprocessing
+ model.optimize_parameters() # calculate loss functions, get gradients, update network weights
+
+ if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
+ save_result = total_iters % opt.update_html_freq == 0
+ model.compute_visuals()
+ visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
+
+ if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
+ losses = model.get_current_losses()
+ t_comp = (time.time() - iter_start_time) / opt.batch_size
+ visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
+ if opt.display_id > 0:
+ visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
+
+ if total_iters % opt.save_latest_freq == 0:   # cache our latest model every <save_latest_freq> iterations
+ print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
+ save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
+ model.save_networks(save_suffix)
+
+ iter_data_time = time.time()
+ if epoch % opt.save_epoch_freq == 0:   # cache our model every <save_epoch_freq> epochs
+ print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
+ model.save_networks('latest')
+ model.save_networks(epoch)
+
+ print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
diff --git a/util/__init__.py b/util/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae36f63d8859ec0c60dcbfe67c4ac324e751ddf7
--- /dev/null
+++ b/util/__init__.py
@@ -0,0 +1 @@
+"""This package includes a miscellaneous collection of useful helper functions."""
diff --git a/util/__pycache__/__init__.cpython-39.pyc b/util/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e098df63215bb85ce7d39b286b097247ea8fa75
Binary files /dev/null and b/util/__pycache__/__init__.cpython-39.pyc differ
diff --git a/util/__pycache__/html.cpython-39.pyc b/util/__pycache__/html.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21d8b958bd27ab931b7e64f1625841c15b0ac4c0
Binary files /dev/null and b/util/__pycache__/html.cpython-39.pyc differ
diff --git a/util/__pycache__/util.cpython-39.pyc b/util/__pycache__/util.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..89a30126482655cfc59b7df30c4be184a2acb8a2
Binary files /dev/null and b/util/__pycache__/util.cpython-39.pyc differ
diff --git a/util/__pycache__/visualizer.cpython-39.pyc b/util/__pycache__/visualizer.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ca1efeb93e6af56aa0308760eeac29bf4013d43f
Binary files /dev/null and b/util/__pycache__/visualizer.cpython-39.pyc differ
diff --git a/util/get_data.py b/util/get_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..97edc3ce3c3ab6d6080dca34e73a5fb77bb715fb
--- /dev/null
+++ b/util/get_data.py
@@ -0,0 +1,110 @@
+from __future__ import print_function
+import os
+import tarfile
+import requests
+from warnings import warn
+from zipfile import ZipFile
+from bs4 import BeautifulSoup
+from os.path import abspath, isdir, join, basename
+
+
+class GetData(object):
+ """A Python script for downloading CycleGAN or pix2pix datasets.
+
+ Parameters:
+ technique (str) -- One of: 'cyclegan' or 'pix2pix'.
+ verbose (bool) -- If True, print additional information.
+
+ Examples:
+ >>> from util.get_data import GetData
+ >>> gd = GetData(technique='cyclegan')
+ >>> new_data_path = gd.get(save_path='./datasets') # options will be displayed.
+
+ Alternatively, you can use the bash scripts 'datasets/download_pix2pix_dataset.sh'
+ and 'datasets/download_cyclegan_dataset.sh' to download the datasets directly.
+ """
+
+ def __init__(self, technique='cyclegan', verbose=True):
+ url_dict = {
+ 'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/',
+ 'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets'
+ }
+ self.url = url_dict.get(technique.lower())
+ self._verbose = verbose
+
+ def _print(self, text):
+ if self._verbose:
+ print(text)
+
+ @staticmethod
+ def _get_options(r):
+ soup = BeautifulSoup(r.text, 'lxml')
+ options = [h.text for h in soup.find_all('a', href=True)
+ if h.text.endswith(('.zip', 'tar.gz'))]
+ return options
+
+ def _present_options(self):
+ r = requests.get(self.url)
+ options = self._get_options(r)
+ print('Options:\n')
+ for i, o in enumerate(options):
+ print("{0}: {1}".format(i, o))
+ choice = input("\nPlease enter the number of the "
+ "dataset above you wish to download:")
+ return options[int(choice)]
+
+ def _download_data(self, dataset_url, save_path):
+ if not isdir(save_path):
+ os.makedirs(save_path)
+
+ base = basename(dataset_url)
+ temp_save_path = join(save_path, base)
+
+ with open(temp_save_path, "wb") as f:
+ r = requests.get(dataset_url)
+ f.write(r.content)
+
+ if base.endswith('.tar.gz'):
+ obj = tarfile.open(temp_save_path)
+ elif base.endswith('.zip'):
+ obj = ZipFile(temp_save_path, 'r')
+ else:
+ raise ValueError("Unknown File Type: {0}.".format(base))
+
+ self._print("Unpacking Data...")
+ obj.extractall(save_path)
+ obj.close()
+ os.remove(temp_save_path)
+
+ def get(self, save_path, dataset=None):
+ """
+
+ Download a dataset.
+
+ Parameters:
+ save_path (str) -- A directory to save the data to.
+ dataset (str) -- (optional). A specific dataset to download.
+ Note: this must include the file extension.
+ If None, options will be presented for you
+ to choose from.
+
+ Returns:
+ save_path_full (str) -- the absolute path to the downloaded data.
+
+ """
+ if dataset is None:
+ selected_dataset = self._present_options()
+ else:
+ selected_dataset = dataset
+
+ save_path_full = join(save_path, selected_dataset.split('.')[0])
+
+ if isdir(save_path_full):
+ warn("\n'{0}' already exists. Voiding Download.".format(
+ save_path_full))
+ else:
+ self._print('Downloading Data...')
+ url = "{0}/{1}".format(self.url, selected_dataset)
+ self._download_data(url, save_path=save_path)
+
+ return abspath(save_path_full)
diff --git a/util/html.py b/util/html.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc3262a1eafda34842e4dbad47bb6ba72f0c5a68
--- /dev/null
+++ b/util/html.py
@@ -0,0 +1,86 @@
+import dominate
+from dominate.tags import meta, h3, table, tr, td, p, a, img, br
+import os
+
+
+class HTML:
+ """This HTML class allows us to save images and write texts into a single HTML file.
+
+ It consists of functions such as add_header (add a text header to the HTML file),
+ add_images (add a row of images to the HTML file), and save (save the HTML to the disk).
+ It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
+ """
+
+ def __init__(self, web_dir, title, refresh=0):
+ """Initialize the HTML classes
+
+ Parameters:
+ web_dir (str) -- a directory that stores the webpage. The HTML file will be created at <web_dir>/index.html; images will be saved under <web_dir>/images/
+ title (str) -- the webpage name
+ refresh (int) -- how often the website refreshes itself; if 0, no refreshing
+ """
+ self.title = title
+ self.web_dir = web_dir
+ self.img_dir = os.path.join(self.web_dir, 'images')
+ if not os.path.exists(self.web_dir):
+     os.makedirs(self.web_dir)
+ if not os.path.exists(self.img_dir):
+     os.makedirs(self.img_dir)
+
+ self.doc = dominate.document(title=title)
+ if refresh > 0:
+     with self.doc.head:
+         meta(http_equiv="refresh", content=str(refresh))
+
+ def get_image_dir(self):
+ """Return the directory that stores images"""
+ return self.img_dir
+
+ def add_header(self, text):
+ """Insert a header to the HTML file
+
+ Parameters:
+ text (str) -- the header text
+ """
+ with self.doc:
+ h3(text)
+
+ def add_images(self, ims, txts, links, width=400):
+ """add images to the HTML file
+
+ Parameters:
+ ims (str list) -- a list of image paths
+ txts (str list) -- a list of image names shown on the website
+ links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
+ """
+ self.t = table(border=1, style="table-layout: fixed;") # Insert a table
+ self.doc.add(self.t)
+ with self.t:
+ with tr():
+ for im, txt, link in zip(ims, txts, links):
+ with td(style="word-wrap: break-word;", halign="center", valign="top"):
+ with p():
+ with a(href=os.path.join('images', link)):
+ img(style="width:%dpx" % width, src=os.path.join('images', im))
+ br()
+ p(txt)
+
+ def save(self):
+ """save the current content to the HMTL file"""
+ html_file = '%s/index.html' % self.web_dir
+ f = open(html_file, 'wt')
+ f.write(self.doc.render())
+ f.close()
+
+
+if __name__ == '__main__': # we show an example usage here.
+ html = HTML('web/', 'test_html')
+ html.add_header('hello world')
+
+ ims, txts, links = [], [], []
+ for n in range(4):
+ ims.append('image_%d.png' % n)
+ txts.append('text_%d' % n)
+ links.append('image_%d.png' % n)
+ html.add_images(ims, txts, links)
+ html.save()
diff --git a/util/image_pool.py b/util/image_pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d086f882bc3d1b90c529fce6cddaaa75f2005d7
--- /dev/null
+++ b/util/image_pool.py
@@ -0,0 +1,54 @@
+import random
+import torch
+
+
+class ImagePool():
+ """This class implements an image buffer that stores previously generated images.
+
+ This buffer enables us to update discriminators using a history of generated images
+ rather than the ones produced by the latest generators.
+ """
+
+ def __init__(self, pool_size):
+ """Initialize the ImagePool class
+
+ Parameters:
+ pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
+ """
+ self.pool_size = pool_size
+ if self.pool_size > 0: # create an empty pool
+ self.num_imgs = 0
+ self.images = []
+
+ def query(self, images):
+ """Return an image from the pool.
+
+ Parameters:
+ images: the latest generated images from the generator
+
+ Returns images from the buffer.
+
+ With probability 0.5, the buffer will return the input images.
+ With probability 0.5, the buffer will return images previously stored in the buffer,
+ and insert the current images into the buffer.
+ """
+ if self.pool_size == 0: # if the buffer size is 0, do nothing
+ return images
+ return_images = []
+ for image in images:
+ image = torch.unsqueeze(image.data, 0)
+ if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer
+ self.num_imgs = self.num_imgs + 1
+ self.images.append(image)
+ return_images.append(image)
+ else:
+ p = random.uniform(0, 1)
+ if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
+ random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
+ tmp = self.images[random_id].clone()
+ self.images[random_id] = image
+ return_images.append(tmp)
+ else: # by another 50% chance, the buffer will return the current image
+ return_images.append(image)
+ return_images = torch.cat(return_images, 0) # collect all the images and return
+ return return_images
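+
+
+# Minimal usage sketch (illustrative only, not part of the original module): the pool is
+# typically fed each freshly generated batch before the discriminator update, so the
+# discriminator sees a mix of current and historical fakes.
+if __name__ == '__main__':
+    pool = ImagePool(pool_size=50)
+    fake = torch.randn(2, 3, 8, 8)   # a toy batch standing in for generator outputs
+    mixed = pool.query(fake)         # same shape; may contain previously stored samples
+    print(mixed.shape)               # torch.Size([2, 3, 8, 8])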
diff --git a/util/util.py b/util/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..b050c13e1d6d0f197af356b099b9c11c0714522c
--- /dev/null
+++ b/util/util.py
@@ -0,0 +1,103 @@
+"""This module contains simple helper functions """
+from __future__ import print_function
+import torch
+import numpy as np
+from PIL import Image
+import os
+
+
+def tensor2im(input_image, imtype=np.uint8):
+ """"Converts a Tensor array into a numpy image array.
+
+ Parameters:
+ input_image (tensor) -- the input image tensor array
+ imtype (type) -- the desired type of the converted numpy array
+ """
+ if not isinstance(input_image, np.ndarray):
+ if isinstance(input_image, torch.Tensor): # get the data from a variable
+ image_tensor = input_image.data
+ else:
+ return input_image
+ image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array
+ if image_numpy.shape[0] == 1: # grayscale to RGB
+ image_numpy = np.tile(image_numpy, (3, 1, 1))
+ image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
+ else: # if it is a numpy array, do nothing
+ image_numpy = input_image
+ return image_numpy.astype(imtype)
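+
+# Example (illustrative): a 1 x 3 x H x W tensor with values in [-1, 1] becomes an
+# H x W x 3 uint8 array in [0, 255], e.g.
+#   tensor2im(torch.zeros(1, 3, 4, 4))[0, 0]  ->  array([127, 127, 127], dtype=uint8)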
+
+
+def diagnose_network(net, name='network'):
+ """Calculate and print the mean of average absolute(gradients)
+
+ Parameters:
+ net (torch network) -- Torch network
+ name (str) -- the name of the network
+ """
+ mean = 0.0
+ count = 0
+ for param in net.parameters():
+ if param.grad is not None:
+ mean += torch.mean(torch.abs(param.grad.data))
+ count += 1
+ if count > 0:
+ mean = mean / count
+ print(name)
+ print(mean)
+
+
+def save_image(image_numpy, image_path, aspect_ratio=1.0):
+ """Save a numpy image to the disk
+
+ Parameters:
+ image_numpy (numpy array) -- input numpy array
+ image_path (str) -- the path of the image
+ """
+
+ image_pil = Image.fromarray(image_numpy)
+ h, w, _ = image_numpy.shape
+
+ if aspect_ratio > 1.0:
+ image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
+ if aspect_ratio < 1.0:
+ image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
+ image_pil.save(image_path)
+
+
+def print_numpy(x, val=True, shp=False):
+ """Print the mean, min, max, median, std, and size of a numpy array
+
+ Parameters:
+ val (bool) -- if print the values of the numpy array
+ shp (bool) -- if print the shape of the numpy array
+ """
+ x = x.astype(np.float64)
+ if shp:
+ print('shape,', x.shape)
+ if val:
+ x = x.flatten()
+ print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
+ np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
+
+
+def mkdirs(paths):
+ """create empty directories if they don't exist
+
+ Parameters:
+ paths (str list) -- a list of directory paths
+ """
+ if isinstance(paths, list) and not isinstance(paths, str):
+ for path in paths:
+ mkdir(path)
+ else:
+ mkdir(paths)
+
+
+def mkdir(path):
+ """create a single empty directory if it didn't exist
+
+ Parameters:
+ path (str) -- a single directory path
+ """
+ if not os.path.exists(path):
+ os.makedirs(path)
diff --git a/util/visualizer.py b/util/visualizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6bb884f2579d8e2e252795add98ba029e0cf68b
--- /dev/null
+++ b/util/visualizer.py
@@ -0,0 +1,257 @@
+import numpy as np
+import os
+import sys
+import ntpath
+import time
+from . import util, html
+from subprocess import Popen, PIPE
+
+
+try:
+ import wandb
+except ImportError:
+ print('Warning: wandb package cannot be found. The option "--use_wandb" will result in error.')
+
+if sys.version_info[0] == 2:
+ VisdomExceptionBase = Exception
+else:
+ VisdomExceptionBase = ConnectionError
+
+
+def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256, use_wandb=False):
+ """Save images to the disk.
+
+ Parameters:
+ webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
+ visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
+ image_path (str) -- the string is used to create image paths
+ aspect_ratio (float) -- the aspect ratio of saved images
+ width (int) -- the images will be resized to width x width
+
+ This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
+ """
+ image_dir = webpage.get_image_dir()
+ short_path = ntpath.basename(image_path[0])
+ name = os.path.splitext(short_path)[0]
+
+ webpage.add_header(name)
+ ims, txts, links = [], [], []
+ ims_dict = {}
+ for label, im_data in visuals.items():
+ im = util.tensor2im(im_data)
+ image_name = '%s_%s.png' % (name, label)
+ save_path = os.path.join(image_dir, image_name)
+ util.save_image(im, save_path, aspect_ratio=aspect_ratio)
+ ims.append(image_name)
+ txts.append(label)
+ links.append(image_name)
+ if use_wandb:
+ ims_dict[label] = wandb.Image(im)
+ webpage.add_images(ims, txts, links, width=width)
+ if use_wandb:
+ wandb.log(ims_dict)
+
+
+class Visualizer():
+ """This class includes several functions that can display/save images and print/save logging information.
+
+ It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
+ """
+
+ def __init__(self, opt):
+ """Initialize the Visualizer class
+
+ Parameters:
+ opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
+ Step 1: Cache the training/test options
+ Step 2: connect to a visdom server
+ Step 3: create an HTML object for saving HTML files
+ Step 4: create a logging file to store training losses
+ """
+ self.opt = opt # cache the option
+ self.display_id = opt.display_id
+ self.use_html = opt.isTrain and not opt.no_html
+ self.win_size = opt.display_winsize
+ self.name = opt.name
+ self.port = opt.display_port
+ self.saved = False
+ self.use_wandb = opt.use_wandb
+ self.wandb_project_name = opt.wandb_project_name
+ self.current_epoch = 0
+ self.ncols = opt.display_ncols
+
+ if self.display_id > 0:  # connect to a visdom server given <display_port> and <display_server>
+ import visdom
+ self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
+ if not self.vis.check_connection():
+ self.create_visdom_connections()
+
+ if self.use_wandb:
+ self.wandb_run = wandb.init(project=self.wandb_project_name, name=opt.name, config=opt) if not wandb.run else wandb.run
+ self.wandb_run._label(repo='CycleGAN-and-pix2pix')
+
+ if self.use_html:  # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
+ self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
+ self.img_dir = os.path.join(self.web_dir, 'images')
+ print('create web directory %s...' % self.web_dir)
+ util.mkdirs([self.web_dir, self.img_dir])
+ # create a logging file to store training losses
+ self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
+ with open(self.log_name, "a") as log_file:
+ now = time.strftime("%c")
+ log_file.write('================ Training Loss (%s) ================\n' % now)
+
+ def reset(self):
+ """Reset the self.saved status"""
+ self.saved = False
+
+ def create_visdom_connections(self):
+ """If the program could not connect to Visdom server, this function will start a new server at port < self.port > """
+ cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
+ print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
+ print('Command: %s' % cmd)
+ Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
+
+ def display_current_results(self, visuals, epoch, save_result):
+ """Display current results on visdom; save current results to an HTML file.
+
+ Parameters:
+ visuals (OrderedDict) - - dictionary of images to display or save
+ epoch (int) - - the current epoch
+ save_result (bool) - - if save the current results to an HTML file
+ """
+ if self.display_id > 0: # show images in the browser using visdom
+ ncols = self.ncols
+ if ncols > 0: # show all the images in one visdom panel
+ ncols = min(ncols, len(visuals))
+ h, w = next(iter(visuals.values())).shape[:2]
+ table_css = """""" % (w, h) # create a table css
+ # create a table of images.
+ title = self.name
+ label_html = ''
+ label_html_row = ''
+ images = []
+ idx = 0
+ for label, image in visuals.items():
+ image_numpy = util.tensor2im(image)
+ label_html_row += '<td>%s</td>' % label
+ images.append(image_numpy.transpose([2, 0, 1]))
+ idx += 1
+ if idx % ncols == 0:
+ label_html += '<tr>%s</tr>' % label_html_row
+ label_html_row = ''
+ white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
+ while idx % ncols != 0:
+ images.append(white_image)
+ label_html_row += '<td></td>'
+ idx += 1
+ if label_html_row != '':
+ label_html += '<tr>%s</tr>' % label_html_row
+ try:
+ self.vis.images(images, nrow=ncols, win=self.display_id + 1,
+ padding=2, opts=dict(title=title + ' images'))
+ label_html = '<table>%s</table>' % label_html
+ self.vis.text(table_css + label_html, win=self.display_id + 2,
+ opts=dict(title=title + ' labels'))
+ except VisdomExceptionBase:
+ self.create_visdom_connections()
+
+ else: # show each image in a separate visdom panel;
+ idx = 1
+ try:
+ for label, image in visuals.items():
+ image_numpy = util.tensor2im(image)
+ self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),
+ win=self.display_id + idx)
+ idx += 1
+ except VisdomExceptionBase:
+ self.create_visdom_connections()
+
+ if self.use_wandb:
+ columns = [key for key, _ in visuals.items()]
+ columns.insert(0, 'epoch')
+ result_table = wandb.Table(columns=columns)
+ table_row = [epoch]
+ ims_dict = {}
+ for label, image in visuals.items():
+ image_numpy = util.tensor2im(image)
+ wandb_image = wandb.Image(image_numpy)
+ table_row.append(wandb_image)
+ ims_dict[label] = wandb_image
+ self.wandb_run.log(ims_dict)
+ if epoch != self.current_epoch:
+ self.current_epoch = epoch
+ result_table.add_data(*table_row)
+ self.wandb_run.log({"Result": result_table})
+
+ if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
+ self.saved = True
+ # save images to the disk
+ for label, image in visuals.items():
+ image_numpy = util.tensor2im(image)
+ img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
+ util.save_image(image_numpy, img_path)
+
+ # update website
+ webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)
+ for n in range(epoch, 0, -1):
+ webpage.add_header('epoch [%d]' % n)
+ ims, txts, links = [], [], []
+
+ for label, image in visuals.items():
+ image_numpy = util.tensor2im(image)
+ img_path = 'epoch%.3d_%s.png' % (n, label)
+ ims.append(img_path)
+ txts.append(label)
+ links.append(img_path)
+ webpage.add_images(ims, txts, links, width=self.win_size)
+ webpage.save()
+
+ def plot_current_losses(self, epoch, counter_ratio, losses):
+ """display the current losses on visdom display: dictionary of error labels and values
+
+ Parameters:
+ epoch (int) -- current epoch
+ counter_ratio (float) -- progress (fraction) in the current epoch, between 0 and 1
+ losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
+ """
+ if not hasattr(self, 'plot_data'):
+ self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
+ self.plot_data['X'].append(epoch + counter_ratio)
+ self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
+ try:
+ self.vis.line(
+ X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
+ Y=np.array(self.plot_data['Y']),
+ opts={
+ 'title': self.name + ' loss over time',
+ 'legend': self.plot_data['legend'],
+ 'xlabel': 'epoch',
+ 'ylabel': 'loss'},
+ win=self.display_id)
+ except VisdomExceptionBase:
+ self.create_visdom_connections()
+ if self.use_wandb:
+ self.wandb_run.log(losses)
+
+ # losses: same format as |losses| of plot_current_losses
+ def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
+ """print current losses on console; also save the losses to the disk
+
+ Parameters:
+ epoch (int) -- current epoch
+ iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
+ losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
+ t_comp (float) -- computational time per data point (normalized by batch_size)
+ t_data (float) -- data loading time per data point (normalized by batch_size)
+ """
+ message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
+ for k, v in losses.items():
+ message += '%s: %.3f ' % (k, v)
+
+ print(message) # print the message
+ with open(self.log_name, "a") as log_file:
+ log_file.write('%s\n' % message) # save the message