Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


--------------------------- LICENSE FOR pix2pix --------------------------------
BSD License

For pix2pix software
Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

----------------------------- LICENSE FOR DCGAN --------------------------------
BSD License

For dcgan.torch software

Copyright (c) 2015, Facebook, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
<img src='imgs/horse2zebra.gif' align="right" width=384>

<br><br><br>

# CycleGAN and pix2pix in PyTorch

We provide PyTorch implementations for both unpaired and paired image-to-image translation.

The code was written by [Jun-Yan Zhu](https://github.com/junyanz) and [Taesung Park](https://github.com/taesung89), and supported by [Tongzhou Wang](https://ssnl.github.io/).

This PyTorch implementation produces results comparable to or better than our original Torch software. If you would like to reproduce the same results as in the papers, check out the original [CycleGAN Torch](https://github.com/junyanz/CycleGAN) and [pix2pix Torch](https://github.com/phillipi/pix2pix) code.

**Note**: The current software works well with PyTorch 0.4+. Check out the older [branch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/tree/pytorch0.3.1) that supports PyTorch 0.1-0.3.

You may find useful information in [training/test tips](docs/tips.md) and [frequently asked questions](docs/qa.md).

**CycleGAN: [Project](https://junyanz.github.io/CycleGAN/) | [Paper](https://arxiv.org/pdf/1703.10593.pdf) | [Torch](https://github.com/junyanz/CycleGAN)**
<img src="https://junyanz.github.io/CycleGAN/images/teaser_high_res.jpg" width="800"/>

**Pix2pix: [Project](https://phillipi.github.io/pix2pix/) | [Paper](https://arxiv.org/pdf/1611.07004.pdf) | [Torch](https://github.com/phillipi/pix2pix)**

<img src="https://phillipi.github.io/pix2pix/images/teaser_v3.png" width="800px"/>

**[EdgesCats Demo](https://affinelayer.com/pixsrv/) | [pix2pix-tensorflow](https://github.com/affinelayer/pix2pix-tensorflow) | by [Christopher Hesse](https://twitter.com/christophrhesse)**

<img src='imgs/edges2cats.jpg' width="400px"/>

If you use this code for your research, please cite:

Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks
[Jun-Yan Zhu](https://people.eecs.berkeley.edu/~junyanz/)\*, [Taesung Park](https://taesung.me/)\*, [Phillip Isola](https://people.eecs.berkeley.edu/~isola/), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros)
In ICCV 2017. (* equal contributions) [[Bibtex]](https://junyanz.github.io/CycleGAN/CycleGAN.txt)

Image-to-Image Translation with Conditional Adversarial Networks
[Phillip Isola](https://people.eecs.berkeley.edu/~isola), [Jun-Yan Zhu](https://people.eecs.berkeley.edu/~junyanz), [Tinghui Zhou](https://people.eecs.berkeley.edu/~tinghuiz), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros)
In CVPR 2017. [[Bibtex]](http://people.csail.mit.edu/junyanz/projects/pix2pix/pix2pix.bib)

## Course
CycleGAN course assignment [code](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-code.zip) and [handout](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-handout.pdf) designed by Prof. [Roger Grosse](http://www.cs.toronto.edu/~rgrosse/) for [CSC321](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/) "Intro to Neural Networks and Machine Learning" at the University of Toronto. Please contact the instructor if you would like to adopt it in your course.
## Other implementations
### CycleGAN
<p><a href="https://github.com/leehomyc/cyclegan-1">[Tensorflow]</a> (by Harry Yang),
<a href="https://github.com/architrathore/CycleGAN/">[Tensorflow]</a> (by Archit Rathore),
<a href="https://github.com/vanhuyz/CycleGAN-TensorFlow">[Tensorflow]</a> (by Van Huy),
<a href="https://github.com/XHUJOY/CycleGAN-tensorflow">[Tensorflow]</a> (by Xiaowei Hu),
<a href="https://github.com/LynnHo/CycleGAN-Tensorflow-Simple">[Tensorflow-simple]</a> (by Zhenliang He),
<a href="https://github.com/luoxier/CycleGAN_Tensorlayer">[TensorLayer]</a> (by luoxier),
<a href="https://github.com/Aixile/chainer-cyclegan">[Chainer]</a> (by Yanghua Jin),
<a href="https://github.com/yunjey/mnist-svhn-transfer">[Minimal PyTorch]</a> (by yunjey),
<a href="https://github.com/Ldpe2G/DeepLearningForFun/tree/master/Mxnet-Scala/CycleGAN">[Mxnet]</a> (by Ldpe2G),
<a href="https://github.com/tjwei/GANotebooks">[lasagne/keras]</a> (by tjwei)</p>

### pix2pix
<p><a href="https://github.com/affinelayer/pix2pix-tensorflow">[Tensorflow]</a> (by Christopher Hesse),
<a href="https://github.com/Eyyub/tensorflow-pix2pix">[Tensorflow]</a> (by Eyyüb Sariu),
<a href="https://github.com/datitran/face2face-demo">[Tensorflow (face2face)]</a> (by Dat Tran),
<a href="https://github.com/awjuliani/Pix2Pix-Film">[Tensorflow (film)]</a> (by Arthur Juliani),
<a href="https://github.com/kaonashi-tyc/zi2zi">[Tensorflow (zi2zi)]</a> (by Yuchen Tian),
<a href="https://github.com/pfnet-research/chainer-pix2pix">[Chainer]</a> (by mattya),
<a href="https://github.com/tjwei/GANotebooks">[tf/torch/keras/lasagne]</a> (by tjwei),
<a href="https://github.com/taey16/pix2pixBEGAN.pytorch">[Pytorch]</a> (by taey16)
</p>
## Prerequisites
- Linux or macOS
- Python 2 or 3
- CPU or NVIDIA GPU + CUDA CuDNN

## Getting Started
### Installation

- Clone this repo:
```bash
git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
cd pytorch-CycleGAN-and-pix2pix
```

- Install PyTorch 0.4+ and torchvision from http://pytorch.org, along with other dependencies (e.g., [visdom](https://github.com/facebookresearch/visdom) and [dominate](https://github.com/Knio/dominate)). You can install all the dependencies with
```bash
pip install -r requirements.txt
```

- For Conda users, we include a script `./scripts/conda_deps.sh` to install PyTorch and other libraries.
### CycleGAN train/test
- Download a CycleGAN dataset (e.g., maps):
```bash
bash ./datasets/download_cyclegan_dataset.sh maps
```
- Train a model:
```bash
#!./scripts/train_cyclegan.sh
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
```
- To view training results and loss plots, run `python -m visdom.server` and open http://localhost:8097. To see more intermediate results, check out `./checkpoints/maps_cyclegan/web/index.html`.
- Test the model:
```bash
#!./scripts/test_cyclegan.sh
python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
```
The test results will be saved to an HTML file: `./results/maps_cyclegan/latest_test/index.html`.
### pix2pix train/test
- Download a pix2pix dataset (e.g., facades):
```bash
bash ./datasets/download_pix2pix_dataset.sh facades
```
- Train a model:
```bash
#!./scripts/train_pix2pix.sh
python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
```
- To view training results and loss plots, run `python -m visdom.server` and open http://localhost:8097. To see more intermediate results, check out `./checkpoints/facades_pix2pix/web/index.html`.
- Test the model (`bash ./scripts/test_pix2pix.sh`):
```bash
#!./scripts/test_pix2pix.sh
python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
```
The test results will be saved to an HTML file: `./results/facades_pix2pix/test_latest/index.html`.

You can find more scripts in the `scripts` directory.
### Apply a pre-trained model (CycleGAN)
- You can download a pretrained model (e.g., horse2zebra) with the following script:
```bash
bash ./scripts/download_cyclegan_model.sh horse2zebra
```
The pretrained model is saved at `./checkpoints/{name}_pretrained/latest_net_G.pth`. Check [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_cyclegan_model.sh#L3) for all the available CycleGAN models.
- To test the model, you also need to download the horse2zebra dataset:
```bash
bash ./datasets/download_cyclegan_dataset.sh horse2zebra
```

- Then generate the results:
```bash
python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test
```
The `--model test` option generates CycleGAN results for one direction only; `python test.py --model cycle_gan` loads both generators and produces results in both directions, which is sometimes unnecessary. The results are saved to `./results/` by default; use `--results_dir {directory_path_to_save_result}` to specify a different results directory.

- If you would like to apply a pre-trained model to a collection of input images (rather than image pairs), use the `--dataset_mode single` and `--model test` options. Here is a script that applies a model to facade label maps (stored in the directory `facades/testB`):
```bash
#!./scripts/test_single.sh
python test.py --dataroot ./datasets/facades/testB/ --name {your_trained_model_name} --model test
```
You might want to specify `--netG` to match the generator architecture of the trained model.
### Apply a pre-trained model (pix2pix)

Download a pre-trained model with `./scripts/download_pix2pix_model.sh`.

- Check [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_pix2pix_model.sh#L3) for all the available pix2pix models. For example, to download the label2photo model for the Facades dataset:
```bash
bash ./scripts/download_pix2pix_model.sh facades_label2photo
```
- Download the pix2pix facades dataset:
```bash
bash ./datasets/download_pix2pix_dataset.sh facades
```
- Then generate the results:
```bash
python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained
```
Note that we specify `--direction BtoA`, since the Facades dataset's A-to-B direction is photos to labels.

- See a list of currently available models at `./scripts/download_pix2pix_model.sh`.
## [Datasets](docs/datasets.md)
Download pix2pix/CycleGAN datasets and create your own datasets.

## [Training/Test Tips](docs/tips.md)
Best practices for training and testing your models.

## [Frequently Asked Questions](docs/qa.md)
Before you post a new question, please first look at the above Q&A and existing GitHub issues.
## Citation
If you use this code for your research, please cite our papers.
```
@inproceedings{CycleGAN2017,
  title={Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks},
  author={Zhu, Jun-Yan and Park, Taesung and Isola, Phillip and Efros, Alexei A},
  booktitle={Computer Vision (ICCV), 2017 IEEE International Conference on},
  year={2017}
}

@inproceedings{isola2017image,
  title={Image-to-Image Translation with Conditional Adversarial Networks},
  author={Isola, Phillip and Zhu, Jun-Yan and Zhou, Tinghui and Efros, Alexei A},
  booktitle={Computer Vision and Pattern Recognition (CVPR), 2017 IEEE Conference on},
  year={2017}
}
```
## Related Projects
**[CycleGAN-Torch](https://github.com/junyanz/CycleGAN) |
[pix2pix-Torch](https://github.com/phillipi/pix2pix) | [pix2pixHD](https://github.com/NVIDIA/pix2pixHD) |
[iGAN](https://github.com/junyanz/iGAN) |
[BicycleGAN](https://github.com/junyanz/BicycleGAN)**

## Cat Paper Collection
If you love cats and enjoy reading cool graphics, vision, and learning papers, please check out the Cat Paper [Collection](https://github.com/junyanz/CatPapers).

## Acknowledgments
Our code is inspired by [pytorch-DCGAN](https://github.com/pytorch/examples/tree/master/dcgan).
# data/__init__.py -- dataset factory: resolves the --dataset_mode option to a
# concrete Dataset class and wraps it in a multi-threaded DataLoader.
import importlib
import torch.utils.data
from data.base_data_loader import BaseDataLoader
from data.base_dataset import BaseDataset


def find_dataset_using_name(dataset_name):
    # Given the option --dataset_mode [datasetname],
    # the file "data/datasetname_dataset.py" will be imported.
    dataset_filename = "data." + dataset_name + "_dataset"
    datasetlib = importlib.import_module(dataset_filename)

    # In that file, the class named DatasetNameDataset() will be
    # instantiated. It has to be a subclass of BaseDataset, and the
    # name match is case-insensitive.
    dataset = None
    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
    for name, cls in datasetlib.__dict__.items():
        if name.lower() == target_dataset_name.lower() \
                and issubclass(cls, BaseDataset):
            dataset = cls

    if dataset is None:
        print("In %s.py, there should be a subclass of BaseDataset "
              "with a class name that matches %s in lowercase."
              % (dataset_filename, target_dataset_name))
        exit(1)  # exit with a non-zero status to signal the error

    return dataset
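
# For illustration only (a hypothetical module, not part of the repo): a
# minimal dataset that the lookup above would accept. Saved as
# data/dummy_dataset.py and selected with --dataset_mode dummy, the class
# below is matched case-insensitively against 'dummydataset'. Its methods
# follow how this package uses the class: modify_commandline_options()
# through get_option_setter(), initialize(opt) and name() through
# create_dataset(), and __len__/__getitem__ through torch's DataLoader.
#
#     from data.base_dataset import BaseDataset
#
#     class DummyDataset(BaseDataset):
#         @staticmethod
#         def modify_commandline_options(parser, is_train):
#             return parser  # add dataset-specific flags here
#
#         def initialize(self, opt):
#             self.opt = opt
#             self.paths = []  # e.g., image paths gathered under opt.dataroot
#
#         def name(self):
#             return 'DummyDataset'
#
#         def __len__(self):
#             return len(self.paths)
#
#         def __getitem__(self, index):
#             return {}  # real datasets return a dict of tensors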

def get_option_setter(dataset_name):
    # Expose the dataset class's static option modifier so the option
    # parser can add dataset-specific command-line flags.
    dataset_class = find_dataset_using_name(dataset_name)
    return dataset_class.modify_commandline_options


def create_dataset(opt):
    dataset = find_dataset_using_name(opt.dataset_mode)
    instance = dataset()
    instance.initialize(opt)
    print("dataset [%s] was created" % (instance.name()))
    return instance


def CreateDataLoader(opt):
    data_loader = CustomDatasetDataLoader()
    data_loader.initialize(opt)
    return data_loader


# Wrapper class of the Dataset class that performs
# multi-threaded data loading
class CustomDatasetDataLoader(BaseDataLoader):
    def name(self):
        return 'CustomDatasetDataLoader'

    def initialize(self, opt):
        BaseDataLoader.initialize(self, opt)
        self.dataset = create_dataset(opt)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batch_size,
            shuffle=not opt.serial_batches,   # --serial_batches disables shuffling
            num_workers=int(opt.num_threads))

    def load_data(self):
        return self

    def __len__(self):
        # Cap the reported length at --max_dataset_size.
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        # Stop iterating once --max_dataset_size samples have been yielded.
        for i, data in enumerate(self.dataloader):
            if i * self.opt.batch_size >= self.opt.max_dataset_size:
                break
            yield data
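
# Usage sketch (hypothetical; the repo's train.py does the equivalent after
# parsing options -- SimpleNamespace merely stands in for the parsed options
# object, and the field values below are assumptions):
#
#     from types import SimpleNamespace
#     from data import CreateDataLoader
#
#     opt = SimpleNamespace(dataset_mode='single',
#                           dataroot='./datasets/facades/testB',
#                           batch_size=1, serial_batches=True, num_threads=0,
#                           max_dataset_size=float('inf'))
#     data_loader = CreateDataLoader(opt)
#     dataset = data_loader.load_data()
#     print('#images = %d' % len(data_loader))
#     for i, data in enumerate(dataset):
#         pass  # each `data` is one batch dict from the underlying dataset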